diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index bc5e3144a1b4aba99dea2e187d3cbeaf98cf9bb0..576efab00e7f372e01a389937acb6e291b3695d5 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -10,11 +10,6 @@
 			"Comment": "null-12",
 			"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
 		},
-		{
-			"ImportPath": "code.google.com/p/snappy-go/snappy",
-			"Comment": "null-15",
-			"Rev": "12e4b4183793ac4b061921e7980845e750679fd0"
-		},
 		{
 			"ImportPath": "github.com/codegangsta/cli",
 			"Comment": "1.2.0-95-g9b2bd2b",
@@ -25,10 +20,6 @@
 			"Comment": "v23.1-82-g908aad3",
 			"Rev": "908aad345c9fbf3ab9bbb94031dc02d0d90df1b8"
 		},
-		{
-			"ImportPath": "github.com/ethereum/serpent-go",
-			"Rev": "5767a0dbd759d313df3f404dadb7f98d7ab51443"
-		},
 		{
 			"ImportPath": "github.com/howeyc/fsnotify",
 			"Comment": "v0.9.0-11-g6b1ef89",
@@ -46,10 +37,6 @@
 			"ImportPath": "github.com/kardianos/osext",
 			"Rev": "ccfcd0245381f0c94c68f50626665eed3c6b726a"
 		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
 		{
 			"ImportPath": "github.com/obscuren/qml",
 			"Rev": "c288002b52e905973b131089a8a7c761d4a2c36a"
@@ -67,27 +54,7 @@
 			"Rev": "907cca0f578a5316fb864ec6992dc3d9730ec58c"
 		},
 		{
-			"ImportPath": "github.com/robertkrimen/otto/ast",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto/dbg",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto/file",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto/parser",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto/registry",
-			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
-		},
-		{
-			"ImportPath": "github.com/robertkrimen/otto/token",
+			"ImportPath": "github.com/robertkrimen/otto",
 			"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
 		},
 		{
@@ -96,7 +63,11 @@
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
-			"Rev": "832fa7ed4d28545eab80f19e1831fc004305cade"
+			"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
+		},
+		{
+			"ImportPath": "github.com/syndtr/gosnappy/snappy",
+			"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
 		},
 		{
 			"ImportPath": "golang.org/x/crypto/pbkdf2",
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go b/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
deleted file mode 100644
index d93c1b9dbfd7cea5fe7b86520548181d3729fa94..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/decode.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
-	"encoding/binary"
-	"errors"
-)
-
-// ErrCorrupt reports that the input is invalid.
-var ErrCorrupt = errors.New("snappy: corrupt input")
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
-	v, _, err := decodedLen(src)
-	return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
-	v, n := binary.Uvarint(src)
-	if n == 0 {
-		return 0, 0, ErrCorrupt
-	}
-	if uint64(int(v)) != v {
-		return 0, 0, errors.New("snappy: decoded block is too large")
-	}
-	return int(v), n, nil
-}
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-// It is valid to pass a nil dst.
-func Decode(dst, src []byte) ([]byte, error) {
-	dLen, s, err := decodedLen(src)
-	if err != nil {
-		return nil, err
-	}
-	if len(dst) < dLen {
-		dst = make([]byte, dLen)
-	}
-
-	var d, offset, length int
-	for s < len(src) {
-		switch src[s] & 0x03 {
-		case tagLiteral:
-			x := uint(src[s] >> 2)
-			switch {
-			case x < 60:
-				s += 1
-			case x == 60:
-				s += 2
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-1])
-			case x == 61:
-				s += 3
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-2]) | uint(src[s-1])<<8
-			case x == 62:
-				s += 4
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
-			case x == 63:
-				s += 5
-				if s > len(src) {
-					return nil, ErrCorrupt
-				}
-				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
-			}
-			length = int(x + 1)
-			if length <= 0 {
-				return nil, errors.New("snappy: unsupported literal length")
-			}
-			if length > len(dst)-d || length > len(src)-s {
-				return nil, ErrCorrupt
-			}
-			copy(dst[d:], src[s:s+length])
-			d += length
-			s += length
-			continue
-
-		case tagCopy1:
-			s += 2
-			if s > len(src) {
-				return nil, ErrCorrupt
-			}
-			length = 4 + int(src[s-2])>>2&0x7
-			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
-
-		case tagCopy2:
-			s += 3
-			if s > len(src) {
-				return nil, ErrCorrupt
-			}
-			length = 1 + int(src[s-3])>>2
-			offset = int(src[s-2]) | int(src[s-1])<<8
-
-		case tagCopy4:
-			return nil, errors.New("snappy: unsupported COPY_4 tag")
-		}
-
-		end := d + length
-		if offset > d || end > len(dst) {
-			return nil, ErrCorrupt
-		}
-		for ; d < end; d++ {
-			dst[d] = dst[d-offset]
-		}
-	}
-	if d != dLen {
-		return nil, ErrCorrupt
-	}
-	return dst[:d], nil
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore b/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore
deleted file mode 100644
index 5d02b54e5467b0a83e686d391fd35a463e1de20b..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-/tmp
-*/**/*un~
-*un~
-.DS_Store
-*/**/.DS_Store
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules b/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules
deleted file mode 100644
index 054c7d62854cdc881bb37249110e2be5e0cd597e..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "serp"]
-	path = serpent
-	url = https://github.com/ethereum/serpent.git
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md b/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md
deleted file mode 100644
index 404f1b3802c3078f27b484652146b5c26ae2e947..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-[serpent](https://github.com/ethereum/serpent) go bindings.
-
-## Build instructions
-
-```
-go get -d github.com/ethereum/serpent-go
-cd $GOPATH/src/github.com/ethereum/serpent-go
-git submodule init
-git submodule update
-```
-
-You're now ready to go :-)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp
deleted file mode 100644
index 80032f900d65b63741cddb2e7eeab81a86c33256..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/all.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-#include "serpent/bignum.cpp"
-#include "serpent/util.cpp"
-#include "serpent/tokenize.cpp"
-#include "serpent/parser.cpp"
-#include "serpent/compiler.cpp"
-#include "serpent/funcs.cpp"
-#include "serpent/lllparser.cpp"
-#include "serpent/rewriter.cpp"
-
-#include "serpent/opcodes.cpp"
-#include "serpent/optimize.cpp"
-#include "serpent/functions.cpp"
-#include "serpent/preprocess.cpp"
-#include "serpent/rewriteutils.cpp"
-
-#include "cpp/api.cpp"
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp
deleted file mode 100644
index bd2c85c7d6f48c3d8982171fba24c4f5fca66a55..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <string>
-
-#include "serpent/lllparser.h"
-#include "serpent/bignum.h"
-#include "serpent/util.h"
-#include "serpent/tokenize.h"
-#include "serpent/parser.h"
-#include "serpent/compiler.h"
-
-#include "cpp/api.h"
-
-const char *compileGo(char *code, int *err)
-{
-    try {
-        std::string c = binToHex(compile(std::string(code)));
-
-        return c.c_str();
-    }
-    catch(std::string &error) {
-        *err = 1;
-        return error.c_str();
-    }
-    catch(...) {
-        return "Unknown error";
-    }
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h
deleted file mode 100644
index 235b5eb4a1fff14b79e21db644a39b4d3f480ee8..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/cpp/api.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef CPP_API_H
-#define CPP_API_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-const char *compileGo(char *code, int *err);
-
-#ifdef __cplusplus
-}
-#endif
- 
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go
deleted file mode 100644
index 39b60eed74e1a8987d1f444353e463e4c1059071..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package serpent
-
-// #cgo CXXFLAGS: -I. -Ilangs/ -std=c++0x -Wall -fno-strict-aliasing
-// #cgo LDFLAGS: -lstdc++
-//
-// #include "cpp/api.h"
-//
-import "C"
-
-import (
-	"encoding/hex"
-	"errors"
-	"unsafe"
-)
-
-func Compile(str string) ([]byte, error) {
-	var err C.int
-	out := C.GoString(C.compileGo(C.CString(str), (*C.int)(unsafe.Pointer(&err))))
-
-	if err == C.int(1) {
-		return nil, errors.New(out)
-	}
-
-	bytes, _ := hex.DecodeString(out)
-
-	return bytes, nil
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore
deleted file mode 100644
index 72b65e446603563c5ae5d72efb3c0d170bdd4326..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-[._]*.s[a-w][a-z]
-[._]s[a-w][a-z]
-*.un~
-Session.vim
-.netrwhist
-*~
-*.o
-serpent
-libserpent.a
-pyserpent.so
-dist
-*.egg-info
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in
deleted file mode 100644
index 5f5766cedb9fe1b6e98f6494b76e660a691df0a1..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/MANIFEST.in
+++ /dev/null
@@ -1,5 +0,0 @@
-include *.cpp
-include *.h
-include *py
-include README.md
-include Makefile
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile
deleted file mode 100644
index 28c38728e7ec214e949d903e0545ed749719c4ce..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-PLATFORM_OPTS = 
-PYTHON = /usr/include/python2.7
-CXXFLAGS = -fPIC
-# -g3 -O0
-BOOST_INC = /usr/include
-BOOST_LIB = /usr/lib
-TARGET = pyserpent
-COMMON_OBJS = bignum.o util.o tokenize.o lllparser.o parser.o opcodes.o optimize.o functions.o rewriteutils.o preprocess.o rewriter.o compiler.o funcs.o
-HEADERS = bignum.h util.h tokenize.h lllparser.h parser.h opcodes.h functions.h optimize.h rewriteutils.h preprocess.h rewriter.h compiler.h funcs.h
-PYTHON_VERSION = 2.7
-
-serpent : serpentc lib
-
-lib:
-	ar rvs libserpent.a $(COMMON_OBJS) 
-	g++ $(CXXFLAGS) -shared $(COMMON_OBJS) -o libserpent.so
-
-serpentc: $(COMMON_OBJS) cmdline.o
-	rm -rf serpent
-	g++ -Wall $(COMMON_OBJS) cmdline.o -o serpent
-
-bignum.o : bignum.cpp bignum.h
-
-opcodes.o : opcodes.cpp opcodes.h
-
-util.o : util.cpp util.h bignum.o
-
-tokenize.o : tokenize.cpp tokenize.h util.o
-
-lllparser.o : lllparser.cpp lllparser.h tokenize.o util.o
-
-parser.o : parser.cpp parser.h tokenize.o util.o
-
-rewriter.o : rewriter.cpp rewriter.h lllparser.o util.o rewriteutils.o preprocess.o opcodes.o functions.o
-
-preprocessor.o: rewriteutils.o functions.o
-
-compiler.o : compiler.cpp compiler.h util.o
-
-funcs.o : funcs.cpp funcs.h
-
-cmdline.o: cmdline.cpp
-
-pyext.o: pyext.cpp
-
-clean:
-	rm -f serpent *\.o libserpent.a libserpent.so
-
-install:
-	cp serpent /usr/local/bin
-	cp libserpent.a /usr/local/lib
-	cp libserpent.so /usr/local/lib
-	rm -rf /usr/local/include/libserpent
-	mkdir -p /usr/local/include/libserpent
-	cp $(HEADERS) /usr/local/include/libserpent
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md
deleted file mode 100644
index 03dfcc15f8feddf0b8bfd7f902b129aeab6b9c32..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Installation:
-
-```make && sudo make install```
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp
deleted file mode 100644
index 108b1eb04530a12ccdb352ae75b41c305371cdcd..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "bignum.h"
-
-//Integer to string conversion
-std::string unsignedToDecimal(unsigned branch) {
-    if (branch < 10) return nums.substr(branch, 1);
-    else return unsignedToDecimal(branch / 10) + nums.substr(branch % 10,1);
-}
-
-//Add two strings representing decimal values
-std::string decimalAdd(std::string a, std::string b) {
-    std::string o = a;
-    while (b.length() < a.length()) b = "0" + b;
-    while (o.length() < b.length()) o = "0" + o;
-    bool carry = false;
-    for (int i = o.length() - 1; i >= 0; i--) {
-        o[i] = o[i] + b[i] - '0';
-        if (carry) o[i]++;
-        if (o[i] > '9') {
-            o[i] -= 10;
-            carry = true;
-        }
-        else carry = false;
-    }
-    if (carry) o = "1" + o;
-    return o;
-}
-
-//Helper function for decimalMul
-std::string decimalDigitMul(std::string a, int dig) {
-    if (dig == 0) return "0";
-    else return decimalAdd(a, decimalDigitMul(a, dig - 1));
-}
-
-//Multiply two strings representing decimal values
-std::string decimalMul(std::string a, std::string b) {
-    std::string o = "0";
-	for (unsigned i = 0; i < b.length(); i++) {
-        std::string n = decimalDigitMul(a, b[i] - '0');
-        if (n != "0") {
-			for (unsigned j = i + 1; j < b.length(); j++) n += "0";
-        }
-        o = decimalAdd(o, n);
-    }
-    return o;
-}
-
-//Modexp
-std::string decimalModExp(std::string b, std::string e, std::string m) {
-    if (e == "0") return "1";
-    else if (e == "1") return b;
-    else if (decimalMod(e, "2") == "0") {
-        std::string o = decimalModExp(b, decimalDiv(e, "2"), m);
-        return decimalMod(decimalMul(o, o), m);
-    }
-    else {
-        std::string o = decimalModExp(b, decimalDiv(e, "2"), m);
-        return decimalMod(decimalMul(decimalMul(o, o), b), m);
-    }
-}
-
-//Is a greater than b? Flag allows equality
-bool decimalGt(std::string a, std::string b, bool eqAllowed) {
-    if (a == b) return eqAllowed;
-    return (a.length() > b.length()) || (a.length() >= b.length() && a > b);
-}
-
-//Subtract the two strings representing decimal values
-std::string decimalSub(std::string a, std::string b) {
-    if (b == "0") return a;
-    if (b == a) return "0";
-    while (b.length() < a.length()) b = "0" + b;
-    std::string c = b;
-	for (unsigned i = 0; i < c.length(); i++) c[i] = '0' + ('9' - c[i]);
-    std::string o = decimalAdd(decimalAdd(a, c).substr(1), "1");
-    while (o.size() > 1 && o[0] == '0') o = o.substr(1);
-    return o;
-}
-
-//Divide the two strings representing decimal values
-std::string decimalDiv(std::string a, std::string b) {
-    std::string c = b;
-    if (decimalGt(c, a)) return "0";
-    int zeroes = -1;
-    while (decimalGt(a, c, true)) {
-        zeroes += 1;
-        c = c + "0";
-    }
-    c = c.substr(0, c.size() - 1);
-    std::string quot = "0";
-    while (decimalGt(a, c, true)) {
-        a = decimalSub(a, c);
-        quot = decimalAdd(quot, "1");
-    }
-    for (int i = 0; i < zeroes; i++) quot += "0";
-    return decimalAdd(quot, decimalDiv(a, b));
-}
-
-//Modulo the two strings representing decimal values
-std::string decimalMod(std::string a, std::string b) {
-    return decimalSub(a, decimalMul(decimalDiv(a, b), b));
-}
-
-//String to int conversion
-unsigned decimalToUnsigned(std::string a) {
-    if (a.size() == 0) return 0;
-    else return (a[a.size() - 1] - '0') 
-        + decimalToUnsigned(a.substr(0,a.size()-1)) * 10;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h
deleted file mode 100644
index 99571acd21cdb878e9d7283a077dcfd2bb1f8239..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/bignum.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef ETHSERP_BIGNUM
-#define ETHSERP_BIGNUM
-
-const std::string nums = "0123456789";
-
-const std::string tt256 = 
-"115792089237316195423570985008687907853269984665640564039457584007913129639936"
-;
-
-const std::string tt256m1 = 
-"115792089237316195423570985008687907853269984665640564039457584007913129639935"
-;
-
-const std::string tt255 =
-"57896044618658097711785492504343953926634992332820282019728792003956564819968";
-
-const std::string tt176 =
-"95780971304118053647396689196894323976171195136475136";
-
-std::string unsignedToDecimal(unsigned branch);
-
-std::string decimalAdd(std::string a, std::string b);
-
-std::string decimalMul(std::string a, std::string b);
-
-std::string decimalSub(std::string a, std::string b);
-
-std::string decimalDiv(std::string a, std::string b);
-
-std::string decimalMod(std::string a, std::string b);
-
-std::string decimalModExp(std::string b, std::string e, std::string m);
-
-bool decimalGt(std::string a, std::string b, bool eqAllowed=false);
-
-unsigned decimalToUnsigned(std::string a);
-
-#define utd unsignedToDecimal
-#define dtu decimalToUnsigned
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp
deleted file mode 100644
index fe25608303c1526ffdda8fd992d5fda86f35c0c4..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/cmdline.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#include <stdio.h>
-#include <string>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "funcs.h"
-
-int main(int argv, char** argc) {
-    if (argv == 1) {
-        std::cerr << "Must provide a command and arguments! Try parse, rewrite, compile, assemble\n";
-        return 0;
-    }
-    if (argv == 2 && std::string(argc[1]) == "--help" || std::string(argc[1]) == "-h" ) {
-        std::cout << argc[1] << "\n";
-        
-        std::cout << "serpent command input\n";
-        std::cout << "where input -s for from stdin, a file, or interpreted as serpent code if does not exist as file.";
-        std::cout << "where command: \n";
-        std::cout << " parse:          Just parses and returns s-expression code.\n";
-        std::cout << " rewrite:        Parse, use rewrite rules print s-expressions of result.\n";
-        std::cout << " compile:        Return resulting compiled EVM code in hex.\n";
-        std::cout << " assemble:       Return result from step before compilation.\n";
-        return 0;
-    }
-        
-    std::string flag = "";
-    std::string command = argc[1];
-    std::string input;
-    std::string secondInput;
-    if (std::string(argc[1]) == "-s") {
-        flag = command.substr(1);
-        command = argc[2];
-        input = "";
-        std::string line;
-        while (std::getline(std::cin, line)) {
-            input += line + "\n";
-        }
-        secondInput = argv == 3 ? "" : argc[3];
-    }
-    else {
-        if (argv == 2) {
-            std::cerr << "Not enough arguments for serpent cmdline\n";
-            throw(0);
-        }
-        input = argc[2];
-        secondInput = argv == 3 ? "" : argc[3];
-    }
-    bool haveSec = secondInput.length() > 0;
-    if (command == "parse" || command == "parse_serpent") {
-        std::cout << printAST(parseSerpent(input), haveSec) << "\n";
-    }
-    else if (command == "rewrite") {
-        std::cout << printAST(rewrite(parseLLL(input, true)), haveSec) << "\n";
-    }
-    else if (command == "compile_to_lll") {
-        std::cout << printAST(compileToLLL(input), haveSec) << "\n";
-    }
-    else if (command == "rewrite_chunk") {
-        std::cout << printAST(rewriteChunk(parseLLL(input, true)), haveSec) << "\n";
-    }
-    else if (command == "compile_chunk_to_lll") {
-        std::cout << printAST(compileChunkToLLL(input), haveSec) << "\n";
-    }
-    else if (command == "build_fragtree") {
-        std::cout << printAST(buildFragmentTree(parseLLL(input, true))) << "\n";
-    }
-    else if (command == "compile_lll") {
-        std::cout << binToHex(compileLLL(parseLLL(input, true))) << "\n";
-    }
-    else if (command == "dereference") {
-        std::cout << printAST(dereference(parseLLL(input, true)), haveSec) <<"\n";
-    }
-    else if (command == "pretty_assemble") {
-        std::cout << printTokens(prettyAssemble(parseLLL(input, true))) <<"\n";
-    }
-    else if (command == "pretty_compile_lll") {
-        std::cout << printTokens(prettyCompileLLL(parseLLL(input, true))) << "\n";
-    }
-    else if (command == "pretty_compile") {
-        std::cout << printTokens(prettyCompile(input)) << "\n";
-    }
-    else if (command == "pretty_compile_chunk") {
-        std::cout << printTokens(prettyCompileChunk(input)) << "\n";
-    }
-    else if (command == "assemble") {
-        std::cout << assemble(parseLLL(input, true)) << "\n";
-    }
-    else if (command == "serialize") {
-        std::cout << binToHex(serialize(tokenize(input, Metadata(), false))) << "\n";
-    }
-    else if (command == "flatten") {
-        std::cout << printTokens(flatten(parseLLL(input, true))) << "\n";
-    }
-    else if (command == "deserialize") {
-        std::cout << printTokens(deserialize(hexToBin(input))) << "\n";
-    }
-    else if (command == "compile") {
-        std::cout << binToHex(compile(input)) << "\n";
-    }
-    else if (command == "compile_chunk") {
-        std::cout << binToHex(compileChunk(input)) << "\n";
-    }
-    else if (command == "encode_datalist") {
-        std::vector<Node> tokens = tokenize(input);
-        std::vector<std::string> o;
-        for (int i = 0; i < (int)tokens.size(); i++) {
-            o.push_back(tokens[i].val);
-        }
-        std::cout << binToHex(encodeDatalist(o)) << "\n";
-    }
-    else if (command == "decode_datalist") {
-        std::vector<std::string> o = decodeDatalist(hexToBin(input));
-        std::vector<Node> tokens;
-        for (int i = 0; i < (int)o.size(); i++)
-            tokens.push_back(token(o[i]));
-        std::cout << printTokens(tokens) << "\n";
-    }
-    else if (command == "tokenize") {
-        std::cout << printTokens(tokenize(input));
-    }
-    else if (command == "biject") {
-        if (argv == 3)
-             std::cerr << "Not enough arguments for biject\n";
-        int pos = decimalToUnsigned(secondInput);
-        std::vector<Node> n = prettyCompile(input);
-        if (pos >= (int)n.size())
-             std::cerr << "Code position too high\n";
-        Metadata m = n[pos].metadata;
-        std::cout << "Opcode: " << n[pos].val << ", file: " << m.file << 
-             ", line: " << m.ln << ", char: " << m.ch << "\n";
-    }
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp
deleted file mode 100644
index b9281dcbcf58060385cc90b5c618d97fbdf74c57..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.cpp
+++ /dev/null
@@ -1,554 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "bignum.h"
-#include "opcodes.h"
-
-struct programAux {
-    std::map<std::string, std::string> vars;
-    int nextVarMem;
-    bool allocUsed;
-    bool calldataUsed;
-    int step;
-    int labelLength;
-};
-
-struct programVerticalAux {
-    int height;
-    std::string innerScopeName;
-    std::map<std::string, int> dupvars;
-    std::map<std::string, int> funvars;
-    std::vector<mss> scopes;
-};
-
-struct programData {
-    programAux aux;
-    Node code;
-    int outs;
-};
-
-programAux Aux() {
-    programAux o;
-    o.allocUsed = false;
-    o.calldataUsed = false;
-    o.step = 0;
-    o.nextVarMem = 32;
-    return o;
-}
-
-programVerticalAux verticalAux() {
-    programVerticalAux o;
-    o.height = 0;
-    o.dupvars = std::map<std::string, int>();
-    o.funvars = std::map<std::string, int>();
-    o.scopes = std::vector<mss>();
-    return o;
-}
-
-programData pd(programAux aux = Aux(), Node code=token("_"), int outs=0) {
-    programData o;
-    o.aux = aux;
-    o.code = code;
-    o.outs = outs;
-    return o;
-}
-
-Node multiToken(Node nodes[], int len, Metadata met) {
-    std::vector<Node> out;
-    for (int i = 0; i < len; i++) {
-        out.push_back(nodes[i]);
-    }
-    return astnode("_", out, met);
-}
-
-Node finalize(programData c);
-
-Node popwrap(Node node) {
-    Node nodelist[] = {
-        node,
-        token("POP", node.metadata)
-    };
-    return multiToken(nodelist, 2, node.metadata);
-}
-
-// Grabs variables
-mss getVariables(Node node, mss cur=mss()) {
-    Metadata m = node.metadata;
-    // Tokens don't contain any variables
-    if (node.type == TOKEN)
-        return cur;
-    // Don't descend into call fragments
-    else if (node.val == "lll")
-        return getVariables(node.args[1], cur);
-    // At global scope get/set/ref also declare    
-    else if (node.val == "get" || node.val == "set" || node.val == "ref") {
-        if (node.args[0].type != TOKEN)
-            err("Variable name must be simple token,"
-                " not complex expression!", m);
-        if (!cur.count(node.args[0].val)) {
-            cur[node.args[0].val] = utd(cur.size() * 32 + 32);
-            //std::cerr << node.args[0].val << " " << cur[node.args[0].val] << "\n";
-        }
-    }
-    // Recursively process children
-    for (unsigned i = 0; i < node.args.size(); i++) {
-        cur = getVariables(node.args[i], cur);
-    }
-    return cur;
-}
-
-// Turns LLL tree into tree of code fragments
-programData opcodeify(Node node,
-                      programAux aux=Aux(),
-                      programVerticalAux vaux=verticalAux()) {
-    std::string symb = "_"+mkUniqueToken();
-    Metadata m = node.metadata;
-    // Get variables
-    if (!aux.vars.size()) {
-        aux.vars = getVariables(node);
-        aux.nextVarMem = aux.vars.size() * 32 + 32;
-    }
-    // Numbers
-    if (node.type == TOKEN) {
-        return pd(aux, nodeToNumeric(node), 1);
-    }
-    else if (node.val == "ref" || node.val == "get" || node.val == "set") {
-        std::string varname = node.args[0].val;
-        // Determine reference to variable
-        Node varNode = tkn(aux.vars[varname], m);
-        //std::cerr << varname << " " << printSimple(varNode) << "\n";
-        // Set variable
-        if (node.val == "set") {
-            programData sub = opcodeify(node.args[1], aux, vaux);
-            if (!sub.outs)
-                err("Value to set variable must have nonzero arity!", m);
-            // What if we are setting a stack variable?
-            if (vaux.dupvars.count(node.args[0].val)) {
-                int h = vaux.height - vaux.dupvars[node.args[0].val];
-                if (h > 16) err("Too deep for stack variable (max 16)", m);
-                Node nodelist[] = {
-                    sub.code,
-                    token("SWAP"+unsignedToDecimal(h), m),
-                    token("POP", m)
-                };
-                return pd(sub.aux, multiToken(nodelist, 3, m), 0);                   
-            }
-            // Setting a memory variable
-            else {
-                Node nodelist[] = {
-                    sub.code,
-                    varNode,
-                    token("MSTORE", m),
-                };
-                return pd(sub.aux, multiToken(nodelist, 3, m), 0);                   
-            }
-        }
-        // Get variable
-        else if (node.val == "get") {
-            // Getting a stack variable
-            if (vaux.dupvars.count(node.args[0].val)) {
-                 int h = vaux.height - vaux.dupvars[node.args[0].val];
-                if (h > 16) err("Too deep for stack variable (max 16)", m);
-                return pd(aux, token("DUP"+unsignedToDecimal(h)), 1);                   
-            }
-            // Getting a memory variable
-            else {
-                Node nodelist[] = 
-                     { varNode, token("MLOAD", m) };
-                return pd(aux, multiToken(nodelist, 2, m), 1);
-            }
-        }
-        // Refer variable
-        else if (node.val == "ref") {
-            if (vaux.dupvars.count(node.args[0].val))
-                err("Cannot ref stack variable!", m);
-            return pd(aux, varNode, 1);
-        }
-    }
-    // Comments do nothing
-    else if (node.val == "comment") {
-        Node nodelist[] = { };
-        return pd(aux, multiToken(nodelist, 0, m), 0);
-    }
-    // Custom operation sequence
-    // eg. (ops bytez id msize swap1 msize add 0 swap1 mstore) == alloc
-    if (node.val == "ops") {
-        std::vector<Node>  subs2;
-        int depth = 0;
-        for (unsigned i = 0; i < node.args.size(); i++) {
-            std::string op = upperCase(node.args[i].val);
-            if (node.args[i].type == ASTNODE || opinputs(op) == -1) {
-                programVerticalAux vaux2 = vaux;
-                vaux2.height = vaux.height - i - 1 + node.args.size();
-                programData sub = opcodeify(node.args[i], aux, vaux2);
-                aux = sub.aux;
-                depth += sub.outs;
-                subs2.push_back(sub.code);
-            }
-            else {
-                subs2.push_back(token(op, m));
-                depth += opoutputs(op) - opinputs(op);
-            }
-        }
-        if (depth < 0 || depth > 1) err("Stack depth mismatch", m);
-        return pd(aux, astnode("_", subs2, m), 0);
-    }
-    // Code blocks
-    if (node.val == "lll" && node.args.size() == 2) {
-        if (node.args[1].val != "0") aux.allocUsed = true;
-        std::vector<Node> o;
-        o.push_back(finalize(opcodeify(node.args[0])));
-        programData sub = opcodeify(node.args[1], aux, vaux);
-        Node code = astnode("____CODE", o, m);
-        Node nodelist[] = {
-            token("$begincode"+symb+".endcode"+symb, m), token("DUP1", m),
-            token("$begincode"+symb, m), sub.code, token("CODECOPY", m),
-            token("$endcode"+symb, m), token("JUMP", m),
-            token("~begincode"+symb, m), code, 
-            token("~endcode"+symb, m), token("JUMPDEST", m)
-        };
-        return pd(sub.aux, multiToken(nodelist, 11, m), 1);
-    }
-    // Stack variables
-    if (node.val == "with") {
-        programData initial = opcodeify(node.args[1], aux, vaux);
-        programVerticalAux vaux2 = vaux;
-        vaux2.dupvars[node.args[0].val] = vaux.height;
-        vaux2.height += 1;
-        if (!initial.outs)
-            err("Initial variable value must have nonzero arity!", m);
-        programData sub = opcodeify(node.args[2], initial.aux, vaux2);
-        Node nodelist[] = {
-            initial.code,
-            sub.code
-        };
-        programData o = pd(sub.aux, multiToken(nodelist, 2, m), sub.outs);
-        if (sub.outs)
-            o.code.args.push_back(token("SWAP1", m));
-        o.code.args.push_back(token("POP", m));
-        return o;
-    }
-    // Seq of multiple statements
-    if (node.val == "seq") {
-        std::vector<Node> children;
-        int lastOut = 0;
-        for (unsigned i = 0; i < node.args.size(); i++) {
-            programData sub = opcodeify(node.args[i], aux, vaux);
-            aux = sub.aux;
-            if (sub.outs == 1) {
-                if (i < node.args.size() - 1) sub.code = popwrap(sub.code);
-                else lastOut = 1;
-            }
-            children.push_back(sub.code);
-        }
-        return pd(aux, astnode("_", children, m), lastOut);
-    }
-    // 2-part conditional (if gets rewritten to unless in rewrites)
-    else if (node.val == "unless" && node.args.size() == 2) {
-        programData cond = opcodeify(node.args[0], aux, vaux);
-        programData action = opcodeify(node.args[1], cond.aux, vaux);
-        aux = action.aux;
-        if (!cond.outs) err("Condition of if/unless statement has arity 0", m);
-        if (action.outs) action.code = popwrap(action.code);
-        Node nodelist[] = {
-            cond.code,
-            token("$endif"+symb, m), token("JUMPI", m),
-            action.code,
-            token("~endif"+symb, m), token("JUMPDEST", m)
-        };
-        return pd(aux, multiToken(nodelist, 6, m), 0);
-    }
-    // 3-part conditional
-    else if (node.val == "if" && node.args.size() == 3) {
-        programData ifd = opcodeify(node.args[0], aux, vaux);
-        programData thend = opcodeify(node.args[1], ifd.aux, vaux);
-        programData elsed = opcodeify(node.args[2], thend.aux, vaux);
-        aux = elsed.aux;
-        if (!ifd.outs)
-            err("Condition of if/unless statement has arity 0", m);
-        // Handle cases where one conditional outputs something
-        // and the other does not
-        int outs = (thend.outs && elsed.outs) ? 1 : 0;
-        if (thend.outs > outs) thend.code = popwrap(thend.code);
-        if (elsed.outs > outs) elsed.code = popwrap(elsed.code);
-        Node nodelist[] = {
-            ifd.code,
-            token("ISZERO", m),
-            token("$else"+symb, m), token("JUMPI", m),
-            thend.code,
-            token("$endif"+symb, m), token("JUMP", m),
-            token("~else"+symb, m), token("JUMPDEST", m),
-            elsed.code,
-            token("~endif"+symb, m), token("JUMPDEST", m)
-        };
-        return pd(aux, multiToken(nodelist, 12, m), outs);
-    }
-    // While (rewritten to this in rewrites)
-    else if (node.val == "until") {
-        programData cond = opcodeify(node.args[0], aux, vaux);
-        programData action = opcodeify(node.args[1], cond.aux, vaux);
-        aux = action.aux;
-        if (!cond.outs)
-            err("Condition of while/until loop has arity 0", m);
-        if (action.outs) action.code = popwrap(action.code);
-        Node nodelist[] = {
-            token("~beg"+symb, m), token("JUMPDEST", m),
-            cond.code,
-            token("$end"+symb, m), token("JUMPI", m),
-            action.code,
-            token("$beg"+symb, m), token("JUMP", m),
-            token("~end"+symb, m), token("JUMPDEST", m),
-        };
-        return pd(aux, multiToken(nodelist, 10, m));
-    }
-    // Memory allocations
-    else if (node.val == "alloc") {
-        programData bytez = opcodeify(node.args[0], aux, vaux);
-        aux = bytez.aux;
-        if (!bytez.outs)
-            err("Alloc input has arity 0", m);
-        aux.allocUsed = true;
-        Node nodelist[] = {
-            bytez.code,
-            token("MSIZE", m), token("SWAP1", m), token("MSIZE", m),
-            token("ADD", m), 
-            token("0", m), token("SWAP1", m), token("MSTORE", m)
-        };
-        return pd(aux, multiToken(nodelist, 8, m), 1);
-    }
-    // All other functions/operators
-    else {
-        std::vector<Node>  subs2;
-        int depth = opinputs(upperCase(node.val));
-        if (depth == -1)
-            err("Not a function or opcode: "+node.val, m);
-        if ((int)node.args.size() != depth)
-            err("Invalid arity for "+node.val, m);
-        for (int i = node.args.size() - 1; i >= 0; i--) {
-            programVerticalAux vaux2 = vaux;
-            vaux2.height = vaux.height - i - 1 + node.args.size();
-            programData sub = opcodeify(node.args[i], aux, vaux2);
-            aux = sub.aux;
-            if (!sub.outs)
-                err("Input "+unsignedToDecimal(i)+" has arity 0", sub.code.metadata);
-            subs2.push_back(sub.code);
-        }
-        subs2.push_back(token(upperCase(node.val), m));
-        int outdepth = opoutputs(upperCase(node.val));
-        return pd(aux, astnode("_", subs2, m), outdepth);
-    }
-}
-
-// Adds necessary wrappers to a program
-Node finalize(programData c) {
-    std::vector<Node> bottom;
-    Metadata m = c.code.metadata;
-    // If we are using both alloc and variables, we need to pre-zfill
-    // some memory
-    if ((c.aux.allocUsed || c.aux.calldataUsed) && c.aux.vars.size() > 0) {
-        Node nodelist[] = {
-            token("0", m), 
-            token(unsignedToDecimal(c.aux.nextVarMem - 1)),
-            token("MSTORE8", m)
-        };
-        bottom.push_back(multiToken(nodelist, 3, m));
-    }
-    // The actual code
-    bottom.push_back(c.code);
-    return astnode("_", bottom, m);
-}
-
-//LLL -> code fragment tree
-Node buildFragmentTree(Node node) {
-    return finalize(opcodeify(node));
-}
-
-
-// Builds a dictionary mapping labels to variable names
-programAux buildDict(Node program, programAux aux, int labelLength) {
-    Metadata m = program.metadata;
-    // Token
-    if (program.type == TOKEN) {
-        if (isNumberLike(program)) {
-            aux.step += 1 + toByteArr(program.val, m).size();
-        }
-        else if (program.val[0] == '~') {
-            aux.vars[program.val.substr(1)] = unsignedToDecimal(aux.step);
-        }
-        else if (program.val[0] == '$') {
-            aux.step += labelLength + 1;
-        }
-        else aux.step += 1;
-    }
-    // A sub-program (ie. LLL)
-    else if (program.val == "____CODE") {
-        programAux auks = Aux();
-        for (unsigned i = 0; i < program.args.size(); i++) {
-            auks = buildDict(program.args[i], auks, labelLength);
-        }
-        for (std::map<std::string,std::string>::iterator it=auks.vars.begin();
-             it != auks.vars.end();
-             it++) {
-            aux.vars[(*it).first] = (*it).second;
-        }
-        aux.step += auks.step;
-    }
-    // Normal sub-block
-    else {
-        for (unsigned i = 0; i < program.args.size(); i++) {
-            aux = buildDict(program.args[i], aux, labelLength);
-        }
-    }
-    return aux;
-}
-
-// Applies that dictionary
-Node substDict(Node program, programAux aux, int labelLength) {
-    Metadata m = program.metadata;
-    std::vector<Node> out;
-    std::vector<Node> inner;
-    if (program.type == TOKEN) {
-        if (program.val[0] == '$') {
-            std::string tokStr = "PUSH"+unsignedToDecimal(labelLength);
-            out.push_back(token(tokStr, m));
-            int dotLoc = program.val.find('.');
-            if (dotLoc == -1) {
-                std::string val = aux.vars[program.val.substr(1)];
-                inner = toByteArr(val, m, labelLength);
-            }
-            else {
-                std::string start = aux.vars[program.val.substr(1, dotLoc-1)],
-                            end = aux.vars[program.val.substr(dotLoc + 1)],
-                            dist = decimalSub(end, start);
-                inner = toByteArr(dist, m, labelLength);
-            }
-            out.push_back(astnode("_", inner, m));
-        }
-        else if (program.val[0] == '~') { }
-        else if (isNumberLike(program)) {
-            inner = toByteArr(program.val, m);
-            out.push_back(token("PUSH"+unsignedToDecimal(inner.size())));
-            out.push_back(astnode("_", inner, m));
-        }
-        else return program;
-    }
-    else {
-        for (unsigned i = 0; i < program.args.size(); i++) {
-            Node n = substDict(program.args[i], aux, labelLength);
-            if (n.type == TOKEN || n.args.size()) out.push_back(n);
-        }
-    }
-    return astnode("_", out, m);
-}
-
-// Compiled fragtree -> compiled fragtree without labels
-Node dereference(Node program) {
-    int sz = treeSize(program) * 4;
-    int labelLength = 1;
-    while (sz >= 256) { labelLength += 1; sz /= 256; }
-    programAux aux = buildDict(program, Aux(), labelLength);
-    return substDict(program, aux, labelLength);
-}
-
-// Dereferenced fragtree -> opcodes
-std::vector<Node> flatten(Node derefed) {
-    std::vector<Node> o;
-    if (derefed.type == TOKEN) {
-        o.push_back(derefed);
-    }
-    else {
-        for (unsigned i = 0; i < derefed.args.size(); i++) {
-            std::vector<Node> oprime = flatten(derefed.args[i]);
-            for (unsigned j = 0; j < oprime.size(); j++) o.push_back(oprime[j]);
-        }
-    }
-    return o;
-}
-
-// Opcodes -> bin
-std::string serialize(std::vector<Node> codons) {
-    std::string o;
-    for (unsigned i = 0; i < codons.size(); i++) {
-        int v;
-        if (isNumberLike(codons[i])) {
-            v = decimalToUnsigned(codons[i].val);
-        }
-        else if (codons[i].val.substr(0,4) == "PUSH") {
-            v = 95 + decimalToUnsigned(codons[i].val.substr(4));
-        }
-        else {
-            v = opcode(codons[i].val);
-        }
-        o += (char)v;
-    }
-    return o;
-}
-
-// Bin -> opcodes
-std::vector<Node> deserialize(std::string ser) {
-    std::vector<Node> o;
-    int backCount = 0;
-    for (unsigned i = 0; i < ser.length(); i++) {
-        unsigned char v = (unsigned char)ser[i];
-        std::string oper = op((int)v);
-        if (oper != "" && backCount <= 0) o.push_back(token(oper));
-        else if (v >= 96 && v < 128 && backCount <= 0) {
-            o.push_back(token("PUSH"+unsignedToDecimal(v - 95)));
-        }
-        else o.push_back(token(unsignedToDecimal(v)));
-        if (v >= 96 && v < 128 && backCount <= 0) {
-            backCount = v - 95;
-        }
-        else backCount--;
-    }
-    return o;
-}
-
-// Fragtree -> bin
-std::string assemble(Node fragTree) {
-    return serialize(flatten(dereference(fragTree)));
-}
-
-// Fragtree -> tokens
-std::vector<Node> prettyAssemble(Node fragTree) {
-    return flatten(dereference(fragTree));
-}
-
-// LLL -> bin
-std::string compileLLL(Node program) {
-    return assemble(buildFragmentTree(program));
-}
-
-// LLL -> tokens
-std::vector<Node> prettyCompileLLL(Node program) {
-    return prettyAssemble(buildFragmentTree(program));
-}
-
-// Converts a list of integer values to binary transaction data
-std::string encodeDatalist(std::vector<std::string> vals) {
-    std::string o;
-    for (unsigned i = 0; i < vals.size(); i++) {
-        std::vector<Node> n = toByteArr(strToNumeric(vals[i]), Metadata(), 32);
-        for (unsigned j = 0; j < n.size(); j++) {
-            int v = decimalToUnsigned(n[j].val);
-            o += (char)v;
-        }
-    }
-    return o;
-}
-
-// Converts binary transaction data into a list of integer values
-std::vector<std::string> decodeDatalist(std::string ser) {
-    std::vector<std::string> out;
-    for (unsigned i = 0; i < ser.length(); i+= 32) {
-        std::string o = "0";
-		for (unsigned j = i; j < i + 32; j++) {
-            int vj = (int)(unsigned char)ser[j];
-            o = decimalAdd(decimalMul(o, "256"), unsignedToDecimal(vj));
-        }
-        out.push_back(o);
-    }
-    return out;
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h
deleted file mode 100644
index aecaa371819537a6d080dd555639e007ccceba3f..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/compiler.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef ETHSERP_COMPILER
-#define ETHSERP_COMPILER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Compiled fragtree -> compiled fragtree without labels
-Node dereference(Node program);
-
-// LLL -> fragtree
-Node buildFragmentTree(Node program);
-
-// Dereferenced fragtree -> opcodes
-std::vector<Node> flatten(Node derefed);
-
-// opcodes -> bin
-std::string serialize(std::vector<Node> codons);
-
-// Fragtree -> bin
-std::string assemble(Node fragTree);
-
-// Fragtree -> opcodes
-std::vector<Node> prettyAssemble(Node fragTree);
-
-// LLL -> bin
-std::string compileLLL(Node program);
-
-// LLL -> opcodes
-std::vector<Node> prettyCompileLLL(Node program);
-
-// bin -> opcodes
-std::vector<Node> deserialize(std::string ser);
-
-// Converts a list of integer values to binary transaction data
-std::string encodeDatalist(std::vector<std::string> vals);
-
-// Converts binary transaction data into a list of integer values
-std::vector<std::string> decodeDatalist(std::string ser);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp
deleted file mode 100644
index 1ce2590d0666f72e245d8e87610567409f5b6df7..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/example.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <libserpent/funcs.h>
-#include <libserpent/bignum.h>
-#include <iostream>
-
-using namespace std;
-
-int main() {
-	cout << printAST(compileToLLL(get_file_contents("examples/namecoin.se"))) << "\n";
-    cout << decimalSub("10234", "10234") << "\n";
-    cout << decimalSub("10234", "10233") << "\n";
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se
deleted file mode 100644
index 148b47b59c3d4236750f9210e26703e6e07ae267..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/collatz.se
+++ /dev/null
@@ -1,11 +0,0 @@
-x = msg.data[0]
-steps = 0
-
-while x > 1:
-    steps += 1
-    if (x % 2) == 0:
-        x /= 2
-    else:
-        x = 3 * x + 1
-
-return(steps)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se
deleted file mode 100644
index abec0d102c8ba138f29de4c3813d50d380fda401..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/counterparty.se
+++ /dev/null
@@ -1,274 +0,0 @@
-# Ethereum forks Counterparty in 340 lines of serpent
-# Not yet tested
-
-# assets[i] = a registered asset, assets[i].holders[j] = former or current i-holder
-data assets[2^50](creator, name, calldate, callprice, dividend_paid, holders[2^50], holdersCount)
-data nextAssetId
-
-# holdersMap: holdersMap[addr][asset] = 1 if addr holds asset
-data holdersMap[2^160][2^50]
-
-# balances[x][y] = how much of y x holds
-data balances[2^160][2^50]
-
-# orders[a][b] = heap of indices to (c, d, e) 
-# = c offers to sell d units of a at a price of e units of b per 10^18 units
-# of a
-data orderbooks[2^50][2^50]
-
-# store of general order data
-data orders[2^50](seller, asset_sold, quantity, price)
-data ordersCount
-
-# data feeds
-data feeds[2^50](owner, value)
-data feedCount
-
-# heap
-data heap
-extern heap: [register, push, pop, top, size]
-
-data cfds[2^50](maker, acceptor, feed, asset, strike, leverage, min, max, maturity)
-data cfdCount
-
-data bets[2^50](maker, acceptor, feed, asset, makerstake, acceptorstake, eqtest, maturity)
-data betCount
-
-def init():
-    heap = create('heap.se')
-
-# Add units (internal method)
-def add(to, asset, value):
-    assert msg.sender == self
-    self.balances[to][asset] += value
-    # Add the holder to the holders list
-    if not self.holdersMap[to][asset]:
-        self.holdersMap[to][asset] = 1
-        c = self.assets[asset].holdersCount
-        self.assets[asset].holders[c] = to
-        self.assets[asset].holdersCount = c + 1
-
-# Register a new asset
-def register_asset(q, name, calldate, callprice): 
-    newid = self.nextAssetId
-    self.assets[newid].creator = msg.sender
-    self.assets[newid].name = name
-    self.assets[newid].calldate = calldate
-    self.assets[newid].callprice = callprice
-    self.assets[newid].holders[0] = msg.sender
-    self.assets[newid].holdersCount = 1
-    self.balances[msg.sender][newid] = q
-    self.holdersMap[msg.sender][newid] = 1
-
-# Send
-def send(to, asset, value):
-    fromval = self.balances[msg.sender][asset]
-    if fromval >= value:
-        self.balances[msg.sender][asset] -= value
-        self.add(to, asset, value)
-
-# Order
-def mkorder(selling, buying, quantity, price):
-    # Make sure you have enough to pay for the order
-    assert self.balances[msg.sender][selling] >= quantity:
-    # Try to match existing orders
-    o = orderbooks[buying][selling]
-    if not o:
-        o = self.heap.register()
-        orderbooks[selling][buying] = o
-    sz = self.heap.size(o)
-    invprice = 10^36 / price
-    while quantity > 0 and sz > 0: 
-        orderid = self.heap.pop()
-        p = self.orders[orderid].price
-        if p > invprice:
-            sz = 0
-        else:
-            q = self.orders[orderid].quantity
-            oq = min(q, quantity)
-            b = self.orders[orderid].seller
-            self.balances[msg.sender][selling] -= oq * p / 10^18
-            self.add(msg.sender, buying, oq)
-            self.add(b, selling, oq * p / 10^18)
-            self.orders[orderid].quantity = q - oq
-            if oq == q:
-                self.orders[orderid].seller = 0
-                self.orders[orderid].price = 0
-                self.orders[orderid].asset_sold = 0
-            quantity -= oq
-            sz -= 1
-    assert quantity > 0
-    # Make the order
-    c = self.ordersCount
-    self.orders[c].seller = msg.sender
-    self.orders[c].asset_sold = selling
-    self.orders[c].quantity = quantity
-    self.orders[c].price = price
-    self.ordersCount += 1
-    # Add it to the heap
-    o = orderbooks[selling][buying]
-    if not o:
-        o = self.heap.register()
-        orderbooks[selling][buying] = o
-    self.balances[msg.sender][selling] -= quantity
-    self.heap.push(o, price, c)
-    return(c)
-
-def cancel_order(id):
-    if self.orders[id].seller == msg.sender:
-        self.orders[id].seller = 0
-        self.orders[id].price = 0
-        self.balances[msg.sender][self.orders[id].asset_sold] += self.orders[id].quantity
-        self.orders[id].quantity = 0
-        self.orders[id].asset_sold = 0
-
-def register_feed():
-    c = self.feedCount
-    self.feeds[c].owner = msg.sender
-    self.feedCount = c + 1
-    return(c)
-
-def set_feed(id, v):
-    if self.feeds[id].owner == msg.sender:
-        self.feeds[id].value = v
-
-def mk_cfd_offer(feed, asset, strike, leverage, min, max, maturity):
-    b = self.balances[msg.sender][asset]
-    req = max((strike - min) * leverage, (strike - max) * leverage)
-    assert b >= req
-    self.balances[msg.sender][asset] = b - req
-    c = self.cfdCount
-    self.cfds[c].maker = msg.sender
-    self.cfds[c].feed = feed
-    self.cfds[c].asset = asset
-    self.cfds[c].strike = strike
-    self.cfds[c].leverage = leverage
-    self.cfds[c].min = min
-    self.cfds[c].max = max
-    self.cfds[c].maturity = maturity
-    self.cfdCount = c + 1
-    return(c)
-
-def accept_cfd_offer(c):
-    assert not self.cfds[c].acceptor and self.cfds[c].maker
-    asset = self.cfds[c].asset
-    strike = self.cfds[c].strike
-    min = self.cfds[c].min
-    max = self.cfds[c].max
-    leverage = self.cfds[c].leverage
-    b = self.balances[msg.sender][asset]
-    req = max((min - strike) * leverage, (max - strike) * leverage)
-    assert b >= req
-    self.balances[msg.sender][asset] = b - req
-    self.cfds[c].acceptor = msg.sender
-    self.cfds[c].maturity += block.timestamp
-
-def claim_cfd_offer(c):
-    asset = self.cfds[c].asset
-    strike = self.cfds[c].strike
-    min = self.cfds[c].min
-    max = self.cfds[c].max
-    leverage = self.cfds[c].leverage
-    v = self.feeds[self.cfds[c].feed].value
-    assert v <= min or v >= max or block.timestamp >= self.cfds[c].maturity
-    maker_req = max((strike - min) * leverage, (strike - max) * leverage)
-    acceptor_req = max((min - strike) * leverage, (max - strike) * leverage)
-    paydelta = (strike - v) * leverage
-    self.add(self.cfds[c].maker, asset, maker_req + paydelta)
-    self.add(self.cfds[c].acceptor, asset, acceptor_req - paydelta)
-    self.cfds[c].maker = 0
-    self.cfds[c].acceptor = 0
-    self.cfds[c].feed = 0
-    self.cfds[c].asset = 0
-    self.cfds[c].strike = 0
-    self.cfds[c].leverage = 0
-    self.cfds[c].min = 0
-    self.cfds[c].max = 0
-    self.cfds[c].maturity = 0
-
-def withdraw_cfd_offer(c):
-    if self.cfds[c].maker == msg.sender and not self.cfds[c].acceptor:
-        asset = self.cfds[c].asset
-        strike = self.cfds[c].strike
-        min = self.cfds[c].min
-        max = self.cfds[c].max
-        leverage = self.cfds[c].leverage
-        maker_req = max((strike - min) * leverage, (strike - max) * leverage)
-        self.balances[self.cfds[c].maker][asset] += maker_req
-        self.cfds[c].maker = 0
-        self.cfds[c].acceptor = 0
-        self.cfds[c].feed = 0
-        self.cfds[c].asset = 0
-        self.cfds[c].strike = 0
-        self.cfds[c].leverage = 0
-        self.cfds[c].min = 0
-        self.cfds[c].max = 0
-        self.cfds[c].maturity = 0
-        
-
-def mk_bet_offer(feed, asset, makerstake, acceptorstake, eqtest, maturity):
-    assert self.balances[msg.sender][asset] >= makerstake
-    c = self.betCount
-    self.bets[c].maker = msg.sender
-    self.bets[c].feed = feed
-    self.bets[c].asset = asset
-    self.bets[c].makerstake = makerstake
-    self.bets[c].acceptorstake = acceptorstake
-    self.bets[c].eqtest = eqtest
-    self.bets[c].maturity = maturity
-    self.balances[msg.sender][asset] -= makerstake
-    self.betCount = c + 1
-    return(c)
-
-def accept_bet_offer(c):
-    assert self.bets[c].maker and not self.bets[c].acceptor
-    asset = self.bets[c].asset
-    acceptorstake = self.bets[c].acceptorstake
-    assert self.balances[msg.sender][asset] >= acceptorstake
-    self.balances[msg.sender][asset] -= acceptorstake
-    self.bets[c].acceptor = msg.sender
-
-def claim_bet_offer(c):
-    assert block.timestamp >= self.bets[c].maturity
-    v = self.feeds[self.bets[c].feed].value
-    totalstake = self.bets[c].makerstake + self.bets[c].acceptorstake
-    if v == self.bets[c].eqtest:
-        self.add(self.bets[c].maker, self.bets[c].asset, totalstake)
-    else:
-        self.add(self.bets[c].acceptor, self.bets[c].asset, totalstake)
-    self.bets[c].maker = 0
-    self.bets[c].feed = 0
-    self.bets[c].asset = 0
-    self.bets[c].makerstake = 0
-    self.bets[c].acceptorstake = 0
-    self.bets[c].eqtest = 0
-    self.bets[c].maturity = 0
-
-def cancel_bet(c):
-    assert not self.bets[c].acceptor and msg.sender == self.bets[c].maker
-    self.balances[msg.sender][self.bets[c].asset] += self.bets[c].makerstake
-    self.bets[c].maker = 0
-    self.bets[c].feed = 0
-    self.bets[c].asset = 0
-    self.bets[c].makerstake = 0
-    self.bets[c].acceptorstake = 0
-    self.bets[c].eqtest = 0
-    self.bets[c].maturity = 0
-
-def dividend(holder_asset, divvying_asset, ratio):
-    i = 0
-    sz = self.assets[holder_asset].holdersCount
-    t = 0
-    holders = array(sz)
-    payments = array(sz)
-    while i < sz:
-        holders[i] = self.assets[holder_asset].holders[i]
-        payments[i] = self.balances[holders[i]][holder_asset] * ratio / 10^18
-        t += payments[i]
-        i += 1
-    if self.balances[msg.sender][divvying_asset] >= t:
-        i = 0
-        while i < sz:
-            self.add(holders[i], divvying_asset, payments[i])
-            i += 1
-        self.balances[msg.sender][divvying_asset] -= t
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se
deleted file mode 100644
index 4a43a397417dd44e20707a67c33a4301e636da9f..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/counterparty/heap.se
+++ /dev/null
@@ -1,69 +0,0 @@
-data heaps[2^50](owner, size, nodes[2^50](key, value))
-data heapIndex
-
-def register():
-    i = self.heapIndex
-    self.heaps[i].owner = msg.sender
-    self.heapIndex = i + 1
-    return(i)
-
-def push(heap, key, value):
-    assert msg.sender == self.heaps[heap].owner
-    sz = self.heaps[heap].size
-    self.heaps[heap].nodes[sz].key = key
-    self.heaps[heap].nodes[sz].value = value
-    k = sz + 1
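-    # Sift the new entry up: swap key/value with the parent at k/2
-    # while the child key is smaller, keeping the minimum key at node 1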
-    while k > 1:
-        bottom = self.heaps[heap].nodes[k].key
-        top = self.heaps[heap].nodes[k/2].key
-        if bottom < top:
-            tvalue = self.heaps[heap].nodes[k/2].value
-            bvalue = self.heaps[heap].nodes[k].value
-            self.heaps[heap].nodes[k].key = top
-            self.heaps[heap].nodes[k].value = tvalue
-            self.heaps[heap].nodes[k/2].key = bottom
-            self.heaps[heap].nodes[k/2].value = bvalue
-            k /= 2
-        else:
-            k = 0
-    self.heaps[heap].size = sz + 1
-
-def pop(heap):
-    sz = self.heaps[heap].size
-    assert sz
-    prevtop = self.heaps[heap].nodes[1].value
-    self.heaps[heap].nodes[1].key = self.heaps[heap].nodes[sz].key
-    self.heaps[heap].nodes[1].value = self.heaps[heap].nodes[sz].value
-    self.heaps[heap].nodes[sz].key = 0
-    self.heaps[heap].nodes[sz].value = 0
-    top = self.heaps[heap].nodes[1].key
-    k = 1
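-    # Sift the old last element down from the root toward the smaller
-    # child until neither child key is below it (min-heap property)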
-    while k * 2 < sz:
-        bottom1 = self.heaps[heap].nodes[k * 2].key
-        bottom2 = self.heaps[heap].nodes[k * 2 + 1].key
-        if bottom1 < top and (bottom1 < bottom2 or k * 2 + 1 >= sz):
-            tvalue = self.heaps[heap].nodes[k].value
-            bvalue = self.heaps[heap].nodes[k * 2].value
-            self.heaps[heap].nodes[k].key = bottom1
-            self.heaps[heap].nodes[k].value = bvalue
-            self.heaps[heap].nodes[k * 2].key = top
-            self.heaps[heap].nodes[k * 2].value = tvalue
-            k = k * 2
-        elif bottom2 < top and bottom2 < bottom1 and k * 2 + 1 < sz:
-            tvalue = self.heaps[heap].nodes[k].value
-            bvalue = self.heaps[heap].nodes[k * 2 + 1].value
-            self.heaps[heap].nodes[k].key = bottom2
-            self.heaps[heap].nodes[k].value = bvalue
-            self.heaps[heap].nodes[k * 2 + 1].key = top
-            self.heaps[heap].nodes[k * 2 + 1].value = tvalue
-            k = k * 2 + 1
-        else:
-            k = sz
-    self.heaps[heap].size = sz - 1
-    return(prevtop)
-
-def top(heap):
-    return(self.heaps[heap].nodes[1].value)
-
-def size(heap):
-    return(self.heaps[heap].size)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se
deleted file mode 100644
index 9fd1e0643bbed49cc64f49dd90eab81bf59f8748..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/crowdfund.se
+++ /dev/null
@@ -1,53 +0,0 @@
-data campaigns[2^80](recipient, goal, deadline, contrib_total, contrib_count, contribs[2^50](sender, value))
-
-def create_campaign(id, recipient, goal, timelimit):
-    if self.campaigns[id].recipient:
-        return(0)
-    self.campaigns[id].recipient = recipient
-    self.campaigns[id].goal = goal
-    self.campaigns[id].deadline = block.timestamp + timelimit
-
-def contribute(id):
-    # Update contribution total
-    total_contributed = self.campaigns[id].contrib_total + msg.value
-    self.campaigns[id].contrib_total = total_contributed
-
-    # Record new contribution
-    sub_index = self.campaigns[id].contrib_count
-    self.campaigns[id].contribs[sub_index].sender = msg.sender
-    self.campaigns[id].contribs[sub_index].value = msg.value
-    self.campaigns[id].contrib_count = sub_index + 1
-
-    # Enough funding?
-    if total_contributed >= self.campaigns[id].goal:
-        send(self.campaigns[id].recipient, total_contributed)
-        self.clear(id)
-        return(1)
-
-    # Expired?
-    if block.timestamp > self.campaigns[id].deadline:
-        i = 0
-        c = self.campaigns[id].contrib_count
-        while i < c:
-            send(self.campaigns[id].contribs[i].sender, self.campaigns[id].contribs[i].value)
-            i += 1
-        self.clear(id)
-        return(2)
-
-def progress_report(id):
-    return(self.campaigns[id].contrib_total)
-
-# Clearing function for internal use
-def clear(id):
-    if self == msg.sender:
-        self.campaigns[id].recipient = 0
-        self.campaigns[id].goal = 0
-        self.campaigns[id].deadline = 0
-        c = self.campaigns[id].contrib_count
-        self.campaigns[id].contrib_count = 0
-        self.campaigns[id].contrib_total = 0
-        i = 0
-        while i < c:
-            self.campaigns[id].contribs[i].sender = 0
-            self.campaigns[id].contribs[i].value = 0
-            i += 1
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se
deleted file mode 100644
index 0d68622ac53bf961fd2d32eda20fbab7874d256f..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/futarchy.se
+++ /dev/null
@@ -1,136 +0,0 @@
-# 0: current epoch
-# 1: number of proposals
-# 2: master currency
-# 3: last winning market
-# 4: last txid
-# 5: long-term ema currency units purchased
-# 6: last block when currency units purchased
-# 7: ether allocated to last round
-# 8: last block when currency units claimed
-# 9: ether allocated to current round
-# 1000+: [proposal address, market ID, totprice, totvolume]
-
-init:
-    # We technically have two levels of epoch here. We have
-    # one epoch of 1000, to synchronize with the 1000 epoch
-    # of the market, and then 100 of those epochs make a
-    # meta-epoch (I'll nominate the term "seculum") over
-    # which the futarchy protocol will take place
-    contract.storage[0] = block.number / 1000
-    # The master currency of the futarchy. The futarchy will
-    # assign currency units to whoever the prediction market
-    # thinks will best increase the currency's value
-    master_currency = create('subcurrency.se')
-    contract.storage[2] = master_currency
-code:
-    curepoch = block.number / 1000
-    prevepoch = contract.storage[0]
-    if curepoch > prevepoch:
-        if (curepoch % 100) > 50:
-            # Collect price data
-            # We take an average over 50 subepochs to determine
-            # the price of each asset, weighting by volume to
-            # prevent abuse
-            contract.storage[0] = curepoch
-            i = 0
-            numprop = contract.storage[1]
-            while i < numprop:
-                market = contract.storage[1001 + i * 4]
-                price = call(market, 2)
-                volume = call(market, 3)
-                contract.storage[1002 + i * 4] += price * volume
-                contract.storage[1003 + i * 4] += volume
-                i += 1
-        if (curepoch / 100) > (prevepoch / 100):
-            # If we are entering a new seculum, we determine the
-            # market with the highest total average price
-            best = 0
-            bestmarket = 0
-            besti = 0
-            i = 0
-            numprop = contract.storage[1]
-            while i < numprop:
-                curtotprice = contract.storage[1002 + i * 4]
-                curvolume = contract.storage[1003 + i * 4]
-                curavgprice = curtotprice / curvolume
-                if curavgprice > best:
-                    best = curavgprice
-                    besti = i
-                    bestmarket = contract.storage[1001 + i * 4]
-                i += 1
-            # Reset the number of proposals to 0
-            contract.storage[1] = 0
-            # Reward the highest proposal
-            call(contract.storage[2], [contract.storage[1000 + besti * 4], 10^9, 0], 3)
-            # Record the winning market so we can later appropriately
-            # compensate the participants
-            contract.storage[3] = bestmarket
-            # The amount of ether allocated to the last round
-            contract.storage[7] = contract.storage[9]
-            # The amount of ether allocated to the next round
-            contract.storage[9] = contract.balance / 2
-    # Make a proposal [0, address]
-    if msg.data[0] == 0 and curepoch % 100 < 50:
-        pid = contract.storage[1]
-        market = create('market.se')
-        c1 = create('subcurrency.se')
-        c2 = create('subcurrency.se')
-        call(market,  [c1, c2], 2)
-        contract.storage[1000 + pid * 4] = msg.data[1]
-        contract.storage[1001 + pid * 4] = market
-        contract.storage[1] += 1        
-    # Claim ether [1, address]
-    # One unit of the first currency in the last round's winning
-    # market entitles you to a quantity of ether that was decided
-    # at the start of that epoch
-    elif msg.data[0] == 1:
-        first_subcurrency = call(contract.storage[3], 4)
-        # We ask the first subcurrency contract what the last transaction was. The
-        # way to make a claim is to send the amount of first currency units that
-        # you wish to claim with, and then immediately call this contract. For security
-        # it makes sense to set up a tx which sends both messages in sequence atomically
-        data = call(first_subcurrency, [], 0, 4)
-        from = data[0]
-        to = data[1]
-        value = data[2]
-        txid = data[3]
-        if txid > contract.storage[4] and to == contract.address:
-            send(from, contract.storage[7] * value / 10^9)
-            contract.storage[4] = txid
-    # Claim second currency [2, address]
-    # One unit of the second currency in the last round's winning
-    # market entitles you to one unit of the futarchy's master
-    # currency
-    elif msg.data[0] == 2:
-        second_subcurrency = call(contract.storage[3], 5)
-        data = call(second_subcurrency, [], 0, 4)
-        from = data[0]
-        to = data[1]
-        value = data[2]
-        txid = data[3]
-        if txid > contract.storage[4] and to == contract.address:
-            call(contract.storage[2], [from, value], 2)
-            contract.storage[4] = txid
-    # Purchase currency for ether (target releasing 10^9 units per seculum)
-    # Price starts off 1 eth for 10^9 units but increases hyperbolically to
-    # limit issuance
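-    # Illustrative numbers: with reserve(x) = 10^18 / (10^9 + x / 10^9),
-    # a deposit d yields reserve(ema) - reserve(ema + d) units, so at
-    # ema = 0 the marginal rate is ~10^9 units per eth while a full
-    # 1-eth deposit yields 5 * 10^8 units; issuance slows as ema grows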
-    elif msg.data[0] == 3:
-        pre_ema = contract.storage[5]
-        post_ema = pre_ema + msg.value
-        pre_reserve = 10^18 / (10^9 + pre_ema / 10^9)
-        post_reserve = 10^18 / (10^9 + post_ema / 10^9)
-        call(contract.storage[2], [msg.sender, pre_reserve - post_reserve], 2)
-        last_sold = contract.storage[6]
-        contract.storage[5] = pre_ema * (100000 + last_sold - block.number) + msg.value
-        contract.storage[6] = block.number
-    # Claim all currencies [4] as the ether miner of the current block
-    elif msg.data[0] == 4 and msg.sender == block.coinbase and block.number > contract.storage[8]:
-        i = 0
-        numproposals = contract.storage[1]
-        while i < numproposals:
-            market = contract.storage[1001 + i * 4]
-            fc = call(market, 4)
-            sc = call(market, 5)
-            call(fc, [msg.sender, 1000], 2)
-            call(sc, [msg.sender, 1000], 2)
-            i += 1
-        contract.storage[8] = block.number
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se
deleted file mode 100644
index 1bc442e6d4fb2b803d154626462139db9b451e55..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/heap.se
+++ /dev/null
@@ -1,55 +0,0 @@
-# 0: size
-# 1-n: elements
-
-init:
-    contract.storage[1000] = msg.sender
-code:
-    # Only owner of the heap is allowed to modify it
-    if contract.storage[1000] != msg.sender:
-        stop
-    # push
-    if msg.data[0] == 0:
-        sz = contract.storage[0]
-        contract.storage[sz + 1] = msg.data[1]
-        k = sz + 1
-        while k > 1:
-            bottom = contract.storage[k]
-            top = contract.storage[k/2]
-            if bottom < top:
-                contract.storage[k] = top
-                contract.storage[k/2] = bottom
-                k /= 2
-            else:
-                k = 0
-        contract.storage[0] = sz + 1
-    # pop
-    elif msg.data[0] == 1:
-        sz = contract.storage[0]
-        if !sz:
-            return(0)
-        prevtop = contract.storage[1]
-        contract.storage[1] = contract.storage[sz]
-        contract.storage[sz] = 0
-        top = contract.storage[1]
-        k = 1
-        while k * 2 < sz:
-            bottom1 = contract.storage[k * 2]
-            bottom2 = contract.storage[k * 2 + 1]
-            if bottom1 < top and (bottom1 < bottom2 or k * 2 + 1 >= sz):
-                contract.storage[k] = bottom1
-                contract.storage[k * 2] = top
-                k = k * 2
-            elif bottom2 < top and bottom2 < bottom1 and k * 2 + 1 < sz:
-                contract.storage[k] = bottom2
-                contract.storage[k * 2 + 1] = top
-                k = k * 2 + 1
-            else:
-                k = sz
-        contract.storage[0] = sz - 1
-        return(prevtop)
-    # top
-    elif msg.data[0] == 2:
-        return(contract.storage[1])
-    # size
-    elif msg.data[0] == 3:
-        return(contract.storage[0])
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se
deleted file mode 100644
index 2303a0b607b6055ac063dab287b8c845ea6951a2..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/market.se
+++ /dev/null
@@ -1,117 +0,0 @@
-# Creates a decentralized market between any two subcurrencies
-
-# Here, the first subcurrency is the base asset and the second
-# subcurrency is the asset priced against the base asset. Hence,
-# "buying" refers to trading the first for the second, and
-# "selling" refers to trading the second for the first
-
-# storage 0: buy orders
-# storage 1: sell orders
-# storage 1000: first subcurrency
-# storage 1001: last first subcurrency txid
-# storage 2000: second subcurrency
-# storage 2001: last second subcurrency txid
-# storage 3000: current epoch
-# storage 4000: price
-# storage 4001: volume
-
-init:
-    # Heap for buy orders
-    contract.storage[0] = create('heap.se')
-    # Heap for sell orders
-    contract.storage[1] = create('heap.se')
-code:
-    # Initialize with [ first_subcurrency, second_subcurrency ]
-    if !contract.storage[1000]:
-        contract.storage[1000] = msg.data[0] # First subcurrency
-        contract.storage[1001] = -1
-        contract.storage[2000] = msg.data[1] # Second subcurrency
-        contract.storage[2001] = -1
-        contract.storage[3000] = block.number / 100
-        stop
-    first_subcurrency = contract.storage[1000]
-    second_subcurrency = contract.storage[2000]
-    buy_heap = contract.storage[0]
-    sell_heap = contract.storage[1]
-    # This contract operates in "epochs" of 100 blocks
-    # At the end of each epoch, we process all orders
-    # simultaneously, independent of order. This algorithm
-    # prevents front-running, and generates a profit from
-    # the spread. The profit is permanently kept in the
-    # market (ie. destroyed), making both subcurrencies
-    # more valuable
-    
-    # Epoch transition code
-    if contract.storage[3000] < block.number / 100:
-        done = 0
-        volume = 0
-        while !done:
-            # Grab the top buy and sell order from each heap
-            topbuy = call(buy_heap, 1)
-            topsell = call(sell_heap, 1)
-            # An order is recorded in the heap as:
-            # Buys: -price * 2^208 + units of first currency * 2^160 + from
-            # Sells: price * 2^208 + units of second currency * 2^160 + from
-            buyprice = -(topbuy / 2^208)
-            buyfcvalue = (topbuy / 2^160) % 2^48
-            buyer = topbuy % 2^160
-            sellprice = topsell / 2^208
-            sellscvalue = (topsell / 2^160) % 2^48
-            seller = topsell % 2^160
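-            # Illustrative decode: a sell at price 600 for 10^6 units
-            # from address A is stored as 600 * 2^208 + 10^6 * 2^160 + A,
-            # so / 2^208 recovers the price, / 2^160 % 2^48 the units,
-            # and % 2^160 the address; buys negate the price so that
-            # the min-heap pops the highest bid first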
-            # Heap empty, or no more matching orders
-            if not topbuy or not topsell or buyprice < sellprice:
-                done = 1
-            else:
-                # Add to volume counter
-                volume += buyfcvalue
-                # Calculate how much of the second currency the buyer gets, and
-                # how much of the first currency the seller gets
-                sellfcvalue = sellscvalue / buyprice
-                buyscvalue = buyfcvalue * sellprice
-                # Send the currency units along
-                call(second_subcurrency, [buyer, buyscvalue], 2)
-                call(first_subcurrency, [seller, sellfcvalue], 2)
-        if volume:
-            contract.storage[4000] = (buyprice + sellprice) / 2
-        contract.storage[4001] = volume
-        contract.storage[3000] = block.number / 100
-    # Make buy order [0, price]
-    if msg.data[0] == 0:
-        # We ask the first subcurrency contract what the last transaction was. The
-        # way to make a buy order is to send the amount of first currency units that
-        # you wish to buy with, and then immediately call this contract. For security
-        # it makes sense to set up a tx which sends both messages in sequence atomically
-        data = call(first_subcurrency, [], 0, 4)
-        from = data[0]
-        to = data[1]
-        value = data[2]
-        txid = data[3]
-        price = msg.data[1]
-        if txid > contract.storage[1001] and to == contract.address:
-            contract.storage[1001] = txid
-            # Adds the order to the heap
-            call(buy_heap, [0, -price * 2^208 + (value % 2^48) * 2^160 + from], 2) 
-    # Make sell order [1, price]
-    elif msg.data[0] == 1:
-        # Same mechanics as buying
-        data = call(second_subcurrency, [], 0, 4)
-        from = data[0]
-        to = data[1]
-        value = data[2]
-        txid = data[3]
-        price = msg.data[1]
-        if txid > contract.storage[2001] and to == contract.address:
-            contract.storage[2001] = txid
-            call(sell_heap, [0, price * 2^208 + (value % 2^48) * 2^160 + from], 2) 
-    # Ask for price
-    elif msg.data[0] == 2:
-        return(contract.storage[4000])
-    # Ask for volume
-    elif msg.data[0] == 3:
-        return(contract.storage[4001])
-    # Ask for first currency
-    elif msg.data[0] == 4:
-        return(contract.storage[1000])
-    # Ask for second currency
-    elif msg.data[0] == 5:
-        return(contract.storage[2000])
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se
deleted file mode 100644
index 1501beff76ea6f99a2fb4d08be6f3310651769c9..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/subcurrency.se
+++ /dev/null
@@ -1,35 +0,0 @@
-# Initialization
-# Admin can issue and delete at will
-init:
-    contract.storage[0] = msg.sender
-code:
-    # If a message with one item is sent, that's a balance query
-    if msg.datasize == 1:
-        addr = msg.data[0]
-        return(contract.storage[addr])
-    # If a message with two items [to, value] are sent, that's a transfer request
-    elif msg.datasize == 2:
-        from = msg.sender
-        fromvalue = contract.storage[from]
-        to = msg.data[0]
-        value = msg.data[1]
-        if fromvalue >= value and value > 0 and to > 4:
-            contract.storage[from] = fromvalue - value
-            contract.storage[to] += value
-            contract.storage[2] = from
-            contract.storage[3] = to
-            contract.storage[4] = value
-            contract.storage[5] += 1
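-            # Slots 2-5 record (from, to, value, txid) of the most
-            # recent transfer; other contracts poll this via the
-            # no-argument query below to detect payments sent to them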
-            return(1)
-        return(0)
-    elif msg.datasize == 3 and msg.sender == contract.storage[0]:
-        # Admin can issue at will by sending a [to, value, 0] message
-        if msg.data[2] == 0:
-            contract.storage[msg.data[0]] += msg.data[1]
-        # Change admin [ newadmin, 0, 1 ]
-        # Set admin to 0 to disable administration
-        elif msg.data[2] == 1:
-            contract.storage[0] = msg.data[0]
-    # Fetch last transaction
-    else:
-        return([contract.storage[2], contract.storage[3], contract.storage[4], contract.storage[5]], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py
deleted file mode 100644
index 301a4a845286282cc01d90902f1a5cf797f9deba..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/cyberdyne/test.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from __future__ import print_function
-import pyethereum
-t = pyethereum.tester
-s = t.state()
-# Create currencies
-c1 = s.contract('subcurrency.se')
-print("First currency: %s" % c1)
-c2 = s.contract('subcurrency.se')
-print("First currency: %s" % c2)
-# Allocate units
-s.send(t.k0, c1, 0, [t.a0, 1000, 0])
-s.send(t.k0, c1, 0, [t.a1, 1000, 0])
-s.send(t.k0, c2, 0, [t.a2, 1000000, 0])
-s.send(t.k0, c2, 0, [t.a3, 1000000, 0])
-print("Allocated units")
-# Market
-m = s.contract('market.se')
-s.send(t.k0, m, 0, [c1, c2])
-# Place orders
-s.send(t.k0, c1, 0, [m, 1000])
-s.send(t.k0, m, 0, [0, 1200])
-s.send(t.k1, c1, 0, [m, 1000])
-s.send(t.k1, m, 0, [0, 1400])
-s.send(t.k2, c2, 0, [m, 1000000])
-s.send(t.k2, m, 0, [1, 800])
-s.send(t.k3, c2, 0, [m, 1000000])
-s.send(t.k3, m, 0, [1, 600])
-print("Orders placed")
-# Next epoch and ping
-s.mine(100)
-print("Mined 100")
-s.send(t.k0, m, 0, [])
-print("Updating")
-# Check
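-# Expected clearing: a1's bid at 1400 matches a3's ask at 600 and a0's
-# bid at 1200 matches a2's ask at 800. Each buyer gets value * sellprice
-# second-currency units (1000 * 600 = 600000, 1000 * 800 = 800000) and
-# each seller gets value / buyprice first-currency units
-# (10^6 / 1400 = 714, 10^6 / 1200 = 833), as asserted below.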
-assert s.send(t.k0, c2, 0, [t.a0]) == [800000]
-assert s.send(t.k0, c2, 0, [t.a1]) == [600000]
-assert s.send(t.k0, c1, 0, [t.a2]) == [833]
-assert s.send(t.k0, c1, 0, [t.a3]) == [714]
-print("Balance checks passed")
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se
deleted file mode 100644
index 4c4a56de867bf99437e75c9daf7e179475281eb3..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/datafeed.se
+++ /dev/null
@@ -1,12 +0,0 @@
-# Database updateable only by the original creator
-data creator
-
-def init():
-    self.creator = msg.sender
-
-def update(k, v):
-    if msg.sender == self.creator:
-        self.storage[k] = v
-
-def query(k):
-    return(self.storage[k])
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se
deleted file mode 100644
index ce28f58c23f00c7fc42b0f385f954fb15c075635..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover.se
+++ /dev/null
@@ -1,40 +0,0 @@
-# So I looked up on Wikipedia what Jacobian form actually is, and noticed that it's
-# actually a rather different and more clever construction than the naive version
-# that I created. It may be possible to achieve a further 20-50% savings by applying
-# that version.
-
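-# Standard ECDSA key recovery, assembled from the helper contracts
-# below: with R the curve point whose x-coordinate is r and whose
-# y-parity is chosen by v, the signer's public key is
-# r^(-1) * (s * R - h * G); GZ computes -h * G, XY computes s * R,
-# COMB adds them, and the final JORDANMUL multiplies by r^(-1).
-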
-extern all: [call]
-
-data JORDANMUL
-data JORDANADD
-data EXP
-
-def init():
-    self.JORDANMUL = create('jacobian_mul.se')
-    self.JORDANADD = create('jacobian_add.se')
-    self.EXP = create('modexp.se')
-
-def call(h, v, r, s):
-    N = -432420386565659656852420866394968145599
-    P = -4294968273
-    h = mod(h, N)
-    r = mod(r, P)
-    s = mod(s, N)
-    Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
-    Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
-    x = r
-    xcubed = mulmod(mulmod(x, x, P), x, P)
-    beta = self.EXP.call(addmod(xcubed, 7, P), div(P + 1, 4), P)
-    
-    # Static-gascost ghetto conditional
-    y_is_positive = mod(v, 2) xor mod(beta, 2)
-    y = beta * y_is_positive + (P - beta) * (1 - y_is_positive)
-    
-    GZ = self.JORDANMUL.call(Gx, 1, Gy, 1, N - h, outsz=4)
-    XY = self.JORDANMUL.call(x, 1, y, 1, s, outsz=4)
-    COMB = self.JORDANADD.call(GZ[0], GZ[1], GZ[2], GZ[3], XY[0], XY[1], XY[2], XY[3], 1, outsz=5)
-    COMB[4] = self.EXP.call(r, N - 2, N)
-    Q = self.JORDANMUL.call(data=COMB, datasz=5, outsz=4)
-    ox = mulmod(Q[0], self.EXP.call(Q[1], P - 2, P), P)
-    oy = mulmod(Q[2], self.EXP.call(Q[3], P - 2, P), P)
-    return([ox, oy], 2)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm
deleted file mode 100644
index f575fe70f91fa241482250a0f60bd02ac98cfc7d..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/ecrecover_compiled.evm
+++ /dev/null
@@ -1 +0,0 @@
-6000607f535961071c80610013593961072f566000605f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f090506000555961040680610168593961056e566000603f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f0905060005561029a8061016860003961040256600061043f5360003560001a60001415610299576101006001602037602051151561002d5760605115610030565b60005b1561007657608059905901600090526101405260a051610140515260c051602061014051015260e051604061014051015261010051606061014051015261014051610120525b60a05115156100885760e0511561008b565b60005b156100d0576080599059016000905261016052602051610160515260405160206101605101526060516040610160510152608051606061016051015261016051610120525b61012051156100e157608061012051f25b6401000003d16000036000818260a0516040510983038360c051602051090814156101b1576000818260e051608051098303836101005160605109081415610175576080608080599059016000905260006101c0601f01536020516101e052604051610200526060516102205260805161024052818160816101c0601f01600060005460195a03f1508090509050f26101b0565b608060805990590160009052610280526000610280515260016020610280510152600060406102805101526001606061028051015261028051f25b5b808160405160c051098283610100516060510984038460805160e05109080981828360c0516020510984038460405160a051090883608051610100510909828283098382830984856020518309860386604051850908856040518309868760a051830988038860c0518509088760c051830988898a60405185098b038b8460205109088909898a836040510989098a8b60605183098c038c6080518509088b60805183096080608059905901600090526103e052866103e051528560206103e05101528260406103e05101528160606103e05101526103e051f2505050505050505050505050505b5b6000f25b816000f090506001556101928061058660003961071856600061013f5360003560001a600014156101915760a0600160203770014551231950b75fc4402da1732fc9bebf60000360a0510660a05260a05115606051156020511502011561007e5760806080599059016000905260c052600060c051526001602060c05101526000604060c05101526001606060c051015260c051f25b610120599059016000905260e052600060e051526000602060e05101526001604060e05101526000606060e05101526001608060e0510152600060a060e0510152600060c060e0510152600060e060e0510152600061010060e051015260e0517f80000000000000000000000000000000000000000000000000000000000000005b6000811115610187578060a0511615610165576080602083016081601f85016000600054614e20f15060205160a083015260405160c083015260605160e0830152608051610100830152608060208301610101601f85016000600154614e20f161017b565b6080602083016081601f85016000600054614e20f15b50600281049050610100565b608060208301f250505b5b6000f25b816000f0905060005559610406806107475939610b4d566000603f535961013d8061001359396101505661012b8061000e60003961013956600061023f5360003560001a6000141561012a5760806001602037602051151561002c576060511561002f565b60005b156100695760806080599059016000905260a052600060a051526001602060a05101526000604060a05101526001606060a051015260a051f25b6401000003d160000380608051826003846020516020510909098182600260605109836040516040510909828283098382830984858660026020510983098603866040518509088560405183098686888985604051098a038a85602051090809878689846040510909888960605183098a038a6080518509088960805183096080608059905901600090526101e052866101e051528560206101e05101528260406101e05101528160606101e05101526101e051f250505050505050505050505b5b6000f25b816000f0905060005561029a8061016860003961040256600061043f5360003560001a60001415610299576101006001602037602051151561002d5760605115610030565b60005b1561007657608059905901600090526101405260a051610140515260c051602061014051015260e051604061014051015261010051606061014051015261014051610120525b60a05115156100885760e0511561008b565b60005b156100d0576080599059016000905261016052602051610160515260405160206101605101526060516040610160510152608051606061016051015261016051610120525b61012051156100e157608061012051f25b6401000003d16000036000818260a0516040510983038360c051602051090814156101b1576000818260e051608051098303836101005160605109081415610175576080608080599059016000905260006101c0601f01536020516101e052604051610200526060516102205260805161024052818160816101c0601f01600060005460195a03f1508090509050f26101b0565b608060805990590160009052610280526000610280515260016020610280510152600060406102805101526001606061028051015261028051f25b5b808160405160c051098283610100516060510984038460805160e05109080981828360c0516020510984038460405160a051090883608051610100510909828283098382830984856020518309860386604051850908856040518309868760a051830988038860c0518509088760c051830988898a60405185098b038b8460205109088909898a836040510989098a8b60605183098c038c6080518509088b60805183096080608059905901600090526103e052866103e051528560206103e05101528260406103e05101528160606103e05101526103e051f2505050505050505050505050505b5b6000f25b816000f09050600155596100d080610b655939610c35566100be8061000e6000396100cc5660003560001a600014156100bd576060600160203760017f80000000000000000000000000000000000000000000000000000000000000005b60008111156100b157606051816040511615156020510a606051848509099150606051600282046040511615156020510a606051848509099150606051600482046040511615156020510a606051848509099150606051600882046040511615156020510a606051848509099150601081049050610038565b8160c052602060c0f250505b5b6000f25b816000f090506002556103d280610c4d60003961101f56600061095f5360003560001a600014156103d1576080600160203770014551231950b75fc4402da1732fc9bebf60000360a0526401000003d160000360c05260a0516020510660205260c0516060510660605260a051608051066080527f79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179860e0527f483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8610100526060516101205260c0516101205160c05161012051610120510909610140526000610180601f015360c051600761014051086101a0526004600160c05101046101c05260c0516101e05260206102006061610180601f01600060025460195a03f1506102005161016052600261016051066002604051061861022052610220516001036101605160c05103026102205161016051020161024052608080599059016000905260006102a0601f015360e0516102c05260016102e052610100516103005260016103205260205160a0510361034052818160a16102a0601f01600060005460195a03f150809050905061026052608080599059016000905260006103c0601f0153610120516103e052600161040052610240516104205260016104405260805161046052818160a16103c0601f01600060005460195a03f15080905090506103805260a080599059016000905260006104e0601f015361026051516105005260206102605101516105205260406102605101516105405260606102605101516105605261038051516105805260206103805101516105a05260406103805101516105c05260606103805101516105e05260016106005281816101216104e0601f01600060015460195a03f15080905090506104a0526000610640601f015360605161066052600260a051036106805260a0516106a05260206106c06061610640601f01600060025460195a03f1506106c05160806104a05101526104a05160208103805160018303608080599059016000905260008353818160a185600060005460195a03f150838552809050905090509050905090506106e05260c05160006107e0601f015360206106e051015161080052600260c051036108205260c05161084052602061086060616107e0601f01600060025460195a03f150610860516106e05151096107c05260c05160006108a0601f015360606106e05101516108c052600260c051036108e05260c05161090052602061092060616108a0601f01600060025460195a03f1506109205160406106e05101510961088052604060405990590160009052610940526107c051610940515261088051602061094051015261094051f25b5b6000f2
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se
deleted file mode 100644
index 29dc390b2f54b4b52ad19693fa03d838fb08a858..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_add.se
+++ /dev/null
@@ -1,32 +0,0 @@
-extern all: [call]
-data DOUBLE
-
-def init():
-    self.DOUBLE = create('jacobian_double.se')
-
-def call(axn, axd, ayn, ayd, bxn, bxd, byn, byd):
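-    # Points arrive as fractions (xn/xd, yn/yd) mod P, so the curve
-    # arithmetic needs no modular inverses; callers convert back to
-    # affine coordinates with a single modexp at the very end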
-    if !axn and !ayn:
-        o = [bxn, bxd, byn, byd]
-    if !bxn and !byn:
-        o = [axn, axd, ayn, ayd]
-    if o:
-        return(o, 4)
-    with P = -4294968273:
-        if addmod(mulmod(axn, bxd, P), P - mulmod(axd, bxn, P), P) == 0:
-            if addmod(mulmod(ayn, byd, P), P - mulmod(ayd, byn, P), P) == 0:
-                return(self.DOUBLE.call(axn, axd, ayn, ayd, outsz=4), 4)
-            else:
-                return([0, 1, 0, 1], 4)
-        with mn = mulmod(addmod(mulmod(byn, ayd, P), P - mulmod(ayn, byd, P), P), mulmod(bxd, axd, P), P):
-            with md = mulmod(mulmod(byd, ayd, P), addmod(mulmod(bxn, axd, P), P - mulmod(axn, bxd, P), P), P):
-                with msqn = mulmod(mn, mn, P):
-                    with msqd = mulmod(md, md, P):
-                        with msqman = addmod(mulmod(msqn, axd, P), P - mulmod(msqd, axn, P), P):
-                            with msqmad = mulmod(msqd, axd, P):
-                                with xn = addmod(mulmod(msqman, bxd, P), P - mulmod(msqmad, bxn, P), P):
-                                    with xd = mulmod(msqmad, bxd, P):
-                                        with mamxn = mulmod(mn, addmod(mulmod(axn, xd, P), P - mulmod(xn, axd, P), P), P):
-                                            with mamxd = mulmod(md, mulmod(axd, xd, P), P):
-                                                with yn = addmod(mulmod(mamxn, ayd, P), P - mulmod(mamxd, ayn, P), P):
-                                                    with yd = mulmod(mamxd, ayd, P):
-                                                        return([xn, xd, yn, yd], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se
deleted file mode 100644
index b7d8221a6fc6d03a7852c871c3dc5d493e2517de..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_double.se
+++ /dev/null
@@ -1,16 +0,0 @@
-def call(axn, axd, ayn, ayd):
-    if !axn and !ayn:
-        return([0, 1, 0, 1], 4)
-    with P = -4294968273:
-        # No need to add (A, 1) because A = 0 for bitcoin
-        with mn = mulmod(mulmod(mulmod(axn, axn, P), 3, P), ayd, P):
-            with md = mulmod(mulmod(axd, axd, P), mulmod(ayn, 2, P), P):
-                with msqn = mulmod(mn, mn, P):
-                    with msqd = mulmod(md, md, P):
-                        with xn = addmod(mulmod(msqn, axd, P), P - mulmod(msqd, mulmod(axn, 2, P), P), P):
-                            with xd = mulmod(msqd, axd, P):
-                                with mamxn = mulmod(addmod(mulmod(axn, xd, P), P - mulmod(axd, xn, P), P), mn, P):
-                                    with mamxd = mulmod(mulmod(axd, xd, P), md, P):
-                                        with yn = addmod(mulmod(mamxn, ayd, P), P - mulmod(mamxd, ayn, P), P):
-                                            with yd = mulmod(mamxd, ayd, P):
-                                                return([xn, xd, yn, yd], 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se
deleted file mode 100644
index bf5b96bb4de77586d33b80986d3460d087026209..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/jacobian_mul.se
+++ /dev/null
@@ -1,37 +0,0 @@
-# Expected gas cost
-#
-# def expect(n, point_at_infinity=False):
-#     n = n % (2**256 - 432420386565659656852420866394968145599)
-#     if point_at_infinity:
-#         return 79
-#     if n == 0:
-#         return 34479
-#     L = int(1 + math.log(n) / math.log(2))
-#     H = len([x for x in b.encode(n, 2) if x == '1'])
-#     return 34221 + 94 * L + 343 * H
-
-data DOUBLE
-data ADD
-
-def init():
-    self.DOUBLE = create('jacobian_double.se')
-    self.ADD = create('jacobian_add.se')
-
-def call(axn, axd, ayn, ayd, n):
-    n = mod(n, -432420386565659656852420866394968145599)
-    if !axn * !ayn + !n: # Constant-gas version of !axn and !ayn or !n
-        return([0, 1, 0, 1], 4)
-    with o = [0, 0, 1, 0, 1, 0, 0, 0, 0]:
-        with b = 2 ^ 255:
-            while gt(b, 0):
-                if n & b:
-                    ~call(20000, self.DOUBLE, 0, o + 31, 129, o + 32, 128)
-                    o[5] = axn
-                    o[6] = axd
-                    o[7] = ayn
-                    o[8] = ayd
-                    ~call(20000, self.ADD, 0, o + 31, 257, o + 32, 128)
-                else:
-                    ~call(20000, self.DOUBLE, 0, o + 31, 129, o + 32, 128)
-                b = div(b, 2)
-            return(o + 32, 4)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se
deleted file mode 100644
index 687b12a04ca41e568a8be42d3c5292ab7b418c28..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/modexp.se
+++ /dev/null
@@ -1,11 +0,0 @@
-def call(b, e, m):
-    with o = 1:
-        with bit = 2 ^ 255:
-            while gt(bit, 0):
-                # A touch of loop unrolling for 20% efficiency gain
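-                # Note: Serpent's ^ is exponentiation, so b ^ !(!(e & bit))
-                # is b when the bit of e is set and 1 otherwise, i.e. a
-                # branch-free "multiply by b iff the bit is set"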
-                o = mulmod(mulmod(o, o, m), b ^ !(!(e & bit)), m)
-                o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 2))), m)
-                o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 4))), m)
-                o = mulmod(mulmod(o, o, m), b ^ !(!(e & div(bit, 8))), m)
-                bit = div(bit, 16)
-            return(o)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py
deleted file mode 100644
index 0007da0cfd9ecaa890f958f902cf61ec8a3c655e..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/substitutes.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import bitcoin as b
-import math
-import sys
-
-
-def signed(o):
-    return map(lambda x: x - 2**256 if x >= 2**255 else x, o)
-
-
-def hamming_weight(n):
-    return len([x for x in b.encode(n, 2) if x == '1'])
-
-
-def binary_length(n):
-    return len(b.encode(n, 2))
-
-
-def jacobian_mul_substitute(A, B, C, D, N):
-    if A == 0 and C == 0 or (N % b.N) == 0:
-        return {"gas": 86, "output": [0, 1, 0, 1]}
-    else:
-        output = b.jordan_multiply(((A, B), (C, D)), N)
-        return {
-            "gas": 35262 + 95 * binary_length(N % b.N) + 355 * hamming_weight(N % b.N),
-            "output": signed(list(output[0]) + list(output[1]))
-        }
-
-
-def jacobian_add_substitute(A, B, C, D, E, F, G, H):
-    if A == 0 or E == 0:
-        gas = 149
-    elif (A * F - B * E) % b.P == 0:
-        if (C * H - D * G) % b.P == 0:
-            gas = 442
-        else:
-            gas = 177
-    else:
-        gas = 301
-    output = b.jordan_add(((A, B), (C, D)), ((E, F), (G, H)))
-    return {
-        "gas": gas,
-        "output": signed(list(output[0]) + list(output[1]))
-    }
-
-
-def modexp_substitute(base, exp, mod):
-    return {
-        "gas": 5150,
-        "output": signed([pow(base, exp, mod) if mod > 0 else 0])
-    }
-
-
-def ecrecover_substitute(z, v, r, s):
-    P, A, B, N, Gx, Gy = b.P, b.A, b.B, b.N, b.Gx, b.Gy
-    x = r
-    beta = pow(x*x*x+A*x+B, (P + 1) / 4, P)
-    BETA_PREMIUM = modexp_substitute(x, (P + 1) / 4, P)["gas"]
-    y = beta if v % 2 ^ beta % 2 else (P - beta)
-    Gz = b.jordan_multiply(((Gx, 1), (Gy, 1)), (N - z) % N)
-    GZ_PREMIUM = jacobian_mul_substitute(Gx, 1, Gy, 1, (N - z) % N)["gas"]
-    XY = b.jordan_multiply(((x, 1), (y, 1)), s)
-    XY_PREMIUM = jacobian_mul_substitute(x, 1, y, 1, s % N)["gas"]
-    Qr = b.jordan_add(Gz, XY)
-    QR_PREMIUM = jacobian_add_substitute(Gz[0][0], Gz[0][1], Gz[1][0], Gz[1][1],
-                                         XY[0][0], XY[0][1], XY[1][0], XY[1][1]
-                                         )["gas"]
-    Q = b.jordan_multiply(Qr, pow(r, N - 2, N))
-    Q_PREMIUM = jacobian_mul_substitute(Qr[0][0], Qr[0][1], Qr[1][0], Qr[1][1],
-                                        pow(r, N - 2, N))["gas"]
-    R_PREMIUM = modexp_substitute(r, N - 2, N)["gas"]
-    OX_PREMIUM = modexp_substitute(Q[0][1], P - 2, P)["gas"]
-    OY_PREMIUM = modexp_substitute(Q[1][1], P - 2, P)["gas"]
-    Q = b.from_jordan(Q)
-    return {
-        "gas": 991 + BETA_PREMIUM + GZ_PREMIUM + XY_PREMIUM + QR_PREMIUM +
-        Q_PREMIUM + R_PREMIUM + OX_PREMIUM + OY_PREMIUM,
-        "output": signed(Q)
-    }
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py
deleted file mode 100644
index 48d21e32fe1a441351c47d6518d4745e4efd59a9..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/ecc/test.py
+++ /dev/null
@@ -1,129 +0,0 @@
-from __future__ import print_function
-import bitcoin as b
-import random
-import sys
-import math
-from pyethereum import tester as t
-import substitutes
-import time
-
-vals = [random.randrange(2**256) for i in range(12)]
-
-test_points = [list(p[0]) + list(p[1]) for p in
-               [b.jordan_multiply(((b.Gx, 1), (b.Gy, 1)), r) for r in vals]]
-
-G = [b.Gx, 1, b.Gy, 1]
-Z = [0, 1, 0, 1]
-
-
-def neg_point(p):
-    return [p[0], b.P - p[1], p[2], b.P - p[3]]
-
-s = t.state()
-s.block.gas_limit = 10000000
-t.gas_limit = 1000000
-
-
-c = s.contract('modexp.se')
-print "Starting modexp tests"
-
-for i in range(0, len(vals) - 2, 3):
-    o1 = substitutes.modexp_substitute(vals[i], vals[i+1], vals[i+2])
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=vals[i:i+3])
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-c = s.contract('jacobian_add.se')
-print "Starting addition tests"
-
-for i in range(2):
-    P = test_points[i * 2]
-    Q = test_points[i * 2 + 1]
-    NP = neg_point(P)
-
-    o1 = substitutes.jacobian_add_substitute(*(P + Q))
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=P + Q)
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-    o1 = substitutes.jacobian_add_substitute(*(P + NP))
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=P + NP)
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-    o1 = substitutes.jacobian_add_substitute(*(P + P))
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=P + P)
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-    o1 = substitutes.jacobian_add_substitute(*(P + Z))
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=P + Z)
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-    o1 = substitutes.jacobian_add_substitute(*(Z + P))
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=Z + P)
-    #assert o1["gas"] == o2["gas"], (o1, o2)
-    assert o1["output"] == o2["output"], (o1, o2)
-
-
-c = s.contract('jacobian_mul.se')
-print "Starting multiplication tests"
-
-
-mul_tests = [
-    Z + [0],
-    Z + [vals[0]],
-    test_points[0] + [0],
-    test_points[1] + [b.N],
-    test_points[2] + [1],
-    test_points[2] + [2],
-    test_points[2] + [3],
-    test_points[2] + [4],
-    test_points[3] + [5],
-    test_points[3] + [6],
-    test_points[4] + [7],
-    test_points[4] + [2**254],
-    test_points[4] + [vals[1]],
-    test_points[4] + [vals[2]],
-    test_points[4] + [vals[3]],
-    test_points[5] + [2**256 - 1],
-]
-
-for i, test in enumerate(mul_tests):
-    print('trying mul_test %i' % i, test)
-    o1 = substitutes.jacobian_mul_substitute(*test)
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=test)
-    # assert o1["gas"] == o2["gas"], (o1, o2, test)
-    assert o1["output"] == o2["output"], (o1, o2, test)
-
-c = s.contract('ecrecover.se')
-print "Starting ecrecover tests"
-
-for i in range(5):
-    print('trying ecrecover_test', vals[i*2], vals[i*2+1])
-    k = vals[i*2]
-    h = vals[i*2+1]
-    V, R, S = b.ecdsa_raw_sign(b.encode(h, 256, 32), k)
-    aa = time.time()
-    o1 = substitutes.ecrecover_substitute(h, V, R, S)
-    print('sub', time.time() - aa)
-    a = time.time()
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=[h, V, R, S])
-    print(time.time() - a)
-    # assert o1["gas"] == o2["gas"], (o1, o2, h, V, R, S)
-    assert o1["output"] == o2["output"], (o1, o2, h, V, R, S)
-
-# Explicit tests
-
-data = [[
-    0xf007a9c78a4b2213220adaaf50c89a49d533fbefe09d52bbf9b0da55b0b90b60,
-    0x1b,
-    0x5228fc9e2fabfe470c32f459f4dc17ef6a0a81026e57e4d61abc3bc268fc92b5,
-    0x697d4221cd7bc5943b482173de95d3114b9f54c5f37cc7f02c6910c6dd8bd107
-]]
-
-for datum in data:
-    o1 = substitutes.ecrecover_substitute(*datum)
-    o2 = s.profile(t.k0, c, 0, funid=0, abi=datum)
-    #assert o1["gas"] == o2["gas"], (o1, o2, datum)
-    assert o1["output"] == o2["output"], (o1, o2, datum)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se
deleted file mode 100644
index 733f4a95b4a7983b064e9f05ad02d6043513d627..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/channel.se
+++ /dev/null
@@ -1,45 +0,0 @@
-if msg.data[0] == 0:
-    new_id = contract.storage[-1]
-    # store [from, to, value, maxvalue, timeout] in contract storage
-    contract.storage[new_id] = msg.sender
-    contract.storage[new_id + 1] = msg.data[1]
-    contract.storage[new_id + 2] = 0
-    contract.storage[new_id + 3] = msg.value
-    contract.storage[new_id + 4] = 2^254
-    # increment next id
-    contract.storage[-1] = new_id + 10
-    # return id of this channel
-    return(new_id)
-
-# Increase payment on channel: [1, id, value, v, r, s]
-elif msg.data[0] == 1:
-    # Ecrecover native extension; will be a different address in testnet and live
-    ecrecover = 0x46a8d0b21b1336d83b06829f568d7450df36883f
-    # Message data parameters
-    id = msg.data[1] % 2^160
-    value = msg.data[2]
-    # Determine sender from signature
-    h = sha3([id, value], 2)
-    sender = call(ecrecover, [h, msg.data[3], msg.data[4], msg.data[5]], 4)
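-    # The payer signs successive (id, value) pairs off-chain; anyone
-    # may submit one here, and only strictly increasing values within
-    # the channel's maxvalue are accepted below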
-    # Check sender matches and new value is greater than old
-    if sender == contract.storage[id]:
-        if value > contract.storage[id + 2] and value <= contract.storage[id + 3]:
-            # Update channel, increasing value and setting timeout
-            contract.storage[id + 2] = value
-            contract.storage[id + 4] = block.number + 1000
-
-# Cash out channel: [2, id]
-elif msg.data[0] == 2:
-    id = msg.data[1] % 2^160
-    # Check if timeout has run out
-    if block.number >= contract.storage[id + 4]:
-        # Send funds
-        send(contract.storage[id + 1], contract.storage[id + 2])
-        # Send refund
-        send(contract.storage[id], contract.storage[id + 3] - contract.storage[id + 2])
-        # Clear storage
-        contract.storage[id] = 0
-        contract.storage[id + 1] = 0
-        contract.storage[id + 2] = 0
-        contract.storage[id + 3] = 0
-        contract.storage[id + 4] = 0
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se
deleted file mode 100644
index 768dfb9fc71f107df96ba8071506cb44d60161ac..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/map.se
+++ /dev/null
@@ -1,19 +0,0 @@
-# An implementation of a contract for storing a key/value binding
-init:
-    # Set owner
-    contract.storage[0] = msg.sender
-code:
-    # Check ownership
-    if msg.sender == contract.storage[0]:
-        # Get: returns (found, val)
-        if msg.data[0] == 0:
-            s = sha3(msg.data[1])
-            return([contract.storage[s], contract.storage[s+1]], 2)
-        # Set: sets map[k] = v
-        elif msg.data[0] == 1:
-            s = sha3(msg.data[1])
-            contract.storage[s] = 1
-            contract.storage[s + 1] = msg.data[2]
-        # Suicide
-        elif msg.data[0] == 2:
-            suicide(0)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se
deleted file mode 100644
index 577794d9761d1cd494116826190ecc951a8d3e7a..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/multiforward.se
+++ /dev/null
@@ -1,14 +0,0 @@
-init:
-    contract.storage[0] = msg.sender
-code:
-    if msg.sender != contract.storage[0]:
-        stop
-    i = 0
-    while i < ~calldatasize():
-        to = ~calldataload(i)
-        value = ~calldataload(i+20) / 256^12
-        datasize = ~calldataload(i+32) / 256^30
-        data = alloc(datasize)
-        ~calldatacopy(data, i+34, datasize)
-        ~call(tx.gas - 25, to, value, data, datasize, 0, 0)
-        i += 34 + datasize
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se
deleted file mode 100644
index 1e466a355d8ef45086579993ec51c8562115a729..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/eth15/shadowchain.se
+++ /dev/null
@@ -1,166 +0,0 @@
-# Exists in state:
-# (i) last committed block
-# (ii) chain of uncommitted blocks (linear only)
-# (iii) transactions, each tx with an associated block number
-#
-# Uncommitted block =
-#     [ numtxs, numkvs, tx1 (N words), tx2 (N words) ..., [k1, v1], [k2, v2], [k3, v3] ... ]
-#
-# Block checking process
-#
-# Suppose last committed state is m
-# Last uncommitted state is n
-# Contested block is b
-#
-# 1. Temporarily apply all state transitions from
-# m to b
-# 2. Run code, get list of changes
-# 3. Check is list of changes matches deltas
-#   * if yes, do nothing
-#   * if no, set last uncommitted state to pre-b
-#
-# Storage variables:
-#
-# Last committed block: 0
-# Last uncommitted block: 1
-# Contract holding code: 2
-# Uncommitted map: 3
-# Transaction length (parameter): 4
-# Block b: 2^160 + b * 2^40:
-#             + 1: submission blknum
-#             + 2: submitter
-#             + 3: data in uncommitted block format above
-# Last committed storage:
-#             sha3(k): index k
-
-# Initialize: [0, c, txlength], set address of the code-holding contract and the transaction
-# length
-if not contract.storage[2]:
-    contract.storage[2] = msg.data[1]
-    contract.storage[4] = msg.data[2]
-    stop
-
-# Sequentially commit all uncommitted blocks that are more than 1000 mainchain-blocks old
-last_committed_block = contract.storage[0]
-last_uncommitted_block = contract.storage[1]
-lcb_storage_index = 2^160 + last_committed_block * 2^40
-while contract.storage[lcb_storage_index + 1] < block.number - 1000 and last_committed_block < last_uncommitted_block:
-    numtxs = contract.storage[lcb_storage_index + 3]
-    numkvs = contract.storage[lcb_storage_index + 4]
-    kv0index = lcb_storage_index + 5 + numtxs * contract.storage[4]
-    i = 0
-    while i < numkvs:
-        k = contract.storage[kv0index + i * 2]
-        v = contract.storage[kv0index + i * 2 + 1]
-        contract.storage[sha3(k)] = v
-        i += 1
-    last_committed_block += 1
-    lcb_storage_index += 2^40
-contract.storage[0] = last_committed_block
-    
-
-# Propose block: [ 0, block number, data in block format above ... ]
-if msg.data[0] == 0:
-    blknumber = msg.data[1]
-    # Block number must be correct
-    if blknumber != contract.storage[1]:
-        stop
-    # Deposit requirement
-    if msg.value < 10^19:
-        stop
-    # Store the proposal in storage as 
-    # [ 0, main-chain block number, sender, block data...]
-    start_index = 2^160 + blknumber * 2^40
-    numkvs = (msg.datasize - 2) / 2
-    contract.storage[start_index + 1] = block.number
-    contract.storage[start_index + 2] = msg.sender
-    i = 0
-    while i < msg.datasize - 2:
-        contract.storage[start_index + 3 + i] = msg.data[2 + i]
-        i += 1
-    contract.storage[1] = blknumber + 1
-
-# Challenge block: [ 1, b ]
-elif msg.data[0] == 1:
-    blknumber = msg.data[1]
-    txwidth = contract.storage[4]
-    last_uncommitted_block = contract.storage[1]
-    last_committed_block = contract.storage[0]
-    # Cannot challenge nonexistent or committed blocks
-    if blknumber >= last_uncommitted_block or blknumber < last_committed_block:
-        stop
-    # Create a contract to serve as a map that maintains keys and values
-    # temporarily
-    tempstore = create('map.se')
-    contract.storage[3] = tempstore
-    # Unquestioningly apply the state transitions from the last committed block
-    # up to b
-    b = last_committed_block
-    cur_storage_index = 2^160 + last_committed_block * 2^40
-    while b < blknumber:
-        numtxs = contract.storage[cur_storage_index + 3]
-        numkvs = contract.storage[cur_storage_index + 4]
-        kv0index = cur_storage_index + 5 + numtxs * txwidth
-        i = 0
-        while i < numkvs:
-            k = contract.storage[kv0index + i * 2]
-            v = contract.storage[kv0index + i * 2 + 1]
-            call(tempstore, [1, k, v], 3)
-            i += 1
-        b += 1
-        cur_storage_index += 2^40
-    # Run the actual code, and see what state transitions it outputs
-    # The way that the code is expected to work is to:
-    #
-    # (1) take as input the list of transactions (the contract should
-    # use msg.datasize to determine how many txs there are, and it should
-    # be aware of the value of txwidth)
-    # (2) call this contract with [2, k] to read current state data
-    # (3) call this contract with [3, k, v] to write current state data
-    # (4) return as output a list of all state transitions that it made
-    # in the form [kvcount, k1, v1, k2, v2 ... ]
-    #
-    # The reason for separating (2) from (3) is that sometimes the state
-    # transition may end up changing a given key many times, and we don't
-    # need to inefficiently store that in storage
-    numtxs = contract.storage[cur_storage_index + 3]
-    numkvs = contract.storage[cur_storage_index + 4]
-    # Populate input array
-    inpwidth = numtxs * txwidth
-    inp = array(inpwidth)
-    i = 0
-    while i < inpwidth:
-        inp[i] = contract.storage[cur_storage_index + 5 + i]
-        i += 1
-    out = call(contract.storage[2], inp, inpwidth, numkvs * 2 + 1)
-    # Check that the number of state transitions is the same
-    if out[0] != numkvs:
-        send(msg.sender, 10^19)
-        # Successful challenge: roll the last uncommitted block back to b
-        contract.storage[1] = blknumber
-        stop
-    kv0index = cur_storage_index + 5 + numtxs * txwidth
-    i = 0
-    while i < numkvs:
-        # Check that each individual state transition matches
-        k = contract.storage[kv0index + i * 2]
-        v = contract.storage[kv0index + i * 2 + 1]
-        if k != out[i * 2 + 1] or v != out[i * 2 + 2]:
-            send(msg.sender, 10^19)
-            contract.storage[1] = blknumber
-            stop
-        i += 1
-    # Suicide tempstore
-    call(tempstore, 2)
-
-
-# Read data [2, k]
-elif msg.data[0] == 2:
-    tempstore = contract.storage[3]
-    o = call(tempstore, [0, msg.data[1]], 2, 2)
-    if o[0]:
-        return(o[1])
-    else:
-        return(contract.storage[sha3(msg.data[1])])
-
-# Write data [3, k, v]
-elif msg.data[0] == 3:
-    tempstore = contract.storage[3]
-    call(tempstore, [1, msg.data[1], msg.data[2]], 3, 2)
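
The commented protocol above is compact, so a small off-chain sketch may help. The C++ below mirrors the three-step challenge check (apply the uncommitted deltas, run the code, compare its output against the claimed deltas); Delta, State, StateFn and checkBlock are illustrative names, not part of the contract.

// A minimal off-chain sketch of the challenge check above. std::map stands
// in for contract storage; all names here are illustrative.
#include <cstddef>
#include <functional>
#include <map>
#include <vector>

struct Delta { long long k, v; };
using State = std::map<long long, long long>;
using StateFn = std::function<std::vector<Delta>(State&)>;

// True if the claimed deltas match what the code actually produces,
// i.e. block b survives the challenge.
bool checkBlock(State state,                                    // last committed state (copied)
                const std::vector<std::vector<Delta>>& between, // blocks m .. b-1, applied blindly
                const std::vector<Delta>& claimed,              // deltas proposed for block b
                StateFn runCode) {
    // 1. Temporarily apply all state transitions from m up to b
    for (const auto& blk : between)
        for (const auto& d : blk)
            state[d.k] = d.v;
    // 2. Run the code, collect the changes it makes
    std::vector<Delta> actual = runCode(state);
    // 3. Compare against the claimed deltas
    if (actual.size() != claimed.size()) return false;
    for (std::size_t i = 0; i < actual.size(); i++)
        if (actual[i].k != claimed[i].k || actual[i].v != claimed[i].v)
            return false;
    return true;
}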
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se
deleted file mode 100644
index a8073c6855880cfc4a50fffae72807f25c37ef61..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/fixedpoint.se
+++ /dev/null
@@ -1,31 +0,0 @@
-type f: [a, b, c, d, e]
-
-macro f($a) + f($b):
-    f(add($a, $b))
-
-macro f($a) - f($b):
-    f(sub($a, $b))
-
-macro f($a) * f($b):
-    f(mul($a, $b) / 10000)
-
-macro f($a) / f($b):
-    f(sdiv($a * 10000, $b))
-
-macro f($a) % f($b):
-    f(smod($a, $b))
-
-macro f($v) = f($w):
-    $v = $w
-
-macro unfify(f($a)):
-    $a / 10000
-
-macro fify($a):
-    f($a * 10000)
-
-a = fify(5)
-b = fify(2)
-c = a / b
-e = c + (a / b)
-return(unfify(e))
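
For readers unfamiliar with the macro syntax, here is the same 10^4-scaled fixed-point scheme as a plain C++ sketch; Fixed and SCALE are illustrative names. Multiplication rescales after the product and division pre-scales the numerator, exactly as the * and / macros above do.

#include <cstdint>
#include <iostream>

constexpr int64_t SCALE = 10000;

struct Fixed {
    int64_t raw;  // value * SCALE
};

Fixed fify(int64_t x)             { return {x * SCALE}; }
int64_t unfify(Fixed f)           { return f.raw / SCALE; }
Fixed operator+(Fixed a, Fixed b) { return {a.raw + b.raw}; }
Fixed operator-(Fixed a, Fixed b) { return {a.raw - b.raw}; }
Fixed operator*(Fixed a, Fixed b) { return {a.raw * b.raw / SCALE}; }  // rescale after multiply
Fixed operator/(Fixed a, Fixed b) { return {a.raw * SCALE / b.raw}; }  // pre-scale the numerator

int main() {
    Fixed a = fify(5), b = fify(2);
    Fixed c = a / b;        // 2.5
    Fixed e = c + (a / b);  // 5.0
    std::cout << unfify(e) << "\n";  // prints 5, matching return(unfify(e))
    return 0;
}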
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se
deleted file mode 100644
index 58cdce6abceb42fd0c631704a8b6e46c2502a6bb..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/long_integer_macros.se
+++ /dev/null
@@ -1,116 +0,0 @@
-macro smin($a, $b):
-    with $1 = $a:
-        with $2 = $b:
-            if(slt($1, $2), $1, $2)
-
-macro smax($a, $b):
-    with $1 = $a:
-        with $2 = $b:
-            if(slt($1, $2), $2, $1)
-
-def omul(x, y):
-    o = expose(mklong(x) * mklong(y))
-    return(slice(o, 1), o[0]+1)
-
-def oadd(x, y):
-    o = expose(mklong(x) + mklong(y))
-    return(slice(o, 1), o[0]+1)
-
-def osub(x, y):
-    o = expose(mklong(x) - mklong(y))
-    return(slice(o, 1), o[0]+1)
-
-def odiv(x, y):
-    o = expose(mklong(x) / mklong(y))
-    return(slice(o, 1), o[0]+1)
-
-def comb(a:a, b:a, sign):
-    sz = smax(a[0], b[0])
-    msz = smin(a[0], b[0])
-    c = array(sz + 2)
-    c[0] = sz
-    i = 0
-    carry = 0
-    while i < msz:
-        m = a[i + 1] + sign * b[i + 1] + carry
-        c[i + 1] = mod(m + 2^127, 2^128) - 2^127
-        carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
-        i += 1
-    u = if(a[0] > msz, a, b)
-    s = if(a[0] > msz, 1, sign)
-    while i < sz:
-        m = s * u[i + 1] + carry
-        c[i + 1] = mod(m + 2^127, 2^128) - 2^127
-        carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
-        i += 1
-    if carry:
-        c[0] += 1
-        c[sz + 1] = carry
-    return(c, c[0]+1)
-
-def mul(a:a, b:a):
-    c = array(a[0] + b[0] + 2)
-    c[0] = a[0] + b[0]
-    i = 0
-    while i < a[0]:
-        j = 0
-        carry = 0
-        while j < b[0]:
-            m = c[i + j + 1] + a[i + 1] * b[j + 1] + carry
-            c[i + j + 1] = mod(m + 2^127, 2^128) - 2^127
-            carry = (div(m + 2^127, 2^128) + 2^127) % 2^128 - 2^127
-            j += 1
-        if carry:
-            c[0] = a[0] + b[0] + 1
-            c[i + j + 1] += carry
-        i += 1
-    return(c, c[0]+1)
-
-macro long($a) + long($b):
-    long(self.comb($a:$a[0]+1, $b:$b[0]+1, 1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) - long($b):
-    long(self.comb($a:$a[0]+1, $b:$b[0]+1, -1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) * long($b):
-    long(self.mul($a:$a[0]+1, $b:$b[0]+1, outsz=$a[0]+$b[0]+2))
-
-macro long($a) / long($b):
-    long(self.div($a:$a[0]+1, $b:$b[0]+1, outsz=$a[0]+$b[0]+2))
-
-macro mulexpand(long($a), $k, $m):
-    long:
-        with $c = array($a[0]+$k+2):
-            $c[0] = $a[0]+$k
-            with i = 0:
-                while i < $a[0]:
-                    v = $a[i+1] * $m + $c[i+$k+1]
-                    $c[i+$k+1] = mod(v + 2^127, 2^128) - 2^127
-                    $c[i+$k+2] = div(v + 2^127, 2^128)
-                    i += 1
-                $c
-
-def div(a:a, b:a):
-    asz = a[0]
-    bsz = b[0]
-    while b[bsz] == 0 and bsz > 0:
-        bsz -= 1
-    c = array(asz+2)
-    c[0] = asz+1
-    while 1:
-        while a[asz] == 0 and asz > 0:
-            asz -= 1
-        if asz < bsz:
-            return(c, c[0]+1)
-        sub = expose(mulexpand(long(b), asz - bsz, a[asz] / b[bsz]))
-        c[asz - bsz+1] = a[asz] / b[bsz]
-        a = expose(long(a) - long(sub))
-        a[asz-1] += 2^128 * a[asz]
-        a[asz] = 0
-        
-macro mklong($i):
-    long([2, mod($i + 2^127, 2^128) - 2^127, div($i + 2^127, 2^128)])
-
-macro expose(long($i)):
-    $i
-
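The macros above represent long integers as little-endian limb arrays in base 2^128, with each limb re-centered into [-2^127, 2^127). Below is a scaled-down C++ sketch of the carry loop in comb(), using base 2^32 so the arithmetic fits in int64_t; umod, HALF and BASE are illustrative names.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

constexpr int64_t HALF = INT64_C(1) << 31;  // stands in for 2^127
constexpr int64_t BASE = INT64_C(1) << 32;  // stands in for 2^128

// Always-nonnegative remainder in [0, BASE)
int64_t umod(int64_t x) { return ((x % BASE) + BASE) % BASE; }

// c = a + sign * b over little-endian signed limbs, like comb() above
std::vector<int64_t> comb(std::vector<int64_t> a, std::vector<int64_t> b, int sign) {
    std::size_t sz = std::max(a.size(), b.size());
    a.resize(sz);
    b.resize(sz);
    std::vector<int64_t> c(sz + 1);
    int64_t carry = 0;
    for (std::size_t i = 0; i < sz; i++) {
        int64_t m = a[i] + sign * b[i] + carry;
        c[i] = umod(m + HALF) - HALF;  // limb re-centered into [-HALF, HALF)
        carry = (m - c[i]) / BASE;     // exact: m - c[i] is a multiple of BASE
    }
    c[sz] = carry;                     // matches the `if carry:` extension above
    return c;
}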
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se
deleted file mode 100644
index 65adff1e62368b1729e0c98690b9d5f06bae5bb6..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mul2.se
+++ /dev/null
@@ -1,2 +0,0 @@
-def double(v):
-    return(v*2)
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se
deleted file mode 100644
index 3efb0edebba6ef51cd4e62e7384015b16216b949..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/mutuala.se
+++ /dev/null
@@ -1,187 +0,0 @@
-# mutuala - subcurrency
-
-# We want to issue a currency that reduces in value as you store it through negative interest.
-# That negative interest would be stored in a commons account.  It's like the p2p version of a
-# capital tax
-
-# the same thing goes for transactions - you pay as you use the currency.  However, the more
-# you pay, the more say you get about what the tax is used for
-
-# each participant can propose a recipient for a payout to be made out of the commons account,
-# others can vote on it by awarding it tax_credits.
-
-# TODO should proposal have expiration timestamp?, after which the tax_credits are refunded
-# TODO multiple proposals can take more credits than are available in the Commons, how to handle this
-# TODO how to handle lost accounts, after which no longer possible to get 2/3 majority
-
-shared:
-    COMMONS = 42
-    ADMIN = 666
-    CAPITAL_TAX_PER_DAY = 7305 # 5% per year
-    PAYMENT_TAX = 20 # 5%
-
-    ACCOUNT_LIST_OFFSET = 2^160
-    ACCOUNT_MAP_OFFSET = 2^161
-    PROPOSAL_LIST_OFFSET = 2^162
-    PROPOSAL_MAP_OFFSET = 2^163
-
-init:
-    contract.storage[ADMIN] = msg.sender
-    contract.storage[ACCOUNT_LIST_OFFSET - 1] = 1
-    contract.storage[ACCOUNT_LIST_OFFSET] = msg.sender
-    contract.storage[ACCOUNT_MAP_OFFSET + msg.sender] = 10^12
-    contract.storage[ACCOUNT_MAP_OFFSET + msg.sender + 1] = block.timestamp
-
-# contract.storage[COMMONS] = balance commons
-
-# contract.storage[ACCOUNT_LIST_OFFSET - 1] = number of accounts
-# contract.storage[ACCOUNT_LIST_OFFSET + n] = account n
-
-# contract.storage[PROPOSAL_LIST_OFFSET - 1] contains the number of proposals
-# contract.storage[PROPOSAL_LIST_OFFSET + n] = proposal n
-
-# per account:
-# contract.storage[ACCOUNT_MAP_OFFSET + account] = balance
-# contract.storage[ACCOUNT_MAP_OFFSET + account+1] = timestamp_last_transaction
-# contract.storage[ACCOUNT_MAP_OFFSET + account+2] = tax_credits
-
-# per proposal:
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = recipient
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id+1] = amount
-# contract.storage[PROPOSAL_MAP_OFFSET + proposal_id+2] = total vote credits
-
-code:
-    if msg.data[0] == "suicide" and msg.sender == contract.storage[ADMIN]:
-        suicide(msg.sender)
-
-    elif msg.data[0] == "balance":
-        addr = msg.data[1]
-        return(contract.storage[ACCOUNT_MAP_OFFSET + addr])
-
-    elif msg.data[0] == "pay":
-        from = msg.sender
-        fromvalue = contract.storage[ACCOUNT_MAP_OFFSET + from]
-        to = msg.data[1]
-        if to == 0 or to >= 2^160:
-            return([0, "invalid address"], 2)
-        value = msg.data[2]
-        tax = value / PAYMENT_TAX
-
-        if fromvalue >= value + tax:
-            contract.storage[ACCOUNT_MAP_OFFSET + from] = fromvalue - (value + tax)
-            contract.storage[ACCOUNT_MAP_OFFSET + to] += value
-            # tax
-            contract.storage[COMMONS] += tax
-            contract.storage[ACCOUNT_MAP_OFFSET + from + 2] += tax
-
-            # check timestamp field to see if target account exists
-            if contract.storage[ACCOUNT_MAP_OFFSET + to + 1] == 0:
-                # register new account
-                nr_accounts = contract.storage[ACCOUNT_LIST_OFFSET - 1]
-                contract.storage[ACCOUNT_LIST_OFFSET + nr_accounts] = to
-                contract.storage[ACCOUNT_LIST_OFFSET - 1] += 1
-                contract.storage[ACCOUNT_MAP_OFFSET + to + 1] = block.timestamp
-
-            return(1)
-        else:
-            return([0, "insufficient balance"], 2)
-
-    elif msg.data[0] == "hash":
-        proposal_id = sha3(msg.data[1])
-        return(proposal_id)
-
-    elif msg.data[0] == "propose":
-        from = msg.sender
-        # check if sender has an account and has tax credits
-        if contract.storage[ACCOUNT_MAP_OFFSET + from + 2] == 0:
-            return([0, "sender has no tax credits"], 2)
-
-        proposal_id = sha3(msg.data[1])
-        # check if proposal doesn't already exist
-        if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id]:
-            return([0, "proposal already exists"], 2)
-
-        to = msg.data[2]
-        # check if recipient is a valid address and has an account (with timestamp)
-        if to == 0 or to >= 2^160:
-            return([0, "invalid address"], 2)
-        if contract.storage[ACCOUNT_MAP_OFFSET + to + 1] == 0:
-            return([0, "invalid to account"], 2)
-
-        value = msg.data[3]
-        # check if there is enough money in the commons account
-        if value > contract.storage[COMMONS]:
-            return([0, "not enough credits in commons"], 2)
-
-        # record proposal in list
-        nr_proposals = contract.storage[PROPOSAL_LIST_OFFSET - 1]
-        contract.storage[PROPOSAL_LIST_OFFSET + nr_proposals] = proposal_id
-        contract.storage[PROPOSAL_LIST_OFFSET - 1] += 1
-
-        # record proposal in map
-        contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = to
-        contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1] = value
-
-        return(proposal_id)
-
-    elif msg.data[0] == "vote":
-        from = msg.sender
-        proposal_id = sha3(msg.data[1])
-        value = msg.data[2]
-        # check if sender has an account and has tax credits
-        if value > contract.storage[ACCOUNT_MAP_OFFSET + from + 2]:
-            return([0, "sender doesn't have enough tax credits"], 2)
-
-        # check if proposal exist
-        if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] == 0:
-            return([0, "proposal doesn't exist"], 2)
-
-        # increase votes
-        contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] += value
-        # withdraw tax credits
-        contract.storage[ACCOUNT_MAP_OFFSET + from + 2] -= value
-
-        # did we reach 2/3 threshold?
-        if contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] >= contract.storage[COMMONS] * 2 / 3:
-            # got majority
-            to = contract.storage[PROPOSAL_MAP_OFFSET + proposal_id]
-            amount = contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1]
-
-            # adjust balances
-            contract.storage[ACCOUNT_MAP_OFFSET + to] += amount
-            contract.storage[COMMONS] -= amount
-
-            # reset proposal
-            contract.storage[PROPOSAL_MAP_OFFSET + proposal_id] = 0
-            contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 1] = 0
-            contract.storage[PROPOSAL_MAP_OFFSET + proposal_id + 2] = 0
-            return(1)
-
-        return(proposal_id)
-
-    elif msg.data[0] == "tick":
-        nr_accounts = contract.storage[ACCOUNT_LIST_OFFSET - 1]
-        account_idx = 0
-        tax_paid = 0
-        # process all accounts and see if they have to pay their daily capital tax
-        while account_idx < nr_accounts:
-            cur_account = contract.storage[ACCOUNT_LIST_OFFSET + account_idx]
-            last_timestamp = contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 1]
-            time_diff = block.timestamp - last_timestamp
-            if time_diff >= 86400:
-                tax_days = time_diff / 86400
-                balance = contract.storage[ACCOUNT_MAP_OFFSET + cur_account]
-                tax = tax_days * (balance / CAPITAL_TAX_PER_DAY)
-                if tax > 0:
-                    # charge capital tax, but give tax credits in return
-                    contract.storage[ACCOUNT_MAP_OFFSET + cur_account] -= tax
-                    contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 1] += tax_days * 86400
-                    contract.storage[ACCOUNT_MAP_OFFSET + cur_account + 2] += tax
-
-                    contract.storage[COMMONS] += tax
-                    tax_paid += 1
-            account_idx += 1
-        return(tax_paid) # how many accounts did we charge tax on
-
-    else:
-        return([0, "unknown command"], 2)
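
As a quick sanity check of the "tick" arithmetic: CAPITAL_TAX_PER_DAY = 7305 charges balance / 7305 per whole day, i.e. roughly 365.25 / 7305 = 5% per year. A standalone C++ rendering of the per-account computation (all names illustrative):

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t CAPITAL_TAX_PER_DAY = 7305;
    int64_t balance   = 1000000000;        // 10^9 units
    int64_t time_diff = 3 * 86400 + 4000;  // just over three days
    int64_t tax_days  = time_diff / 86400; // 3: whole days only
    int64_t tax = tax_days * (balance / CAPITAL_TAX_PER_DAY);
    // The account is charged `tax`, credited `tax` in tax credits, and its
    // timestamp advances by tax_days * 86400 so the remainder carries over.
    printf("tax over %lld day(s): %lld\n", (long long)tax_days, (long long)tax);
    return 0;
}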
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se
deleted file mode 100644
index 11d6274aee98711e3b94c0ab47347c1fde420322..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/namecoin.se
+++ /dev/null
@@ -1,7 +0,0 @@
-def register(k, v):
-    if !self.storage[k]: # Is the key not yet taken?
-        # Then take it!
-        self.storage[k] = v
-        return(1)
-    else:
-        return(0) # Otherwise do nothing
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se
deleted file mode 100644
index 979854444c039adbc3187b9353a720ee077855e5..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/peano.se
+++ /dev/null
@@ -1,43 +0,0 @@
-macro padd($x, psuc($y)):
-    psuc(padd($x, $y))
-  
-macro padd($x, z()):
-    $x
-  
-macro dec(psuc($x)):
-    dec($x) + 1
-  
-macro dec(z()):
-    0
-  
-macro pmul($x, z()):
-    z()
-  
-macro pmul($x, psuc($y)):
-    padd(pmul($x, $y), $x)
-  
-macro pexp($x, z()):
-    one()
-  
-macro pexp($x, psuc($y)):
-    pmul($x, pexp($x, $y))
-  
-macro fac(z()):
-    one()
-  
-macro fac(psuc($x)):
-    pmul(psuc($x), fac($x))
-  
-macro one():
-    psuc(z())
-  
-macro two():
-    psuc(psuc(z()))
-  
-macro three():
-    psuc(psuc(psuc(z())))
-  
-macro five():
-    padd(three(), two())
-  
-return([dec(pmul(three(), pmul(three(), three()))), dec(fac(five()))], 2)
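
The macros compute entirely at expansion time by pattern-matching on the z() / psuc(...) constructors. C++ templates can express the same idea, which may clarify the expansion; this sketch is purely illustrative and checks 3*3*3 = 27 like the return above.

struct Z {};
template <class N> struct Suc {};

template <class A, class B> struct Add;                  // padd
template <class A> struct Add<A, Z> { using type = A; };
template <class A, class B> struct Add<A, Suc<B>> {
    using type = Suc<typename Add<A, B>::type>;
};

template <class A, class B> struct Mul;                  // pmul
template <class A> struct Mul<A, Z> { using type = Z; };
template <class A, class B> struct Mul<A, Suc<B>> {
    using type = typename Add<typename Mul<A, B>::type, A>::type;
};

template <class N> struct Dec;                           // dec
template <> struct Dec<Z> { static const int value = 0; };
template <class N> struct Dec<Suc<N>> { static const int value = Dec<N>::value + 1; };

using Three = Suc<Suc<Suc<Z>>>;
using Nine  = Mul<Three, Three>::type;
static_assert(Dec<Mul<Three, Nine>::type>::value == 27,
              "matches dec(pmul(three(), pmul(three(), three())))");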
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se
deleted file mode 100644
index 7969c9eb83173bf92ec6648ce7d4c34b630653b6..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/returnten.se
+++ /dev/null
@@ -1,4 +0,0 @@
-extern mul2: [double]
-  
-x = create("mul2.se")
-return(x.double(5))
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se
deleted file mode 100644
index be5d97fc7c6331a85b7c1c7d22c8b324ea51cc23..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort.se
+++ /dev/null
@@ -1,33 +0,0 @@
-def kall():
-    argcount = ~calldatasize() / 32
-    if argcount == 1:
-        return(~calldataload(1))
-
-    args = array(argcount)
-    ~calldatacopy(args, 1, argcount * 32)
-    low = array(argcount)
-    lsz = 0
-    high = array(argcount)
-    hsz = 0
-    i = 1
-    while i < argcount:
-        if args[i] < args[0]:
-            low[lsz] = args[i]
-            lsz += 1
-        else:
-            high[hsz] = args[i]
-            hsz += 1
-        i += 1
-    low = self.kall(data=low, datasz=lsz, outsz=lsz)
-    high = self.kall(data=high, datasz=hsz, outsz=hsz)
-    o = array(argcount)
-    i = 0
-    while i < lsz:
-        o[i] = low[i]
-        i += 1
-    o[lsz] = args[0]
-    j = 0
-    while j < hsz:
-        o[lsz + 1 + j] = high[j]
-        j += 1
-    return(o, argcount)
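
Stripped of the contract-call plumbing (self.kall recursing via message calls), this is an ordinary first-element-pivot quicksort. A plain C++ equivalent:

#include <cstddef>
#include <vector>

std::vector<long long> qsort_first_pivot(const std::vector<long long>& args) {
    if (args.size() <= 1) return args;
    std::vector<long long> low, high;
    for (std::size_t i = 1; i < args.size(); i++)
        (args[i] < args[0] ? low : high).push_back(args[i]);  // partition on args[0]
    low  = qsort_first_pivot(low);   // self.kall(data=low, ...)
    high = qsort_first_pivot(high);  // self.kall(data=high, ...)
    std::vector<long long> o(low);
    o.push_back(args[0]);            // pivot goes between the halves
    o.insert(o.end(), high.begin(), high.end());
    return o;
}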
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se
deleted file mode 100644
index 0e603a238b4d0cffb9c87544ba2c3aeeb6189ca8..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/quicksort_pairs.se
+++ /dev/null
@@ -1,46 +0,0 @@
-# Quicksort pairs
-# eg. input of the form [ 30, 1, 90, 2, 70, 3, 50, 4]
-# outputs [ 30, 1, 50, 4, 70, 3, 90, 2 ]
-#
-# Note: this can be used as a generalized sorting algorithm:
-# map every object to [ key, ref ] where `ref` is the index
-# in memory to all of the properties and `key` is the key to
-# sort by
-
-
-def kall():
-    argcount = ~calldatasize() / 64
-    if argcount == 1:
-        return([~calldataload(1), ~calldataload(33)], 2)
-
-    args = array(argcount * 2)
-    ~calldatacopy(args, 1, argcount * 64)
-    low = array(argcount * 2)
-    lsz = 0
-    high = array(argcount * 2)
-    hsz = 0
-    i = 2
-    while i < argcount * 2:
-        if args[i] < args[0]:
-            low[lsz] = args[i]
-            low[lsz + 1] = args[i + 1]
-            lsz += 2
-        else:
-            high[hsz] = args[i]
-            high[hsz + 1] = args[i + 1]
-            hsz += 2
-        i = i + 2
-    low = self.kall(data=low, datasz=lsz, outsz=lsz)
-    high = self.kall(data=high, datasz=hsz, outsz=hsz)
-    o = array(argcount * 2)
-    i = 0
-    while i < lsz:
-        o[i] = low[i]
-        i += 1
-    o[lsz] = args[0]
-    o[lsz + 1] = args[1]
-    j = 0
-    while j < hsz:
-        o[lsz + 2 + j] = high[j]
-        j += 1
-    return(o, argcount * 2)
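
Off-chain, the (key, ref) sort the header comment describes is just a comparator on the key; refs point at the full objects stored elsewhere. A minimal C++ sketch (names illustrative):

#include <algorithm>
#include <utility>
#include <vector>

using KV = std::pair<long long, long long>;  // (key, ref)

std::vector<KV> sort_pairs(std::vector<KV> kv) {
    std::sort(kv.begin(), kv.end(),
              [](const KV& a, const KV& b) { return a.first < b.first; });
    return kv;  // [30,1, 90,2, 70,3, 50,4] -> [30,1, 50,4, 70,3, 90,2]
}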
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se
deleted file mode 100644
index a7d7da9c54c74e3a18b493a59193821f8ba34a43..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingcoin.se
+++ /dev/null
@@ -1,94 +0,0 @@
-# SchellingCoin implementation
-#
-# Epoch length: 100 blocks
-# Target savings depletion rate: 0.1% per epoch
-
-data epoch
-data hashes_submitted
-data output
-data quicksort_pairs
-data accounts[2^160]
-data submissions[2^80](hash, deposit, address, value)
-extern any: [call]
-
-
-def init():
-    self.epoch = block.number / 100
-    self.quicksort_pairs = create('quicksort_pairs.se')
-
-def any():
-    if block.number / 100 > self.epoch:
-        # Sort all values submitted
-        N = self.hashes_submitted
-        o = array(N * 2)
-        i = 0
-        j = 0
-        while i < N:
-            v = self.submissions[i].value
-            if v:
-                o[j] = v
-                o[j + 1] = i
-                j += 2
-            i += 1
-        values = self.quicksort_pairs.call(data=o, datasz=j, outsz=j)
-
-        # Calculate total deposit, refund non-submitters and
-        # cleanup
-
-        deposits = array(j / 2)
-        addresses = array(j / 2)
-        
-        i = 0
-        total_deposit = 0
-        while i < j / 2:
-            idx = values[i * 2 + 1]
-            deposits[i] = self.submissions[idx].deposit
-            addresses[i] = self.submissions[idx].address
-            if self.submissions[idx].value:
-                total_deposit += deposits[i]
-            else:
-                send(addresses[i], deposits[i] * 999 / 1000)
-            i += 1
-
-        inverse_profit_ratio = total_deposit / (contract.balance / 1000) + 1
-
-        # Reward everyone
-        i = 0
-        running_deposit_sum = 0
-        halfway_passed = 0
-        while i < j / 2:
-            new_deposit_sum = running_deposit_sum + deposits[i]
-            if new_deposit_sum > total_deposit / 4 and running_deposit_sum < total_deposit * 3 / 4:
-                send(addresses[i], deposits[i] + deposits[i] / inverse_profit_ratio * 2)
-            else:
-                send(addresses[i], deposits[i] - deposits[i] / inverse_profit_ratio)
-
-            if not halfway_passed and new_deposit_sum > total_deposit / 2:
-                self.output = self.submissions[i].value
-                halfway_passed = 1
-            self.submissions[i].value = 0
-            running_deposit_sum = new_deposit_sum
-            i += 1
-        self.epoch = block.number / 100
-        self.hashes_submitted = 0
-        
-def submit_hash(h):
-    if block.number % 100 < 50:
-        cur = self.hashes_submitted
-        self.submissions[cur].hash = h
-        self.submissions[cur].deposit = msg.value
-        self.submissions[cur].address = msg.sender
-        self.hashes_submitted = cur + 1
-        return(cur)
-
-def submit_value(index, v):
-    if sha3([msg.sender, v], 2) == self.submissions[index].hash:
-        self.submissions[index].value = v
-        return(1)
-
-def request_balance():
-    return(contract.balance)
-
-def request_output():
-    return(self.output)
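
The reward rule may be easier to follow off-chain: after sorting by submitted value, entries whose cumulative-deposit position falls in the middle 25%..75% band earn a bonus, the rest pay a fee, and the reported output is the deposit-weighted median. A C++ sketch under those assumptions (Submission and settle are illustrative names):

#include <cstddef>
#include <cstdint>
#include <vector>

struct Submission { int64_t value, deposit; };

// Assumes subs is already sorted by value, as the quicksort step guarantees.
int64_t settle(const std::vector<Submission>& subs, int64_t inverse_profit_ratio,
               std::vector<int64_t>& payouts) {
    int64_t total = 0, output = -1;
    for (const auto& s : subs) total += s.deposit;
    int64_t running = 0;
    bool halfway_passed = false;
    payouts.assign(subs.size(), 0);
    for (std::size_t i = 0; i < subs.size(); i++) {
        int64_t next = running + subs[i].deposit;
        if (next > total / 4 && running < total * 3 / 4)
            payouts[i] = subs[i].deposit + subs[i].deposit / inverse_profit_ratio * 2;
        else
            payouts[i] = subs[i].deposit - subs[i].deposit / inverse_profit_ratio;
        if (!halfway_passed && next > total / 2) {
            output = subs[i].value;  // deposit-weighted median
            halfway_passed = true;
        }
        running = next;
    }
    return output;
}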
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se
deleted file mode 100644
index a34f42ce2d7f455757d8a87d6434a3b253018735..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellingcoin/schellingdollar.se
+++ /dev/null
@@ -1,171 +0,0 @@
-# Hedged zero-supply dollar implementation
-# Uses SchellingCoin as price-determining backend
-#
-# Stored variables:
-#
-# 0: Schelling coin contract
-# 1: Last epoch
-# 2: Genesis block of contract
-# 3: USD exposure
-# 4: ETH exposure
-# 5: Cached price
-# 6: Last interest rate
-# 2^160 + k: interest rate accumulator at k epochs
-# 2^161 + ADDR * 3: eth-balance of a particular address
-# 2^161 + ADDR * 3 + 1: usd-balance of a particular address
-# 2^161 + ADDR * 3 + 2: last accessed epoch of a particular address
-#
-# Transaction types:
-#
-# [1, to, val]: send ETH
-# [2, to, val]: send USD
-# [3, wei_amount]: convert ETH to USD
-# [4, usd_amount]: convert USD to ETH
-# [5]: deposit
-# [6, amount]: withdraw
-# [7]: my balance query
-# [7, acct]: balance query for any acct
-# [8]: global state query
-# [9]: liquidation test any account
-#
-# The purpose of the contract is to serve as a sort of cryptographic
-# bank account where users can store both ETH and USD. ETH must be
-# stored in zero or positive quantities, but USD balances can be
-# positive or negative. If the USD balance is negative, the invariant
-# usdbal * usdprice * 10 + ethbal * 9 >= 0 must be satisfied; if any account falls
-# below this value, then that account's balances are zeroed. Note
-# that there is a 2% bounty to ping the app if an account does go
-# below zero; one weakness is that if no one does ping then it is
-# quite possible for accounts to go negative-net-worth, then zero
-# themselves out, draining the reserves of the "bank" and potentially
-# bankrupting it. A 0.1% fee on ETH <-> USD trade is charged to
-# minimize this risk. Additionally, the bank itself will inevitably
-# end up with positive or negative USD exposure; to mitigate this,
-# it automatically updates interest rates on USD to keep exposure
-# near zero.
-
-data schelling_coin
-data last_epoch
-data starting_block
-data usd_exposure
-data eth_exposure
-data price
-data last_interest_rate
-data interest_rate_accum[2^50]
-data accounts[2^160](eth, usd, last_epoch)
-
-extern sc: [submit_hash, submit_value, request_balance, request_output]
-
-def init():
-    self.schelling_coin = create('schellingcoin.se')
-    self.price = self.schelling_coin.request_output()
-    self.interest_rate_accum[0] = 10^18
-    self.starting_block = block.number
-
-def any():
-    sender = msg.sender
-    epoch = (block.number - self.starting_block) / 100
-    last_epoch = self.last_epoch
-    usdprice = self.price
-
-    # Update contract epochs
-    if epoch > last_epoch:
-        delta = epoch - last_epoch
-        last_interest_rate = self.last_interest_rate
-        usd_exposure = self.usd_exposure
-        last_accum = self.interest_rate_accum[last_epoch]
-
-        if usd_exposure < 0:
-            self.last_interest_rate = last_interest_rate - 10000 * delta
-        elif usd_exposure > 0:
-            self.last_interest_rate = last_interest_rate + 10000 * delta
-        
-        self.interest_rate_accum[epoch] = last_accum + last_accum * last_interest_rate * delta / 10^9
-
-        # Proceeds go to support the SchellingCoin that feeds this contract its
-        # price data, ultimately providing the SchellingCoin's depositors an interest rate
-        bal = max(self.balance - self.eth_exposure, 0) / 10000
-        usdprice = self.schelling_coin.request_output()
-        self.price = usdprice
-        self.last_epoch = epoch
-
-    # Apply interest rates to sender and liquidation-test self
-    self.ping(msg.sender)
-    if msg.sender != self:
-        self.ping(self)
-    
-def send_eth(to, value):
-    ethbal = self.accounts[msg.sender].eth
-    usdbal = self.accounts[msg.sender].usd
-    usdprice = self.price
-    if value > 0 and value <= ethbal and usdbal * usdprice * 2 + (ethbal - value) >= 0:
-        self.accounts[msg.sender].eth = ethbal - value
-        self.ping(to)
-        self.accounts[to].eth += value
-        return(1)
-
-def send_usd(to, value):
-    ethbal = self.accounts[msg.sender].eth
-    usdbal = self.accounts[msg.sender].usd
-    usdprice = self.price
-    if value > 0 and value <= usdbal and (usdbal - value) * usdprice * 2 + ethbal >= 0:
-        self.accounts[msg.sender].usd = usdbal - value
-        self.ping(to)
-        self.accounts[to].usd += value
-        return(1)
-
-def convert_to_eth(usdvalue):
-    ethbal = self.accounts[msg.sender].eth
-    usdbal = self.accounts[msg.sender].usd
-    usdprice = self.price
-    ethplus = usdvalue * usdprice * 999 / 1000
-    if usdvalue > 0 and (usdbal - usdvalue) * usdprice * 2 + (ethbal + ethplus) >= 0:
-        self.accounts[msg.sender].eth = ethbal + ethplus
-        self.accounts[msg.sender].usd = usdbal - usdvalue
-        self.eth_exposure += ethplus
-        self.usd_exposure -= usdvalue
-        return([ethbal + ethplus, usdbal - usdvalue], 2)
-
-def convert_to_usd(ethvalue):
-    ethbal = self.accounts[msg.sender].eth
-    usdbal = self.accounts[msg.sender].usd
-    usdprice = self.price
-    usdplus = ethvalue / usdprice * 999 / 1000
-    if ethvalue > 0 and (usdbal + usdplus) * usdprice * 2 + (ethbal - ethvalue) >= 0:
-        self.accounts[msg.sender].eth = ethbal - ethvalue
-        self.accounts[msg.sender].usd = usdbal + usdplus
-        self.eth_exposure -= ethvalue
-        self.usd_exposure += usdplus
-        return([ethbal - ethvalue, usdbal + usdplus], 2)
-
-def deposit():
-    ethbal = self.accounts[msg.sender].eth
-    self.accounts[msg.sender].eth = ethbal + msg.value
-    self.eth_exposure += msg.value
-    return(ethbal + msg.value)
-
-def withdraw(value):
-    ethbal = self.accounts[msg.sender].eth
-    usdbal = self.accounts[msg.sender].usd
-    usdprice = self.price
-    if value > 0 and value <= ethbal and usdbal * usdprice * 2 + (ethbal - value) >= 0:
-        self.accounts[msg.sender].eth -= value
-        self.eth_exposure -= value
-        return(ethbal - value)
-
-def balance(acct):
-    self.ping(acct)
-    return([self.accounts[acct].eth, self.accounts[acct].usd], 2)
-
-def global_state_query(acct):
-    epoch = (block.number - self.starting_block) / 100
-    usdprice = self.price
-    interest = self.last_interest_rate
-    usd_exposure = self.usd_exposure
-    eth_exposure = self.eth_exposure
-    eth_balance = self.balance
-    return([epoch, usdprice, interest, usd_exposure, eth_exposure, eth_balance], 6)
-
-def ping(acct):
-    epoch = (block.number - self.starting_block) / 100
-    usdprice = self.price
-    account_last_epoch = self.accounts[acct].last_epoch
-    if account_last_epoch != epoch:
-        cur_usd_balance = self.accounts[acct].usd
-        new_usd_balance = cur_usd_balance * self.interest_rate_accum[epoch] / self.interest_rate_accum[account_last_epoch]
-        self.accounts[acct].usd = new_usd_balance
-        self.accounts[acct].last_epoch = epoch
-        self.usd_exposure += new_usd_balance - cur_usd_balance
-        
-        ethbal = self.accounts[acct].eth
-
-        if new_usd_balance * usdprice * 10 + ethbal * 9 < 0:
-            self.accounts[acct].eth = 0
-            self.accounts[acct].usd = 0
-            self.accounts[msg.sender].eth += ethbal / 50
-            self.eth_exposure += -ethbal + ethbal / 50
-            self.usd_exposure += new_usd_balance
-            return(1)
-        return(0)
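
The interest mechanism keys off a per-epoch accumulator: each epoch multiplies it by (1 + rate / 10^9), and ping() rescales a stale USD balance by the ratio of accumulators. A C++ sketch of that arithmetic, using doubles only for readability (the contract uses 10^18-scaled integers):

#include <cstdio>

int main() {
    const double RATE_UNIT = 1e9;
    double accum[4];
    accum[0] = 1.0;              // interest_rate_accum[0] = 10^18 in the contract
    double rate = 20000;         // per-epoch rate, in 10^-9 units
    for (int e = 1; e < 4; e++)
        accum[e] = accum[e - 1] * (1.0 + rate / RATE_UNIT);
    double usd_at_epoch_1 = -100.0;  // negative balance = debt, so interest grows it
    double usd_now = usd_at_epoch_1 * accum[3] / accum[1];  // what ping() computes
    printf("rescaled balance: %f\n", usd_now);
    return 0;
}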
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se
deleted file mode 100644
index 0e522d6e8d595ebdf6bda99fd1e1c60240e6efc8..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/schellinghelper.se
+++ /dev/null
@@ -1 +0,0 @@
-return(sha3([msg.sender, msg.data[0]], 2))
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se
deleted file mode 100644
index db327a77d37d8feacfd8a6e46b28987d7fb729da..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/short_namecoin.se
+++ /dev/null
@@ -1,3 +0,0 @@
-def register(k, v):
-    if !self.storage[k]:
-        self.storage[k] = v
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se
deleted file mode 100644
index fbda822b6a49a6d70bb52f7bf579d0fad0c54154..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/examples/subcurrency.se
+++ /dev/null
@@ -1,11 +0,0 @@
-def init():
-    self.storage[msg.sender] = 1000000
-
-def balance_query(k):
-    return(self.storage[k])
-
-def send(to, value):
-    fromvalue = self.storage[msg.sender]
-    if fromvalue >= value:
-        self.storage[msg.sender] = fromvalue - value
-        self.storage[to] += value
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp
deleted file mode 100644
index ea9be14a6735da0a141a1683deb6a8d0a3dcbb20..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include "funcs.h"
-#include "bignum.h"
-#include "util.h"
-#include "parser.h"
-#include "lllparser.h"
-#include "compiler.h"
-#include "rewriter.h"
-#include "tokenize.h"
-
-Node compileToLLL(std::string input) {
-    return rewrite(parseSerpent(input));
-}
-
-Node compileChunkToLLL(std::string input) {
-    return rewriteChunk(parseSerpent(input));
-}
-
-std::string compile(std::string input) {
-    return compileLLL(compileToLLL(input));
-}
-
-std::vector<Node> prettyCompile(std::string input) {
-    return prettyCompileLLL(compileToLLL(input));
-}
-
-std::string compileChunk(std::string input) {
-    return compileLLL(compileChunkToLLL(input));
-}
-
-std::vector<Node> prettyCompileChunk(std::string input) {
-    return prettyCompileLLL(compileChunkToLLL(input));
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h
deleted file mode 100644
index d9bf4454903164286438169e1ea4985011467343..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/funcs.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include "bignum.h"
-#include "util.h"
-#include "parser.h"
-#include "lllparser.h"
-#include "compiler.h"
-#include "rewriter.h"
-#include "tokenize.h"
-
-// Function listing:
-//
-// parseSerpent      (serpent -> AST)      std::string -> Node
-// parseLLL          (LLL -> AST)          std::string -> Node
-// rewrite           (apply rewrite rules) Node -> Node
-// compileToLLL      (serpent -> LLL)      std::string -> Node
-// compileLLL        (LLL -> EVMhex)       Node -> std::string
-// prettyCompileLLL  (LLL -> EVMasm)       Node -> std::vector<Node>
-// prettyCompile     (serpent -> EVMasm)   std::string -> std::vector<Node>
-// compile           (serpent -> EVMhex)   std::string -> std::string
-// get_file_contents (filename -> file)    std::string -> std::string
-// exists            (does file exist?)    std::string -> bool
-
-Node compileToLLL(std::string input);
-
-Node compileChunkToLLL(std::string input);
-
-std::string compile(std::string input);
-
-std::vector<Node> prettyCompile(std::string input);
-
-std::string compileChunk(std::string input);
-
-std::vector<Node> prettyCompileChunk(std::string input);
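
Given the listing above, a minimal driver for the serpent -> LLL -> EVM-hex pipeline might look like this sketch. It assumes compilation against these serpent sources, and that printAST (declared in util.h, per the includes above) renders an AST; treat both as assumptions.

#include <iostream>
#include <string>
#include "funcs.h"

int main() {
    std::string src = "return(1 + 2)";
    Node lll = compileToLLL(src);       // serpent -> LLL parse tree
    std::string evmhex = compile(src);  // serpent -> EVM bytecode (hex)
    std::cout << printAST(lll) << "\n" << evmhex << "\n";  // printAST: assumed util.h helper
    return 0;
}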
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp
deleted file mode 100644
index 78e12e84a00715db3794e6ef2dafaf99b12acf8f..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.cpp
+++ /dev/null
@@ -1,203 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-#include "functions.h"
-
-std::string getSignature(std::vector<Node> args) {
-    std::string o;
-    for (unsigned i = 0; i < args.size(); i++) {
-        if (args[i].val == ":" && args[i].args[1].val == "s")
-            o += "s";
-        else if (args[i].val == ":" && args[i].args[1].val == "a")
-            o += "a";
-        else
-            o += "i";
-    }
-    return o;
-}
-
-// Convert a list of arguments into a node containing a
-// < datastart, datasz > pair
-
-Node packArguments(std::vector<Node> args, std::string sig,
-                      int funId, Metadata m) {
-    // Plain old 32 byte arguments
-    std::vector<Node> nargs;
-    // Variable-sized arguments
-    std::vector<Node> vargs;
-    // Variable sizes
-    std::vector<Node> sizes;
-    // Is a variable an array?
-    std::vector<bool> isArray;
-    // Fill up above three argument lists
-    int argCount = 0;
-    for (unsigned i = 0; i < args.size(); i++) {
-        Metadata m = args[i].metadata;
-        if (args[i].val == "=") {
-            // do nothing
-        }
-        else {
-            // Determine the correct argument type
-            char argType;
-            if (sig.size() > 0) {
-                if (argCount >= (signed)sig.size())
-                    err("Too many args", m);
-                argType = sig[argCount];
-            }
-            else argType = 'i';
-            // Integer (also usable for short strings)
-            if (argType == 'i') {
-                if (args[i].val == ":")
-                    err("Function asks for int, provided string or array", m);
-                nargs.push_back(args[i]);
-            }
-            // Long string
-            else if (argType == 's') {
-                if (args[i].val != ":")
-                    err("Must specify string length", m);
-                vargs.push_back(args[i].args[0]);
-                sizes.push_back(args[i].args[1]);
-                isArray.push_back(false);
-            }
-            // Array
-            else if (argType == 'a') {
-                if (args[i].val != ":")
-                    err("Must specify array length", m);
-                vargs.push_back(args[i].args[0]);
-                sizes.push_back(args[i].args[1]);
-                isArray.push_back(true);
-            }
-            else err("Invalid arg type in signature", m);
-            argCount++;
-        }
-    }
-    int static_arg_size = 1 + (vargs.size() + nargs.size()) * 32;
-    // Start off by saving the size variables and calculating the total
-    msn kwargs;
-    kwargs["funid"] = tkn(utd(funId), m);
-    std::string pattern =
-        "(with _sztot "+utd(static_arg_size)+"                            "
-        "    (with _sizes (alloc "+utd(sizes.size() * 32)+")              "
-        "        (seq                                                     ";
-    for (unsigned i = 0; i < sizes.size(); i++) {
-        std::string sizeIncrement = 
-            isArray[i] ? "(mul 32 _x)" : "_x";
-        pattern +=
-            "(with _x $sz"+utd(i)+"(seq                                   "
-            "    (mstore (add _sizes "+utd(i * 32)+") _x)                 "
-            "    (set _sztot (add _sztot "+sizeIncrement+" ))))           ";
-        kwargs["sz"+utd(i)] = sizes[i];
-    }
-    // Allocate memory, and set first data byte
-    pattern +=
-            "(with _datastart (alloc (add _sztot 32)) (seq                "
-            "    (mstore8 _datastart $funid)                              ";
-    // Copy over size variables
-    for (unsigned i = 0; i < sizes.size(); i++) {
-        int v = 1 + i * 32;
-        pattern +=
-            "    (mstore                                                  "
-            "          (add _datastart "+utd(v)+")                        "
-            "          (mload (add _sizes "+utd(v-1)+")))                 ";
-    }
-    // Store normal arguments
-    for (unsigned i = 0; i < nargs.size(); i++) {
-        int v = 1 + (i + sizes.size()) * 32;
-        pattern +=
-            "    (mstore (add _datastart "+utd(v)+") $"+utd(i)+")         ";
-        kwargs[utd(i)] = nargs[i];
-    }
-    // Loop through variable-sized arguments, store them
-    pattern += 
-            "    (with _pos (add _datastart "+utd(static_arg_size)+") (seq";
-    for (unsigned i = 0; i < vargs.size(); i++) {
-        std::string copySize =
-            isArray[i] ? "(mul 32 (mload (add _sizes "+utd(i * 32)+")))"
-                       : "(mload (add _sizes "+utd(i * 32)+"))";
-        pattern +=
-            "        (unsafe_mcopy _pos $vl"+utd(i)+" "+copySize+")       "
-            "        (set _pos (add _pos "+copySize+"))                   ";
-        kwargs["vl"+utd(i)] = vargs[i];
-    }
-    // Return a 2-item array containing the start and size
-    pattern += "     (array_lit _datastart _sztot))))))))";
-    std::string prefix = "_temp_"+mkUniqueToken();
-    // Fill in pattern, return triple
-    return subst(parseLLL(pattern), kwargs, prefix, m);
-}
-
-// Create a node for argument unpacking
-Node unpackArguments(std::vector<Node> vars, Metadata m) {
-    std::vector<std::string> varNames;
-    std::vector<std::string> longVarNames;
-    std::vector<bool> longVarIsArray;
-    // Fill in variable and long variable names, as well as which
-    // long variables are arrays and which are strings
-    for (unsigned i = 0; i < vars.size(); i++) {
-        if (vars[i].val == ":") {
-            if (vars[i].args.size() != 2)
-                err("Malformed def!", m);
-            longVarNames.push_back(vars[i].args[0].val);
-            std::string tag = vars[i].args[1].val;
-            if (tag == "s")
-                longVarIsArray.push_back(false);
-            else if (tag == "a")
-                longVarIsArray.push_back(true);
-            else
-                err("Function value can only be string or array", m);
-        }
-        else {
-            varNames.push_back(vars[i].val);
-        }
-    }
-    std::vector<Node> sub;
-    if (!varNames.size() && !longVarNames.size()) {
-        // do nothing if we have no arguments
-    }
-    else {
-        std::vector<Node> varNodes;
-        for (unsigned i = 0; i < longVarNames.size(); i++)
-            varNodes.push_back(token(longVarNames[i], m));
-        for (unsigned i = 0; i < varNames.size(); i++)
-            varNodes.push_back(token(varNames[i], m));
-        // Copy over variable lengths and short variables
-        for (unsigned i = 0; i < varNodes.size(); i++) {
-            int pos = 1 + i * 32;
-            std::string prefix = (i < longVarNames.size()) ? "_len_" : "";
-            sub.push_back(asn("untyped", asn("set",
-                              token(prefix+varNodes[i].val, m),
-                              asn("calldataload", tkn(utd(pos), m), m),
-                              m)));
-        }
-        // Copy over long variables
-        if (longVarNames.size() > 0) {
-            std::vector<Node> sub2;
-            int pos = varNodes.size() * 32 + 1;
-            Node tot = tkn("_tot", m);
-            for (unsigned i = 0; i < longVarNames.size(); i++) {
-                Node var = tkn(longVarNames[i], m);
-                Node varlen = longVarIsArray[i] 
-                    ? asn("mul", tkn("32", m), tkn("_len_"+longVarNames[i], m))
-                    : tkn("_len_"+longVarNames[i], m);
-                sub2.push_back(asn("untyped",
-                                   asn("set", var, asn("alloc", varlen))));
-                sub2.push_back(asn("calldatacopy", var, tot, varlen));
-                sub2.push_back(asn("set", tot, asn("add", tot, varlen)));
-            }
-            std::string prefix = "_temp_"+mkUniqueToken();
-            sub.push_back(subst(
-                astnode("with", tot, tkn(utd(pos), m), asn("seq", sub2)),
-                msn(),
-                prefix,
-                m));
-        }
-    }
-    return asn("seq", sub, m);
-}
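
packArguments lays calldata out as one function-id byte, then one 32-byte word per variable-size argument, one per plain argument, then the variable-length payloads. A small standalone sketch of that size arithmetic (all names illustrative):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    std::size_t nStatic = 2;  // plain 32-byte args
    std::size_t nVar    = 1;  // one array arg
    std::vector<std::size_t> varSizes = {3};  // array length in 32-byte words

    // 1-byte funid + one word per size variable + one word per static arg
    std::size_t static_arg_size = 1 + (nVar + nStatic) * 32;
    std::size_t total = static_arg_size;
    for (std::size_t i = 0; i < nVar; i++)
        total += varSizes[i] * 32;  // arrays contribute 32 bytes per word

    printf("header+static bytes: %zu, total calldata bytes: %zu\n",
           static_arg_size, total);  // 97 and 193 for this example
    return 0;
}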
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h
deleted file mode 100644
index 68a1c69cef5023e314c872bd1bedc067d877ab67..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/functions.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef ETHSERP_FUNCTIONS
-#define ETHSERP_FUNCTIONS
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-
-
-class argPack {
-    public:
-        argPack(Node a, Node b, Node c) {
-            pre = a;
-            datastart = b;
-            datasz = c;
-        }
-    Node pre;
-    Node datastart;
-    Node datasz;
-};
-
-// Get a signature from a function
-std::string getSignature(std::vector<Node> args);
-
-// Convert a list of arguments into a <pre, mstart, msize> node
-// triple, given the signature of a function
-Node packArguments(std::vector<Node> args, std::string sig,
-                   int funId, Metadata m);
-
-// Create a node for argument unpacking
-Node unpackArguments(std::vector<Node> vars, Metadata m);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp
deleted file mode 100644
index ad4fbd52de0126cd6ea592896de7c16f1598277b..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "tokenize.h"
-
-struct _parseOutput {
-    Node node;
-    int newpos;
-};
-
-// Helper, returns subtree and position of start of next node
-_parseOutput _parse(std::vector<Node> inp, int pos) {
-    Metadata met = inp[pos].metadata;
-    _parseOutput o;
-    // Bracket: keep grabbing tokens until we get to the
-    // corresponding closing bracket
-    if (inp[pos].val == "(" || inp[pos].val == "[") {
-        std::string fun, rbrack;
-        std::vector<Node> args;
-        if (inp[pos].val == "[") {
-            fun = "access";
-            rbrack = "]";
-        }
-        else rbrack = ")";
-        pos += 1;
-        // First argument is the function
-        while (inp[pos].val != rbrack) {
-            _parseOutput po = _parse(inp, pos);
-            if (fun.length() == 0 && po.node.type == 1) {
-                std::cerr << "Error: first arg must be function\n";
-                fun = po.node.val;
-            }
-            else if (fun.length() == 0) {
-                fun = po.node.val;
-            }
-            else {
-                args.push_back(po.node);
-            }
-            pos = po.newpos;
-        }
-        o.newpos = pos + 1;
-        o.node = astnode(fun, args, met);
-    }
-    // Normal token, return it and advance to next token
-    else {
-        o.newpos = pos + 1;
-        o.node = token(inp[pos].val, met);
-    }
-    return o;
-}
-
-// stream of tokens -> lisp parse tree
-Node parseLLLTokenStream(std::vector<Node> inp) {
-    _parseOutput o = _parse(inp, 0);
-    return o.node;
-}
-
-// Parses LLL
-Node parseLLL(std::string s, bool allowFileRead) {
-    std::string input = s;
-    std::string file = "main";
-    if (exists(s) && allowFileRead) {
-        file = s;
-        input = get_file_contents(s);
-    }
-    return parseLLLTokenStream(tokenize(input, Metadata(file, 0, 0), true));
-}
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h
deleted file mode 100644
index 4bfa7b82e1a82e985eb0556a39da4788506adfa3..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/lllparser.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ETHSERP_LLLPARSER
-#define ETHSERP_LLLPARSER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// LLL text -> parse tree
-Node parseLLL(std::string s, bool allowFileRead=false);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp
deleted file mode 100644
index b24144e46b769ce4dda96607eb33aee0def6dd4b..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "opcodes.h"
-#include "util.h"
-#include "bignum.h"
-
-Mapping mapping[] = {
-    Mapping("STOP", 0x00, 0, 0),
-    Mapping("ADD", 0x01, 2, 1),
-    Mapping("MUL", 0x02, 2, 1),
-    Mapping("SUB", 0x03, 2, 1),
-    Mapping("DIV", 0x04, 2, 1),
-    Mapping("SDIV", 0x05, 2, 1),
-    Mapping("MOD", 0x06, 2, 1),
-    Mapping("SMOD", 0x07, 2, 1),
-    Mapping("ADDMOD", 0x08, 3, 1),
-    Mapping("MULMOD", 0x09, 3, 1),
-    Mapping("EXP", 0x0a, 2, 1),
-    Mapping("SIGNEXTEND", 0x0b, 2, 1),
-    Mapping("LT", 0x10, 2, 1),
-    Mapping("GT", 0x11, 2, 1),
-    Mapping("SLT", 0x12, 2, 1),
-    Mapping("SGT", 0x13, 2, 1),
-    Mapping("EQ", 0x14, 2, 1),
-    Mapping("ISZERO", 0x15, 1, 1),
-    Mapping("AND", 0x16, 2, 1),
-    Mapping("OR", 0x17, 2, 1),
-    Mapping("XOR", 0x18, 2, 1),
-    Mapping("NOT", 0x19, 1, 1),
-    Mapping("BYTE", 0x1a, 2, 1),
-    Mapping("SHA3", 0x20, 2, 1),
-    Mapping("ADDRESS", 0x30, 0, 1),
-    Mapping("BALANCE", 0x31, 1, 1),
-    Mapping("ORIGIN", 0x32, 0, 1),
-    Mapping("CALLER", 0x33, 0, 1),
-    Mapping("CALLVALUE", 0x34, 0, 1),
-    Mapping("CALLDATALOAD", 0x35, 1, 1),
-    Mapping("CALLDATASIZE", 0x36, 0, 1),
-    Mapping("CALLDATACOPY", 0x37, 3, 0),
-    Mapping("CODESIZE", 0x38, 0, 1),
-    Mapping("CODECOPY", 0x39, 3, 0),
-    Mapping("GASPRICE", 0x3a, 0, 1),
-    Mapping("EXTCODESIZE", 0x3b, 1, 1),
-    Mapping("EXTCODECOPY", 0x3c, 4, 0),
-    Mapping("PREVHASH", 0x40, 0, 1),
-    Mapping("COINBASE", 0x41, 0, 1),
-    Mapping("TIMESTAMP", 0x42, 0, 1),
-    Mapping("NUMBER", 0x43, 0, 1),
-    Mapping("DIFFICULTY", 0x44, 0, 1),
-    Mapping("GASLIMIT", 0x45, 0, 1),
-    Mapping("POP", 0x50, 1, 0),
-    Mapping("MLOAD", 0x51, 1, 1),
-    Mapping("MSTORE", 0x52, 2, 0),
-    Mapping("MSTORE8", 0x53, 2, 0),
-    Mapping("SLOAD", 0x54, 1, 1),
-    Mapping("SSTORE", 0x55, 2, 0),
-    Mapping("JUMP", 0x56, 1, 0),
-    Mapping("JUMPI", 0x57, 2, 0),
-    Mapping("PC", 0x58, 0, 1),
-    Mapping("MSIZE", 0x59, 0, 1),
-    Mapping("GAS", 0x5a, 0, 1),
-    Mapping("JUMPDEST", 0x5b, 0, 0),
-    Mapping("LOG0", 0xa0, 2, 0),
-    Mapping("LOG1", 0xa1, 3, 0),
-    Mapping("LOG2", 0xa2, 4, 0),
-    Mapping("LOG3", 0xa3, 5, 0),
-    Mapping("LOG4", 0xa4, 6, 0),
-    Mapping("CREATE", 0xf0, 3, 1),
-    Mapping("CALL", 0xf1, 7, 1),
-    Mapping("CALLCODE", 0xf2, 7, 1),
-    Mapping("RETURN", 0xf3, 2, 0),
-    Mapping("SUICIDE", 0xff, 1, 0),
-    Mapping("---END---", 0x00, 0, 0),
-};
-
-std::map<std::string, std::vector<int> > opcodes;
-std::map<int, std::string> reverseOpcodes;
-
-// Fetches everything EXCEPT PUSH1..32
-std::pair<std::string, std::vector<int> > _opdata(std::string ops, int opi) {
-    if (!opcodes.size()) {
-        int i = 0;
-        while (mapping[i].op != "---END---") {
-            Mapping mi = mapping[i];
-            opcodes[mi.op] = triple(mi.opcode, mi.in, mi.out);
-            i++;
-        }
-        for (i = 1; i <= 16; i++) {
-            opcodes["DUP"+unsignedToDecimal(i)] = triple(0x7f + i, i, i+1);
-            opcodes["SWAP"+unsignedToDecimal(i)] = triple(0x8f + i, i+1, i+1);
-        }
-        for (std::map<std::string, std::vector<int> >::iterator it=opcodes.begin();
-             it != opcodes.end();
-             it++) {
-            reverseOpcodes[(*it).second[0]] = (*it).first;
-        }
-    }
-    ops = upperCase(ops);
-    std::string op;
-    std::vector<int> opdata;
-    op = reverseOpcodes.count(opi) ? reverseOpcodes[opi] : "";
-    opdata = opcodes.count(ops) ? opcodes[ops] : triple(-1, -1, -1);
-    return std::pair<std::string, std::vector<int> >(op, opdata);
-}
-
-int opcode(std::string op) {
-    return _opdata(op, -1).second[0];
-}
-
-int opinputs(std::string op) {
-    return _opdata(op, -1).second[1];
-}
-
-int opoutputs(std::string op) {
-    return _opdata(op, -1).second[2];
-}
-
-std::string op(int opcode) {
-    return _opdata("", opcode).first;
-}
-
-std::string lllSpecials[][3] = {
-    { "ref", "1", "1" },
-    { "get", "1", "1" },
-    { "set", "2", "2" },
-    { "with", "3", "3" },
-    { "comment", "0", "2147483647" },
-    { "ops", "0", "2147483647" },
-    { "lll", "2", "2" },
-    { "seq", "0", "2147483647" },
-    { "if", "3", "3" },
-    { "unless", "2", "2" },
-    { "until", "2", "2" },
-    { "alloc", "1", "1" },
-    { "---END---", "0", "0" },
-};
-
-std::map<std::string, std::pair<int, int> > lllMap;
-
-// Is a function name one of the valid functions above?
-bool isValidLLLFunc(std::string f, int argc) {
-    if (lllMap.size() == 0) {
-        for (int i = 0; ; i++) {
-            if (lllSpecials[i][0] == "---END---") break;
-            lllMap[lllSpecials[i][0]] = std::pair<int, int>(
-                dtu(lllSpecials[i][1]), dtu(lllSpecials[i][2]));
-        }
-    }
-    return lllMap.count(f)
-        && argc >= lllMap[f].first
-        && argc <= lllMap[f].second;
-}
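
Usage is straightforward once the tables are built lazily on first lookup; a short driver, assuming compilation against these sources:

#include <cstdio>
#include "opcodes.h"

int main() {
    printf("ADD -> 0x%02x, pops %d, pushes %d\n",
           opcode("ADD"), opinputs("ADD"), opoutputs("ADD"));  // 0x01, 2, 1
    printf("0x80 -> %s\n", op(0x80).c_str());                  // DUP1 (0x7f + 1)
    return 0;
}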
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h
deleted file mode 100644
index 41423c169bde7e6107d808d12a884e20f2aa96b6..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/opcodes.h
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef ETHSERP_OPCODES
-#define ETHSERP_OPCODES
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-class Mapping {
-    public:
-        Mapping(std::string Op, int Opcode, int In, int Out) {
-            op = Op;
-            opcode = Opcode;
-            in = In;
-            out = Out;
-        }
-        std::string op;
-        int opcode;
-        int in;
-        int out;
-};
-
-extern Mapping mapping[];
-
-extern std::map<std::string, std::vector<int> > opcodes;
-extern std::map<int, std::string> reverseOpcodes;
-
-std::pair<std::string, std::vector<int> > _opdata(std::string ops, int opi);
-
-int opcode(std::string op);
-
-int opinputs(std::string op);
-
-int opoutputs(std::string op);
-
-std::string op(int opcode);
-
-extern std::string lllSpecials[][3];
-
-extern std::map<std::string, std::pair<int, int> > lllMap;
-
-bool isValidLLLFunc(std::string f, int argc);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp
deleted file mode 100644
index e689fcb699794b04d496dd363e379c263d74b9a5..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-
-// Compile-time arithmetic calculations
-Node optimize(Node inp) {
-    if (inp.type == TOKEN) {
-        Node o = tryNumberize(inp);
-        if (decimalGt(o.val, tt256, true))
-            err("Value too large (exceeds 32 bytes or 2^256)", inp.metadata);
-        return o;
-    }
-	for (unsigned i = 0; i < inp.args.size(); i++) {
-        inp.args[i] = optimize(inp.args[i]);
-    }
-    // Arithmetic-specific transform
-    if (inp.val == "+") inp.val = "add";
-    if (inp.val == "*") inp.val = "mul";
-    if (inp.val == "-") inp.val = "sub";
-    if (inp.val == "/") inp.val = "sdiv";
-    if (inp.val == "^") inp.val = "exp";
-    if (inp.val == "**") inp.val = "exp";
-    if (inp.val == "%") inp.val = "smod";
-    // Degenerate cases for add and mul
-    if (inp.args.size() == 2) {
-        if (inp.val == "add" && inp.args[0].type == TOKEN && 
-                inp.args[0].val == "0") {
-            Node x = inp.args[1];
-            inp = x;
-        }
-        if (inp.val == "add" && inp.args[1].type == TOKEN && 
-                inp.args[1].val == "0") {
-            Node x = inp.args[0];
-            inp = x;
-        }
-        if (inp.val == "mul" && inp.args[0].type == TOKEN && 
-                inp.args[0].val == "1") {
-            Node x = inp.args[1];
-            inp = x;
-        }
-        if (inp.val == "mul" && inp.args[1].type == TOKEN && 
-                inp.args[1].val == "1") {
-            Node x = inp.args[0];
-            inp = x;
-        }
-    }
-    // Arithmetic computation
-    if (inp.args.size() == 2 
-            && inp.args[0].type == TOKEN 
-            && inp.args[1].type == TOKEN) {
-      std::string o;
-      if (inp.val == "add") {
-          o = decimalMod(decimalAdd(inp.args[0].val, inp.args[1].val), tt256);
-      }
-      else if (inp.val == "sub") {
-          if (decimalGt(inp.args[0].val, inp.args[1].val, true))
-              o = decimalSub(inp.args[0].val, inp.args[1].val);
-      }
-      else if (inp.val == "mul") {
-          o = decimalMod(decimalMul(inp.args[0].val, inp.args[1].val), tt256);
-      }
-      else if (inp.val == "div" && inp.args[1].val != "0") {
-          o = decimalDiv(inp.args[0].val, inp.args[1].val);
-      }
-      else if (inp.val == "sdiv" && inp.args[1].val != "0"
-            && decimalGt(tt255, inp.args[0].val)
-            && decimalGt(tt255, inp.args[1].val)) {
-          o = decimalDiv(inp.args[0].val, inp.args[1].val);
-      }
-      else if (inp.val == "mod" && inp.args[1].val != "0") {
-          o = decimalMod(inp.args[0].val, inp.args[1].val);
-      }
-      else if (inp.val == "smod" && inp.args[1].val != "0"
-            && decimalGt(tt255, inp.args[0].val)
-            && decimalGt(tt255, inp.args[1].val)) {
-          o = decimalMod(inp.args[0].val, inp.args[1].val);
-      }    
-      else if (inp.val == "exp") {
-          o = decimalModExp(inp.args[0].val, inp.args[1].val, tt256);
-      }
-      if (o.length()) return token(o, inp.metadata);
-    }
-    return inp;
-}
-
-// Is a node degenerate (i.e. trivial to calculate)?
-bool isDegenerate(Node n) {
-    return optimize(n).type == TOKEN;
-}
-
-// Is a node purely arithmetic?
-bool isPureArithmetic(Node n) {
-    return isNumberLike(optimize(n));
-}
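
A hedged sketch of the fold implemented by optimize() above, using the Node/token/astnode helpers from util.h; the inner product reduces to a token first, then the outer sum, with all arithmetic taken mod 2^256:

#include "util.h"
#include "optimize.h"

void demoFold() {
    Metadata m;
    // (add 3 (mul 2 5)): mul folds to the token "10", add then folds to "13".
    Node inner = astnode("mul", token("2", m), token("5", m), m);
    Node expr  = astnode("add", token("3", m), inner, m);
    Node out   = optimize(expr);
    // out.type == TOKEN and out.val == "13"; isDegenerate(expr) is true.
}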
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h
deleted file mode 100644
index 06ea3bba1eaa819eca1092ad531656ea6afbfad3..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/optimize.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef ETHSERP_OPTIMIZER
-#define ETHSERP_OPTIMIZER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Compile-time arithmetic calculations
-Node optimize(Node inp);
-
-// Is a node degenerate (i.e. trivial to calculate)?
-bool isDegenerate(Node n);
-
-// Is a node purely arithmetic?
-bool isPureArithmetic(Node n);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp
deleted file mode 100644
index 5e8c459c30a4fce8a5d86bf6bb5ee124859bd61a..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.cpp
+++ /dev/null
@@ -1,430 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "parser.h"
-#include "tokenize.h"
-
-// Extended BEDMAS precedence order
-int precedence(Node tok) {
-    std::string v = tok.val;
-    if (v == ".") return -1;
-    else if (v == "!" || v == "not") return 1;
-    else if (v == "^" || v == "**") return 2;
-    else if (v == "*" || v == "/" || v == "%") return 3;
-    else if (v == "+" || v == "-") return 4;
-    else if (v == "<" || v == ">" || v == "<=" || v == ">=") return 5;
-    else if (v == "&" || v == "|" || v == "xor" || v == "==" || v == "!=") return 6;
-    else if (v == "&&" || v == "and") return 7;
-    else if (v == "||" || v == "or") return 8;
-    else if (v == "=") return 10;
-    else if (v == "+=" || v == "-=" || v == "*=" || v == "/=" || v == "%=") return 10;
-    else if (v == ":" || v == "::") return 11;
-    else return 0;
-}
-
-// Token classification for shunting-yard purposes
-int toktype(Node tok) {
-    if (tok.type == ASTNODE) return COMPOUND;
-    std::string v = tok.val;
-    if (v == "(" || v == "[" || v == "{") return LPAREN;
-    else if (v == ")" || v == "]" || v == "}") return RPAREN;
-    else if (v == ",") return COMMA;
-    else if (v == "!" || v == "~" || v == "not") return UNARY_OP;
-    else if (precedence(tok) > 0) return BINARY_OP;
-    else if (precedence(tok) < 0) return TOKEN_SPLITTER;
-    if (tok.val[0] != '"' && tok.val[0] != '\'') {
-        for (unsigned i = 0; i < tok.val.length(); i++) {
-            if (chartype(tok.val[i]) == SYMB) {
-                err("Invalid symbol: "+tok.val, tok.metadata);
-            }
-        }
-    }
-    return ALPHANUM;
-}
-
-
-// Converts to reverse polish notation
-std::vector<Node> shuntingYard(std::vector<Node> tokens) {
-    std::vector<Node> iq;
-    for (int i = tokens.size() - 1; i >= 0; i--) {
-        iq.push_back(tokens[i]);
-    }
-    std::vector<Node> oq;
-    std::vector<Node> stack;
-    Node prev, tok;
-    int prevtyp = 0, toktyp = 0;
-    
-    while (iq.size()) {
-        prev = tok;
-        prevtyp = toktyp;
-        tok = iq.back();
-        toktyp = toktype(tok);
-        iq.pop_back();
-        // Alphanumerics go straight to output queue
-        if (toktyp == ALPHANUM) {
-            oq.push_back(tok);
-        }
-        // Left parens go on stack and output queue
-        else if (toktyp == LPAREN) {
-            while (stack.size() && toktype(stack.back()) == TOKEN_SPLITTER) {
-                oq.push_back(stack.back());
-                stack.pop_back();
-            }
-            if (prevtyp != ALPHANUM && prevtyp != RPAREN) {
-                oq.push_back(token("id", tok.metadata));
-            }
-            stack.push_back(tok);
-            oq.push_back(tok);
-        }
-        // If rparen, keep moving from stack to output queue until lparen
-        else if (toktyp == RPAREN) {
-            while (stack.size() && toktype(stack.back()) != LPAREN) {
-                oq.push_back(stack.back());
-                stack.pop_back();
-            }
-            if (stack.size()) {
-                stack.pop_back();
-            }
-            oq.push_back(tok);
-        }
-        else if (toktyp == UNARY_OP) {
-            stack.push_back(tok);
-        }
-        // If token splitter, just push it to the stack 
-        else if (toktyp == TOKEN_SPLITTER) {
-            while (stack.size() && toktype(stack.back()) == TOKEN_SPLITTER) {
-                oq.push_back(stack.back());
-                stack.pop_back();
-            }
-            stack.push_back(tok);
-        }
-        // If binary op, keep popping from stack while higher bedmas precedence
-        else if (toktyp == BINARY_OP) {
-            if (tok.val == "-" && prevtyp != ALPHANUM && prevtyp != RPAREN) {
-                stack.push_back(tok);
-                oq.push_back(token("0", tok.metadata));
-            }
-            else {
-                int prec = precedence(tok);
-                while (stack.size() 
-                      && (toktype(stack.back()) == BINARY_OP 
-                          || toktype(stack.back()) == UNARY_OP
-                          || toktype(stack.back()) == TOKEN_SPLITTER)
-                      && precedence(stack.back()) <= prec) {
-                    oq.push_back(stack.back());
-                    stack.pop_back();
-                }
-                stack.push_back(tok);
-            }
-        }
-        // Comma means finish evaluating the argument
-        else if (toktyp == COMMA) {
-            while (stack.size() && toktype(stack.back()) != LPAREN) {
-                oq.push_back(stack.back());
-                stack.pop_back();
-            }
-        }
-    }
-    while (stack.size()) {
-        oq.push_back(stack.back());
-        stack.pop_back();
-    }
-    return oq;
-}
-
-// Converts reverse polish notation into tree
-Node treefy(std::vector<Node> stream) {
-    std::vector<Node> iq;
-    for (int i = stream.size() -1; i >= 0; i--) {
-        iq.push_back(stream[i]);
-    }
-    std::vector<Node> oq;
-    while (iq.size()) {
-        Node tok = iq.back();
-        iq.pop_back();
-        int typ = toktype(tok);
-        // If unary, take node off end of oq and wrap it with the operator
-        // If binary, do the same with two nodes
-        if (typ == UNARY_OP || typ == BINARY_OP || typ == TOKEN_SPLITTER) {
-            std::vector<Node> args;
-            int rounds = (typ == UNARY_OP) ? 1 : 2;
-            for (int i = 0; i < rounds; i++) {
-                if (oq.size() == 0) {
-                    err("Line malformed, not enough args for "+tok.val,
-                        tok.metadata);
-                }
-                args.push_back(oq.back());
-                oq.pop_back();
-            }
-            std::vector<Node> args2;
-            while (args.size()) {
-                args2.push_back(args.back());
-                args.pop_back();
-            }
-            oq.push_back(astnode(tok.val, args2, tok.metadata));
-        }
-        // If rparen, keep grabbing until we get to an lparen
-        else if (typ == RPAREN) {
-            std::vector<Node> args;
-            while (1) {
-                if (!oq.size()) err("Bracket without matching", tok.metadata);
-                if (toktype(oq.back()) == LPAREN) break;
-                args.push_back(oq.back());
-                oq.pop_back();
-            }
-            oq.pop_back();
-            args.push_back(oq.back());
-            oq.pop_back();
-            // We represent a[b] as (access a b)
-            if (tok.val == "]")
-                 args.push_back(token("access", tok.metadata));
-            if (args.back().type == ASTNODE)
-                 args.push_back(token("fun", tok.metadata));
-            std::string fun = args.back().val;
-            args.pop_back();
-            // We represent [1,2,3] as (array_lit 1 2 3)
-            if (fun == "access" && args.size() && args.back().val == "id") {
-                fun = "array_lit";
-                args.pop_back();
-            }
-            std::vector<Node> args2;
-            while (args.size()) {
-                args2.push_back(args.back());
-                args.pop_back();
-            }
-            // When evaluating 2 + (3 * 5), the shunting yard algo turns that
-            // into 2 ( id 3 5 * ) +, effectively putting "id" as a dummy
-            // function where the algo was expecting a function to call the
-            // thing inside the brackets. This reverses that step
-            if (fun == "id" && args2.size() == 1) {
-                oq.push_back(args2[0]);
-            }
-            else {
-                oq.push_back(astnode(fun, args2, tok.metadata));
-            }
-        }
-        else oq.push_back(tok);
-        // This is messy, but has to be done. Import/inset other files here
-        std::string v = oq.back().val;
-        if ((v == "inset" || v == "import" || v == "create") 
-                && oq.back().args.size() == 1
-                && oq.back().args[0].type == TOKEN) {
-            std::string::size_type lastSlashPos = tok.metadata.file.rfind("/");
-            std::string root;
-            if (lastSlashPos != std::string::npos)
-                root = tok.metadata.file.substr(0, lastSlashPos) + "/";
-            else
-                root = "";
-            std::string filename = oq.back().args[0].val;
-            filename = filename.substr(1, filename.length() - 2);
-            if (!exists(root + filename))
-                err("File does not exist: "+root + filename, tok.metadata);
-            oq.back().args.pop_back();
-            oq.back().args.push_back(parseSerpent(root + filename));
-        }
-        //Useful for debugging
-        //for (int i = 0; i < oq.size(); i++) {
-        //    std::cerr << printSimple(oq[i]) << " ";
-        //}
-        //std::cerr << " <-\n";
-    }
-    // Output must have one argument
-    if (oq.size() == 0) {
-        err("Output blank", Metadata());
-    }
-    else if (oq.size() > 1) {
-        return asn("multi", oq, oq[0].metadata);
-    }
-
-    return oq[0];
-}
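
A hedged end-to-end sketch of the two passes above. For the input 2 + (3 * 5), shuntingYard emits the queue 2 ( id 3 5 * ) + (the "id" dummy discussed in the comment inside treefy), and treefy rebuilds the tree while stripping the dummy; both functions are file-local to parser.cpp:

#include "util.h"
#include "tokenize.h"

void demoParsePipeline() {
    std::vector<Node> toks = tokenize("2 + (3 * 5)");
    Node tree = treefy(shuntingYard(toks));
    // tree is (+ 2 (* 3 5)); the operator tokens are only renamed to
    // add/mul later, by the synonym table in rewriter.cpp.
}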
-
-
-// Parses one line of serpent
-Node parseSerpentTokenStream(std::vector<Node> s) {
-    return treefy(shuntingYard(s));
-}
-
-
-// Count spaces at beginning of line
-int spaceCount(std::string s) {
-    unsigned pos = 0;
-    while (pos < s.length() && (s[pos] == ' ' || s[pos] == '\t'))
-        pos++;
-    return pos;
-}
-
-// Is this a command that takes an argument on the same line?
-bool bodied(std::string tok) {
-    return tok == "if" || tok == "elif" || tok == "while"
-        || tok == "with" || tok == "def" || tok == "extern"
-        || tok == "data" || tok == "assert" || tok == "return"
-        || tok == "fun" || tok == "scope" || tok == "macro"
-        || tok == "type";
-}
-
-// Are the two commands meant to continue each other? 
-bool bodiedContinued(std::string prev, std::string tok) {
-    return (prev == "if" && tok == "elif")
-        || (prev == "elif" && tok == "else")
-        || (prev == "elif" && tok == "elif")
-        || (prev == "if" && tok == "else");
-}
-
-// Is a line of code empty?
-bool isLineEmpty(std::string line) {
-    std::vector<Node> tokens = tokenize(line);
-    if (!tokens.size() || tokens[0].val == "#" || tokens[0].val == "//")
-        return true;
-    return false;
-}
-
-// Parse lines of serpent (helper function)
-Node parseLines(std::vector<std::string> lines, Metadata metadata, int sp) {
-    std::vector<Node> o;
-    int origLine = metadata.ln;
-    unsigned i = 0;
-    while (i < lines.size()) {
-        metadata.ln = origLine + i; 
-        std::string main = lines[i];
-        if (isLineEmpty(main)) {
-            i += 1;
-            continue;
-        }
-        int spaces = spaceCount(main);
-        if (spaces != sp) {
-            err("Indent mismatch", metadata);
-        }
-        // Tokenize current line
-        std::vector<Node> tokens = tokenize(main.substr(sp), metadata);
-        // Remove comments
-        std::vector<Node> tokens2;
-        for (unsigned j = 0; j < tokens.size(); j++) {
-            if (tokens[j].val == "#" || tokens[j].val == "//") break;
-            tokens2.push_back(tokens[j]);
-        }
-        bool expectingChildBlock = false;
-        if (tokens2.size() > 0 && tokens2.back().val == ":") {
-            tokens2.pop_back();
-            expectingChildBlock = true;
-        }
-        // Parse current line
-        Node out = parseSerpentTokenStream(tokens2);
-        // Parse child block
-        int childIndent = 999999;
-        std::vector<std::string> childBlock;
-        while (1) {
-            i++;
-            if (i >= lines.size())
-                break;
-            bool ile = isLineEmpty(lines[i]);
-            if (!ile) {
-                int spaces = spaceCount(lines[i]);
-                if (spaces <= sp) break;
-                childBlock.push_back(lines[i]);
-                if (spaces < childIndent) childIndent = spaces;
-            }
-            else childBlock.push_back("");
-        }
-        // Child block empty?
-        bool cbe = true;
-        for (unsigned j = 0; j < childBlock.size(); j++) {
-            if (childBlock[j].length() > 0) { cbe = false; break; }
-        }
-        // Add child block to AST
-        if (expectingChildBlock) {
-            if (cbe)
-                err("Expected indented child block!", out.metadata);
-            out.type = ASTNODE;
-            metadata.ln += 1;
-            out.args.push_back(parseLines(childBlock, metadata, childIndent));
-            metadata.ln -= 1;
-        }
-        else if (!cbe)
-            err("Did not expect indented child block!", out.metadata);
-        else if (out.args.size() && out.args[out.args.size() - 1].val == ":") {
-            Node n = out.args[out.args.size() - 1];
-            out.args.pop_back();
-            out.args.push_back(n.args[0]);
-            out.args.push_back(n.args[1]);
-        }
-        // Bring back if / elif into AST
-        if (bodied(tokens[0].val)) {
-            if (out.val != "multi") {
-                // token not being used in bodied form
-            }
-            else if (out.args[0].val == "id")
-                out = astnode(tokens[0].val, out.args[1].args, out.metadata);
-            else if (out.args[0].type == TOKEN) {
-                std::vector<Node> out2;
-                for (unsigned i = 1; i < out.args.size(); i++)
-                    out2.push_back(out.args[i]);
-                out = astnode(tokens[0].val, out2, out.metadata);
-            }
-            else
-                out = astnode("fun", out.args, out.metadata);
-        }
-        // Multi not supported
-        if (out.val == "multi")
-            err("Multiple expressions or unclosed bracket", out.metadata);
-        // Convert top-level colon expressions into non-colon expressions;
-        // makes if statements and the like equivalent indented or not
-        //if (out.val == ":" && out.args[0].type == TOKEN)
-        //    out = asn(out.args[0].val, out.args[1], out.metadata);
-        //if (bodied(tokens[0].val) && out.args[0].val == ":")
-        //    out = asn(tokens[0].val, out.args[0].args);
-        if (o.size() == 0 || o.back().type == TOKEN) {
-            o.push_back(out);
-            continue;
-        }
-        // This is a little complicated. Basically, the idea here is to build
-        // constructions like [if [< x 5] [a] [elif [< x 10] [b] [else [c]]]]
-        std::vector<Node> u;
-        u.push_back(o.back());
-        if (bodiedContinued(o.back().val, out.val)) {
-            while (1) {
-                if (!bodiedContinued(u.back().val, out.val)) {
-                    u.pop_back();
-                    break;
-                }
-                if (!u.back().args.size()
-                 || !bodiedContinued(u.back().val, u.back().args.back().val)) {
-                    break;
-                }
-                u.push_back(u.back().args.back());
-            }
-            u.back().args.push_back(out);
-            while (u.size() > 1) {
-                Node v = u.back();
-                u.pop_back();
-                u.back().args.pop_back();
-                u.back().args.push_back(v);
-            }
-            o.pop_back();
-            o.push_back(u[0]);
-        }
-        else o.push_back(out);
-    }
-    if (o.size() == 1)
-        return o[0];
-    else if (o.size())
-        return astnode("seq", o, o[0].metadata);
-    else
-        return astnode("seq", o, Metadata());
-}
-
-// Parses serpent code
-Node parseSerpent(std::string s) {
-    std::string input = s;
-    std::string file = "main";
-    if (exists(s)) {
-        file = s;
-        input = get_file_contents(s);
-    }
-    return parseLines(splitLines(input), Metadata(file, 0, 0), 0);
-}
-
-
-using namespace std;
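
A hedged sketch of the block structure parseLines produces for the if/elif/else chaining described above (the [if [< x 5] [a] [elif [< x 10] [b] [else [c]]]] construction in the comments):

void demoParseBlocks() {
    Node prog = parseSerpent(
        "if x < 5:\n"
        "    a\n"
        "elif x < 10:\n"
        "    b\n"
        "else:\n"
        "    c\n");
    // prog.val == "if"; bodiedContinued folds the elif/else clauses into
    // the last argument, giving roughly
    // (if (< x 5) a (elif (< x 10) b (else c))).
}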
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h
deleted file mode 100644
index e3632220a95308b371b9c62f0a700bb11ddd51b1..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/parser.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef ETHSERP_PARSER
-#define ETHSERP_PARSER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Serpent text -> parse tree
-Node parseSerpent(std::string s);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp
deleted file mode 100644
index 3f08ea8b16d8e902e336110e34b857177fffd1a1..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.cpp
+++ /dev/null
@@ -1,299 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "rewriteutils.h"
-#include "optimize.h"
-#include "preprocess.h"
-#include "functions.h"
-#include "opcodes.h"
-
-// Convert a function of the form (def (f x y z) (do stuff)) into
-// (if (first byte of ABI is correct) (seq (setup x y z) (do stuff)))
-Node convFunction(Node node, int functionCount) {
-    std::string prefix = "_temp"+mkUniqueToken()+"_";
-    Metadata m = node.metadata;
-
-    if (node.args.size() != 2)
-        err("Malformed def!", m);
-    // Collect the list of variable names and variable byte counts
-    Node unpack = unpackArguments(node.args[0].args, m);
-    // And the actual code
-    Node body = node.args[1];
-    // Main LLL-based function body
-    return astnode("if",
-                   astnode("eq",
-                           astnode("get", token("__funid", m), m),
-                           token(unsignedToDecimal(functionCount), m),
-                           m),
-                   astnode("seq", unpack, body, m));
-}
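
A hedged sketch of the wrapper shape convFunction returns, for a hypothetical function assigned id 0 (preprocessInit below binds __funid to (byte 0 (calldataload 0)), the first byte of call data):

Node demoDispatchShape(Node unpack, Node body) {
    Metadata m;
    // (if (eq (get __funid) 0) (seq <unpack args> <body>))
    return astnode("if",
                   astnode("eq",
                           astnode("get", token("__funid", m), m),
                           token("0", m),
                           m),
                   astnode("seq", unpack, body, m));
}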
-
-// Populate an svObj with the arguments needed to determine
-// the storage position of a node
-svObj getStorageVars(svObj pre, Node node, std::string prefix,
-                     int index) {
-    Metadata m = node.metadata;
-    if (!pre.globalOffset.size()) pre.globalOffset = "0";
-    std::vector<Node> h;
-    std::vector<std::string> coefficients;
-    // Array accesses or atoms
-    if (node.val == "access" || node.type == TOKEN) {
-        std::string tot = "1";
-        h = listfyStorageAccess(node);
-        coefficients.push_back("1");
-        for (unsigned i = h.size() - 1; i >= 1; i--) {
-            // Array sizes must be constant or at least arithmetically
-            // evaluable at compile time
-            if (!isPureArithmetic(h[i]))
-                err("Array size must be fixed value", m);
-            // Create a list of the coefficient associated with each
-            // array index
-            coefficients.push_back(decimalMul(coefficients.back(), h[i].val));
-        }
-    }
-    // Tuples
-    else {
-        int startc;
-        // Handle the (fun <fun_astnode> args...) case
-        if (node.val == "fun") {
-            startc = 1;
-            h = listfyStorageAccess(node.args[0]);
-        }
-        // Handle the (<fun_name> args...) case, which
-        // the serpent parser produces when the function
-        // is a simple name and not a complex astnode
-        else {
-            startc = 0;
-            h = listfyStorageAccess(token(node.val, m));
-        }
-        svObj sub = pre;
-        sub.globalOffset = "0";
-        // Evaluate tuple elements recursively
-        for (unsigned i = startc; i < node.args.size(); i++) {
-            sub = getStorageVars(sub,
-                                 node.args[i],
-                                 prefix+h[0].val.substr(2)+".",
-                                 i-startc);
-        }
-        coefficients.push_back(sub.globalOffset);
-        for (unsigned i = h.size() - 1; i >= 1; i--) {
-            // Array sizes must be constant or at least arithmetically
-            // evaluable at compile time
-            if (!isPureArithmetic(h[i]))
-               err("Array size must be fixed value", m);
-            // Create a list of the coefficient associated with each
-            // array index
-            coefficients.push_back(decimalMul(coefficients.back(), h[i].val));
-        }
-        pre.offsets = sub.offsets;
-        pre.coefficients = sub.coefficients;
-        pre.nonfinal = sub.nonfinal;
-        pre.nonfinal[prefix+h[0].val.substr(2)] = true;
-    }
-    pre.coefficients[prefix+h[0].val.substr(2)] = coefficients;
-    pre.offsets[prefix+h[0].val.substr(2)] = pre.globalOffset;
-    pre.indices[prefix+h[0].val.substr(2)] = index;
-    if (decimalGt(tt176, coefficients.back()))
-        pre.globalOffset = decimalAdd(pre.globalOffset, coefficients.back());
-    return pre;
-}
-
-// Preprocess input containing functions
-//
-// localExterns is a map of the form, eg,
-//
-// { x: { foo: 0, bar: 1, baz: 2 }, y: { qux: 0, foo: 1 } ... }
-//
-// localExternSigs is a map of the form, eg,
-//
-// { x : { foo: iii, bar: iis, baz: ia }, y: { qux: i, foo: as } ... }
-//
-// Signifying that x.foo = 0, x.baz = 2, y.foo = 1, etc
-// and that x.foo has three integers as arguments, x.bar has two
-// integers and a variable-length string, and baz has an integer
-// and an array
-//
-// globalExterns is a one-level map, eg from above
-//
-// { foo: 1, bar: 1, baz: 2, qux: 0 }
-//
-// globalExternSigs is a one-level map, eg from above
-//
-// { foo: as, bar: iis, baz: ia, qux: i}
-//
-// Note that globalExterns and globalExternSigs may be ambiguous
-// Also, a null signature implies an infinite tail of integers
-preprocessResult preprocessInit(Node inp) {
-    Metadata m = inp.metadata;
-    if (inp.val != "seq")
-        inp = astnode("seq", inp, m);
-    std::vector<Node> empty = std::vector<Node>();
-    Node init = astnode("seq", empty, m);
-    Node shared = astnode("seq", empty, m);
-    std::vector<Node> any;
-    std::vector<Node> functions;
-    preprocessAux out = preprocessAux();
-    out.localExterns["self"] = std::map<std::string, int>();
-    int functionCount = 0;
-    int storageDataCount = 0;
-    for (unsigned i = 0; i < inp.args.size(); i++) {
-        Node obj = inp.args[i];
-        // Functions
-        if (obj.val == "def") {
-            if (obj.args.size() == 0)
-                err("Empty def", m);
-            std::string funName = obj.args[0].val;
-            // Init, shared and any are special functions
-            if (funName == "init" || funName == "shared" || funName == "any") {
-                if (obj.args[0].args.size())
-                    err(funName+" cannot have arguments", m);
-            }
-            if (funName == "init") init = obj.args[1];
-            else if (funName == "shared") shared = obj.args[1];
-            else if (funName == "any") any.push_back(obj.args[1]);
-            else {
-                // Other functions
-                functions.push_back(convFunction(obj, functionCount));
-                out.localExterns["self"][obj.args[0].val] = functionCount;
-                out.localExternSigs["self"][obj.args[0].val] 
-                    = getSignature(obj.args[0].args);
-                functionCount++;
-            }
-        }
-        // Extern declarations
-        else if (obj.val == "extern") {
-            std::string externName = obj.args[0].val;
-            Node al = obj.args[1];
-            if (!out.localExterns.count(externName))
-                out.localExterns[externName] = std::map<std::string, int>();
-            for (unsigned i = 0; i < al.args.size(); i++) {
-                if (al.args[i].val == ":") {
-                    std::string v = al.args[i].args[0].val;
-                    std::string sig = al.args[i].args[1].val;
-                    out.globalExterns[v] = i;
-                    out.globalExternSigs[v] = sig;
-                    out.localExterns[externName][v] = i;
-                    out.localExternSigs[externName][v] = sig;
-                }
-                else {
-                    std::string v = al.args[i].val;
-                    out.globalExterns[v] = i;
-                    out.globalExternSigs[v] = "";
-                    out.localExterns[externName][v] = i;
-                    out.localExternSigs[externName][v] = "";
-                }
-            }
-        }
-        // Custom macros
-        else if (obj.val == "macro") {
-            // Rules for valid macros:
-            //
-            // There are only four categories of valid macros:
-            //
-            // 1. a macro where the outer function is something
-            // which is NOT an existing valid function/extern/datum
-            // 2. a macro of the form set(c(x), d) where c must NOT
-            // be an existing valid function/extern/datum
-            // 3. something of the form access(c(x)), where c must NOT
-            // be an existing valid function/extern/datum
-            // 4. something of the form set(access(c(x)), d) where c must
-            // NOT be an existing valid function/extern/datum
-            bool valid = false;
-            Node pattern = obj.args[0];
-            Node substitution = obj.args[1];
-            if (opcode(pattern.val) < 0 && !isValidFunctionName(pattern.val))
-                valid = true;
-            if (pattern.val == "set" &&
-                    opcode(pattern.args[0].val) < 0 &&
-                    !isValidFunctionName(pattern.args[0].val))
-                valid = true;
-            if (pattern.val == "access" &&
-                    opcode(pattern.args[0].val) < 0 &&
-                    !isValidFunctionName(pattern.args[0].val))
-                valid = true;
-            if (pattern.val == "set" && pattern.args[0].val == "access" &&
-                    opcode(pattern.args[0].args[0].val) < 0 &&
-                    !isValidFunctionName(pattern.args[0].args[0].val))
-                valid = true;
-            if (valid) {
-                out.customMacros.push_back(rewriteRule(pattern, substitution));
-            }
-        }
-        // Variable types
-        else if (obj.val == "type") {
-            std::string typeName = obj.args[0].val;
-            std::vector<Node> vars = obj.args[1].args;
-            for (unsigned i = 0; i < vars.size(); i++)
-                out.types[vars[i].val] = typeName;
-        }
-        // Storage variables/structures
-        else if (obj.val == "data") {
-            out.storageVars = getStorageVars(out.storageVars,
-                                             obj.args[0],
-                                             "",
-                                             storageDataCount);
-            storageDataCount += 1;
-        }
-        else any.push_back(obj);
-    }
-    std::vector<Node> main;
-    if (shared.args.size()) main.push_back(shared);
-    if (init.args.size()) main.push_back(init);
-
-    std::vector<Node> code;
-    if (shared.args.size()) code.push_back(shared);
-    for (unsigned i = 0; i < any.size(); i++)
-        code.push_back(any[i]);
-    for (unsigned i = 0; i < functions.size(); i++)
-        code.push_back(functions[i]);
-    Node codeNode;
-    if (functions.size() > 0) {
-        codeNode = astnode("with",
-                           token("__funid", m),
-                           astnode("byte",
-                                   token("0", m),
-                                   astnode("calldataload", token("0", m), m),
-                                   m),
-                           astnode("seq", code, m),
-                           m);
-    }
-    else codeNode = astnode("seq", code, m);
-    main.push_back(astnode("~return",
-                           token("0", m),
-                           astnode("lll",
-                                   codeNode,
-                                   token("0", m),
-                                   m),
-                           m));
-
-
-    Node result;
-    if (main.size() == 1) result = main[0];
-    else result = astnode("seq", main, inp.metadata);
-    return preprocessResult(result, out);
-}
-
-preprocessResult processTypes (preprocessResult pr) {
-    preprocessAux aux = pr.second;
-    Node node = pr.first;
-    if (node.type == TOKEN && aux.types.count(node.val)) {
-        node = asn(aux.types[node.val], node, node.metadata);
-    }
-    else if (node.val == "untyped")
-        return preprocessResult(node.args[0], aux);
-    else {
-        for (unsigned i = 0; i < node.args.size(); i++) {
-            node.args[i] =
-                processTypes(preprocessResult(node.args[i], aux)).first;
-        }
-    }
-    return preprocessResult(node, aux);
-}
-
-preprocessResult preprocess(Node n) {
-    return processTypes(preprocessInit(n));
-}
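
A hedged sketch of the extern maps documented above, showing the state the aux object ends up in for extern x: [foo:iii, bar:iis, baz:ia] (hypothetical names taken from the comment):

void demoExternMaps(preprocessAux& out) {
    out.localExterns["x"]["foo"] = 0;  out.localExternSigs["x"]["foo"] = "iii";
    out.localExterns["x"]["bar"] = 1;  out.localExternSigs["x"]["bar"] = "iis";
    out.localExterns["x"]["baz"] = 2;  out.localExternSigs["x"]["baz"] = "ia";
    // The one-level globalExterns/globalExternSigs maps hold the same data
    // keyed by bare name, so a name declared in two externs is overwritten
    // by the later one -- the ambiguity the comment above warns about.
    out.globalExterns["foo"] = 0;      out.globalExternSigs["foo"] = "iii";
}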
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h
deleted file mode 100644
index 944436aef0f57fe1188a7588454c9a61cf6d3cbb..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/preprocess.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#ifndef ETHSERP_PREPROCESSOR
-#define ETHSERP_PREPROCESSOR
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Storage variable index storing object
-struct svObj {
-    std::map<std::string, std::string> offsets;
-    std::map<std::string, int> indices;
-    std::map<std::string, std::vector<std::string> > coefficients;
-    std::map<std::string, bool> nonfinal;
-    std::string globalOffset;
-};
-
-class rewriteRule {
-    public:
-        rewriteRule(Node p, Node s) {
-            pattern = p;
-            substitution = s;
-        }
-        Node pattern;
-        Node substitution;
-};
-
-
-// Preprocessing result storing object
-class preprocessAux {
-    public:
-        preprocessAux() {
-            globalExterns = std::map<std::string, int>();
-            localExterns = std::map<std::string, std::map<std::string, int> >();
-            localExterns["self"] = std::map<std::string, int>();
-        }
-        std::map<std::string, int> globalExterns;
-        std::map<std::string, std::string> globalExternSigs;
-        std::map<std::string, std::map<std::string, int> > localExterns;
-        std::map<std::string, std::map<std::string, std::string> > localExternSigs;
-        std::vector<rewriteRule> customMacros;
-        std::map<std::string, std::string> types;
-        svObj storageVars;
-};
-
-#define preprocessResult std::pair<Node, preprocessAux>
-
-// Populate an svObj with the arguments needed to determine
-// the storage position of a node
-svObj getStorageVars(svObj pre, Node node, std::string prefix="",
-                     int index=0);
-
-// Preprocess a function (see cpp for details)
-preprocessResult preprocess(Node inp);
-
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp
deleted file mode 100644
index 38398aa46e0cbbd398c41705fcef014fbb25e910..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
-#include <Python.h>
-#include "structmember.h"
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <iostream>
-#include "funcs.h"
-
-#define PYMETHOD(name, FROM, method, TO) \
-    static PyObject * name(PyObject *, PyObject *args) { \
-        try { \
-        FROM(med) \
-        return TO(method(med)); \
-        } \
-        catch (std::string e) { \
-           PyErr_SetString(PyExc_Exception, e.c_str()); \
-           return NULL; \
-        } \
-    }
-
-#define FROMSTR(v) \
-    const char *command; \
-    int len; \
-    if (!PyArg_ParseTuple(args, "s#", &command, &len)) \
-        return NULL; \
-    std::string v = std::string(command, len);
-
-#define FROMNODE(v) \
-    PyObject *node; \
-    if (!PyArg_ParseTuple(args, "O", &node)) \
-        return NULL; \
-    Node v = cppifyNode(node);
-
-#define FROMLIST(v) \
-    PyObject *node; \
-    if (!PyArg_ParseTuple(args, "O", &node)) \
-        return NULL; \
-    std::vector<Node> v = cppifyNodeList(node);
-
-// Convert metadata into python wrapper form [file, ln, ch]
-PyObject* pyifyMetadata(Metadata m) {
-    PyObject* a = PyList_New(0);
-    PyList_Append(a, Py_BuildValue("s#", m.file.c_str(), m.file.length()));
-    PyList_Append(a, Py_BuildValue("i", m.ln));
-    PyList_Append(a, Py_BuildValue("i", m.ch));
-    return a;
-}
-
-// Convert node into python wrapper form 
-// [token=0/astnode=1, val, metadata, args]
-PyObject* pyifyNode(Node n) {
-    PyObject* a = PyList_New(0);
-    PyList_Append(a, Py_BuildValue("i", n.type == ASTNODE));
-    PyList_Append(a, Py_BuildValue("s#", n.val.c_str(), n.val.length()));
-    PyList_Append(a, pyifyMetadata(n.metadata));
-    for (unsigned i = 0; i < n.args.size(); i++)
-        PyList_Append(a, pyifyNode(n.args[i]));
-    return a;
-}
-
-// Convert string into python wrapper form
-PyObject* pyifyString(std::string s) {
-    return Py_BuildValue("s#", s.c_str(), s.length());
-}
-
-// Convert list of nodes into python wrapper form
-PyObject* pyifyNodeList(std::vector<Node> n) {
-    PyObject* a = PyList_New(0);
-    for (unsigned i = 0; i < n.size(); i++)
-        PyList_Append(a, pyifyNode(n[i]));
-    return a;
-}
-
-// Convert pyobject int into normal form
-int cppifyInt(PyObject* o) {
-    int out;
-    if (!PyArg_Parse(o, "i", &out))
-        err("Argument should be integer", Metadata());
-    return out;
-}
-
-// Convert pyobject string into normal form
-std::string cppifyString(PyObject* o) {
-    const char *command;
-    if (!PyArg_Parse(o, "s", &command))
-        err("Argument should be string", Metadata());
-    return std::string(command);
-}
-
-// Convert metadata from python wrapper form
-Metadata cppifyMetadata(PyObject* o) {
-    std::string file = cppifyString(PyList_GetItem(o, 0));
-    int ln = cppifyInt(PyList_GetItem(o, 1));
-    int ch = cppifyInt(PyList_GetItem(o, 2));
-    return Metadata(file, ln, ch);
-}
-
-// Convert node from python wrapper form
-Node cppifyNode(PyObject* o) {
-    Node n;
-    int isAstNode = cppifyInt(PyList_GetItem(o, 0));
-    n.type = isAstNode ? ASTNODE : TOKEN;
-    n.val = cppifyString(PyList_GetItem(o, 1));
-    n.metadata = cppifyMetadata(PyList_GetItem(o, 2));
-    std::vector<Node> args;
-    for (int i = 3; i < PyList_Size(o); i++) {
-        args.push_back(cppifyNode(PyList_GetItem(o, i)));
-    }
-    n.args = args;
-    return n;
-}
-
-//Convert list of nodes into normal form
-std::vector<Node> cppifyNodeList(PyObject* o) {
-    std::vector<Node> out;
-    for (int i = 0; i < PyList_Size(o); i++) {
-        out.push_back(cppifyNode(PyList_GetItem(o,i)));
-    }
-    return out;
-}
-
-PYMETHOD(ps_compile, FROMSTR, compile, pyifyString)
-PYMETHOD(ps_compile_chunk, FROMSTR, compileChunk, pyifyString)
-PYMETHOD(ps_compile_to_lll, FROMSTR, compileToLLL, pyifyNode)
-PYMETHOD(ps_compile_chunk_to_lll, FROMSTR, compileChunkToLLL, pyifyNode)
-PYMETHOD(ps_compile_lll, FROMNODE, compileLLL, pyifyString)
-PYMETHOD(ps_parse, FROMSTR, parseSerpent, pyifyNode)
-PYMETHOD(ps_rewrite, FROMNODE, rewrite, pyifyNode)
-PYMETHOD(ps_rewrite_chunk, FROMNODE, rewriteChunk, pyifyNode)
-PYMETHOD(ps_pretty_compile, FROMSTR, prettyCompile, pyifyNodeList)
-PYMETHOD(ps_pretty_compile_chunk, FROMSTR, prettyCompileChunk, pyifyNodeList)
-PYMETHOD(ps_pretty_compile_lll, FROMNODE, prettyCompileLLL, pyifyNodeList)
-PYMETHOD(ps_serialize, FROMLIST, serialize, pyifyString)
-PYMETHOD(ps_deserialize, FROMSTR, deserialize, pyifyNodeList)
-PYMETHOD(ps_parse_lll, FROMSTR, parseLLL, pyifyNode)
-
-
-static PyMethodDef PyextMethods[] = {
-    {"compile",  ps_compile, METH_VARARGS,
-        "Compile code."},
-    {"compile_chunk",  ps_compile_chunk, METH_VARARGS,
-        "Compile code chunk (no wrappers)."},
-    {"compile_to_lll",  ps_compile_to_lll, METH_VARARGS,
-        "Compile code to LLL."},
-    {"compile_chunk_to_lll",  ps_compile_chunk_to_lll, METH_VARARGS,
-        "Compile code chunk to LLL (no wrappers)."},
-    {"compile_lll",  ps_compile_lll, METH_VARARGS,
-        "Compile LLL to EVM."},
-    {"parse",  ps_parse, METH_VARARGS,
-        "Parse serpent"},
-    {"rewrite",  ps_rewrite, METH_VARARGS,
-        "Rewrite parsed serpent to LLL"},
-    {"rewrite_chunk",  ps_rewrite_chunk, METH_VARARGS,
-        "Rewrite parsed serpent to LLL (no wrappers)"},
-    {"pretty_compile",  ps_pretty_compile, METH_VARARGS,
-        "Compile to EVM opcodes"},
-    {"pretty_compile_chunk",  ps_pretty_compile_chunk, METH_VARARGS,
-        "Compile chunk to EVM opcodes (no wrappers)"},
-    {"pretty_compile_lll",  ps_pretty_compile_lll, METH_VARARGS,
-        "Compile LLL to EVM opcodes"},
-    {"serialize",  ps_serialize, METH_VARARGS,
-        "Convert EVM opcodes to bin"},
-    {"deserialize",  ps_deserialize, METH_VARARGS,
-        "Convert EVM bin to opcodes"},
-    {"parse_lll",  ps_parse_lll, METH_VARARGS,
-        "Parse LLL"},
-    {NULL, NULL, 0, NULL}        /* Sentinel */
-};
-
-PyMODINIT_FUNC initserpent_pyext(void)
-{
-     Py_InitModule( "serpent_pyext", PyextMethods );
-}
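
Hand-expanding one of the PYMETHOD lines above makes the wrapper pattern explicit; this is a hedged, whitespace-normalized expansion of PYMETHOD(ps_parse, FROMSTR, parseSerpent, pyifyNode):

static PyObject* ps_parse_expanded(PyObject*, PyObject* args) {
    try {
        // FROMSTR(med): pull a length-delimited string argument
        const char* command;
        int len;
        if (!PyArg_ParseTuple(args, "s#", &command, &len))
            return NULL;
        std::string med = std::string(command, len);
        // TO(method(med)): run the compiler pass, wrap the result
        return pyifyNode(parseSerpent(med));
    }
    catch (std::string e) {
        PyErr_SetString(PyExc_Exception, e.c_str());
        return NULL;
    }
}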
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py
deleted file mode 100644
index 2103b48fefa23912b2c32397d810873f099926d4..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/pyserpent.py
+++ /dev/null
@@ -1 +0,0 @@
-from serpent import *
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp
deleted file mode 100644
index 4cdce4f0a6f3662960494e58bc804df68e2252ad..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.cpp
+++ /dev/null
@@ -1,804 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "optimize.h"
-#include "rewriteutils.h"
-#include "preprocess.h"
-#include "functions.h"
-#include "opcodes.h"
-
-// Rewrite rules
-std::string macros[][2] = {
-    {
-        "(seq $x)",
-        "$x"
-    },
-    {
-        "(seq (seq) $x)",
-        "$x"
-    },
-    {
-        "(+= $a $b)",
-        "(set $a (+ $a $b))"
-    },
-    {
-        "(*= $a $b)",
-        "(set $a (* $a $b))"
-    },
-    {
-        "(-= $a $b)",
-        "(set $a (- $a $b))"
-    },
-    {
-        "(/= $a $b)",
-        "(set $a (/ $a $b))"
-    },
-    {
-        "(%= $a $b)",
-        "(set $a (% $a $b))"
-    },
-    {
-        "(^= $a $b)",
-        "(set $a (^ $a $b))"
-    },
-    {
-        "(!= $a $b)",
-        "(iszero (eq $a $b))"
-    },
-    {
-        "(assert $x)",
-        "(unless $x (stop))"
-    },
-    {
-        "(min $a $b)",
-        "(with $1 $a (with $2 $b (if (lt $1 $2) $1 $2)))"
-    },
-    {
-        "(max $a $b)",
-        "(with $1 $a (with $2 $b (if (lt $1 $2) $2 $1)))"
-    },
-    {
-        "(smin $a $b)",
-        "(with $1 $a (with $2 $b (if (slt $1 $2) $1 $2)))"
-    },
-    {
-        "(smax $a $b)",
-        "(with $1 $a (with $2 $b (if (slt $1 $2) $2 $1)))"
-    },
-    {
-        "(if $cond $do (else $else))",
-        "(if $cond $do $else)"
-    },
-    {
-        "(code $code)",
-        "$code"
-    },
-    {
-        "(slice $arr $pos)",
-        "(add $arr (mul 32 $pos))",
-    },
-    {
-        "(array $len)",
-        "(alloc (mul 32 $len))"
-    },
-    {
-        "(while $cond $do)",
-        "(until (iszero $cond) $do)",
-    },
-    {
-        "(while (iszero $cond) $do)",
-        "(until $cond $do)",
-    },
-    {
-        "(if $cond $do)",
-        "(unless (iszero $cond) $do)",
-    },
-    {
-        "(if (iszero $cond) $do)",
-        "(unless $cond $do)",
-    },
-    {
-        "(access (. self storage) $ind)",
-        "(sload $ind)"
-    },
-    {
-        "(access $var $ind)",
-        "(mload (add $var (mul 32 $ind)))"
-    },
-    {
-        "(set (access (. self storage) $ind) $val)",
-        "(sstore $ind $val)"
-    },
-    {
-        "(set (access $var $ind) $val)",
-        "(mstore (add $var (mul 32 $ind)) $val)"
-    },
-    {
-        "(getch $var $ind)",
-        "(mod (mload (sub (add $var $ind) 31)) 256)"
-    },
-    {
-        "(setch $var $ind $val)",
-        "(mstore8 (add $var $ind) $val)",
-    },
-    {
-        "(send $to $value)",
-        "(~call (sub (gas) 25) $to $value 0 0 0 0)"
-    },
-    {
-        "(send $gas $to $value)",
-        "(~call $gas $to $value 0 0 0 0)"
-    },
-    {
-        "(sha3 $x)",
-        "(seq (set $1 $x) (~sha3 (ref $1) 32))"
-    },
-    {
-        "(sha3 $mstart (= chars $msize))",
-        "(~sha3 $mstart $msize)"
-    },
-    {
-        "(sha3 $mstart $msize)",
-        "(~sha3 $mstart (mul 32 $msize))"
-    },
-    {
-        "(id $0)",
-        "$0"
-    },
-    {
-        "(return $x)",
-        "(seq (set $1 $x) (~return (ref $1) 32))"
-    },
-    {
-        "(return $mstart (= chars $msize))",
-        "(~return $mstart $msize)"
-    },
-    {
-        "(return $start $len)",
-        "(~return $start (mul 32 $len))"
-    },
-    {
-        "(&& $x $y)",
-        "(if $x $y 0)"
-    },
-    {
-        "(|| $x $y)",
-        "(with $1 $x (if $1 $1 $y))"
-    },
-    {
-        "(>= $x $y)",
-        "(iszero (slt $x $y))"
-    },
-    {
-        "(<= $x $y)",
-        "(iszero (sgt $x $y))"
-    },
-    {
-        "(create $code)",
-        "(create 0 $code)"
-    },
-    {
-        "(create $endowment $code)",
-        "(with $1 (msize) (create $endowment (get $1) (lll (outer $code) (msize))))"
-    },
-    {
-        "(sha256 $x)",
-        "(with $1 (alloc 64) (seq (mstore (add (get $1) 32) $x) (pop (~call 101 2 0 (add (get $1) 32) 32 (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(sha256 $arr (= chars $sz))",
-        "(with $1 (alloc 32) (seq (pop (~call 101 2 0 $arr $sz (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(sha256 $arr $sz)",
-        "(with $1 (alloc 32) (seq (pop (~call 101 2 0 $arr (mul 32 $sz) (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(ripemd160 $x)",
-        "(with $1 (alloc 64) (seq (mstore (add (get $1) 32) $x) (pop (~call 101 3 0 (add (get $1) 32) 32 (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(ripemd160 $arr (= chars $sz))",
-        "(with $1 (alloc 32) (seq (pop (~call 101 3 0 $arr $sz (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(ripemd160 $arr $sz)",
-        "(with $1 (alloc 32) (seq (pop (~call 101 3 0 $arr (mul 32 $sz) (get $1) 32)) (mload (get $1))))"
-    },
-    {
-        "(ecrecover $h $v $r $s)",
-        "(with $1 (alloc 160) (seq (mstore (get $1) $h) (mstore (add (get $1) 32) $v) (mstore (add (get $1) 64) $r) (mstore (add (get $1) 96) $s) (pop (~call 101 1 0 (get $1) 128 (add (get $1) 128) 32)) (mload (add (get $1) 128))))"
-    },
-    {
-        "(inset $x)",
-        "$x"
-    },
-    {
-        "(create $x)",
-        "(with $1 (msize) (create 0 (get $1) (lll (outer $x) (msize))))"
-    },
-    {
-        "(with (= $var $val) $cond)",
-        "(with $var $val $cond)"
-    },
-    {
-        "(log $t1)",
-        "(~log1 0 0 $t1)"
-    },
-    {
-        "(log $t1 $t2)",
-        "(~log2 0 0 $t1 $t2)"
-    },
-    {
-        "(log $t1 $t2 $t3)",
-        "(~log3 0 0 $t1 $t2 $t3)"
-    },
-    {
-        "(log $t1 $t2 $t3 $t4)",
-        "(~log4 0 0 $t1 $t2 $t3 $t4)"
-    },
-    {
-        "(logarr $a $sz)",
-        "(~log0 $a (mul 32 $sz))"
-    },
-    {
-        "(logarr $a $sz $t1)",
-        "(~log1 $a (mul 32 $sz) $t1)"
-    },
-    {
-        "(logarr $a $sz $t1 $t2)",
-        "(~log2 $a (mul 32 $sz) $t1 $t2)"
-    },
-    {
-        "(logarr $a $sz $t1 $t2 $t3)",
-        "(~log3 $a (mul 32 $sz) $t1 $t2 $t3)"
-    },
-    {
-        "(logarr $a $sz $t1 $t2 $t3 $t4)",
-        "(~log4 $a (mul 32 $sz) $t1 $t2 $t3 $t4)"
-    },
-    {
-        "(save $loc $array (= chars $count))",
-        "(with $location (ref $loc) (with $c $count (with $end (div $c 32) (with $i 0 (seq (while (slt $i $end) (seq (sstore (add $i $location) (access $array $i)) (set $i (add $i 1)))) (sstore (add $i $location) (~and (access $array $i) (sub 0 (exp 256 (sub 32 (mod $c 32)))))))))))"
-    },
-    {
-        "(save $loc $array $count)",
-        "(with $location (ref $loc) (with $end $count (with $i 0 (while (slt $i $end) (seq (sstore (add $i $location) (access $array $i)) (set $i (add $i 1)))))))"
-    },
-    {
-        "(load $loc (= chars $count))",
-        "(with $location (ref $loc) (with $c $count (with $a (alloc $c) (with $i 0 (seq (while (slt $i (div $c 32)) (seq (set (access $a $i) (sload (add $location $i))) (set $i (add $i 1)))) (set (access $a $i) (~and (sload (add $location $i)) (sub 0 (exp 256 (sub 32 (mod $c 32)))))) $a)))))"
-    },
-    {
-        "(load $loc $count)",
-        "(with $location (ref $loc) (with $c $count (with $a (alloc $c) (with $i 0 (seq (while (slt $i $c) (seq (set (access $a $i) (sload (add $location $i))) (set $i (add $i 1)))) $a)))))"
-    },
-    {
-        "(unsafe_mcopy $to $from $sz)",
-        "(with _sz $sz (with _from $from (with _to $to (seq (comment STARTING UNSAFE MCOPY) (with _i 0 (while (lt _i _sz) (seq (mstore (add $to _i) (mload (add _from _i))) (set _i (add _i 32)))))))))"
-    },
-    {
-        "(mcopy $to $from $sz)",
-        "(with _to $to (with _from $from (with _sz $sz (seq (comment STARTING MCOPY (with _i 0 (seq (while (lt (add _i 31) _sz) (seq (mstore (add _to _i) (mload (add _from _i))) (set _i (add _i 32)))) (with _mask (exp 256 (sub 32 (mod _sz 32))) (mstore (add $to _i) (add (mod (mload (add $to _i)) _mask) (and (mload (add $from _i)) (sub 0 _mask))))))))))))"
-    },
-    { "(. msg sender)", "(caller)" },
-    { "(. msg value)", "(callvalue)" },
-    { "(. tx gasprice)", "(gasprice)" },
-    { "(. tx origin)", "(origin)" },
-    { "(. tx gas)", "(gas)" },
-    { "(. $x balance)", "(balance $x)" },
-    { "self", "(address)" },
-    { "(. block prevhash)", "(prevhash)" },
-    { "(. block coinbase)", "(coinbase)" },
-    { "(. block timestamp)", "(timestamp)" },
-    { "(. block number)", "(number)" },
-    { "(. block difficulty)", "(difficulty)" },
-    { "(. block gaslimit)", "(gaslimit)" },
-    { "stop", "(stop)" },
-    { "---END---", "" } //Keep this line at the end of the list
-};
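
Each row of the table is parsed into a rewriteRule on first use (see apply_rules_iter below); a hedged sketch of what one row means in practice:

void demoRewriteRule() {
    // The "+=" row: (+= balance 10) rewrites to (set balance (+ balance 10)),
    // with $a and $b acting as pattern variables bound during matching.
    rewriteRule r(parseLLL("(+= $a $b)"),
                  parseLLL("(set $a (+ $a $b))"));
    // The matcher binds $a -> balance, $b -> 10, then instantiates the
    // right-hand side with those bindings.
}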
-
-std::vector<rewriteRule> nodeMacros;
-
-// Token synonyms
-std::string synonyms[][2] = {
-    { "or", "||" },
-    { "and", "&&" },
-    { "|", "~or" },
-    { "&", "~and" },
-    { "elif", "if" },
-    { "!", "iszero" },
-    { "~", "~not" },
-    { "not", "iszero" },
-    { "string", "alloc" },
-    { "+", "add" },
-    { "-", "sub" },
-    { "*", "mul" },
-    { "/", "sdiv" },
-    { "^", "exp" },
-    { "**", "exp" },
-    { "%", "smod" },
-    { "<", "slt" },
-    { ">", "sgt" },
-    { "=", "set" },
-    { "==", "eq" },
-    { ":", "kv" },
-    { "---END---", "" } //Keep this line at the end of the list
-};
-
-// Custom setters (need to be registered separately
-// for use with managed storage)
-std::string setters[][2] = {
-    { "+=", "+" },
-    { "-=", "-" },
-    { "*=", "*" },
-    { "/=", "/" },
-    { "%=", "%" },
-    { "^=", "^" },
-    { "---END---", "" } //Keep this line at the end of the list
-};
-
-// Processes mutable array literals
-Node array_lit_transform(Node node) {
-    std::string prefix = "_temp"+mkUniqueToken() + "_";
-    Metadata m = node.metadata;
-    std::map<std::string, Node> d;
-    std::string o = "(seq (set $arr (alloc "+utd(node.args.size()*32)+"))";
-    for (unsigned i = 0; i < node.args.size(); i++) {
-        o += " (mstore (add (get $arr) "+utd(i * 32)+") $"+utd(i)+")";
-        d[utd(i)] = node.args[i];
-    }
-    o += " (get $arr))";
-    return subst(parseLLL(o), d, prefix, m);
-}
-
-
-Node apply_rules(preprocessResult pr);
-
-// Transform "<variable>.<fun>(args...)" into
-// a call
-Node dotTransform(Node node, preprocessAux aux) {
-    Metadata m = node.metadata;
-    // We're gonna make lots of temporary variables,
-    // so set up a unique flag for them
-    std::string prefix = "_temp"+mkUniqueToken()+"_";
-    // Check that the function name is a token
-    if (node.args[0].args[1].type == ASTNODE)
-        err("Function name must be static", m);
-
-    Node dotOwner = node.args[0].args[0];
-    std::string dotMember = node.args[0].args[1].val;
-    // kwargs = map of special arguments
-    std::map<std::string, Node> kwargs;
-    kwargs["value"] = token("0", m);
-    kwargs["gas"] = subst(parseLLL("(- (gas) 25)"), msn(), prefix, m);
-    // Search for as=? and call=code keywords, and isolate the actual
-    // function arguments
-    std::vector<Node> fnargs;
-    std::string as = "";
-    std::string op = "call";
-    for (unsigned i = 1; i < node.args.size(); i++) {
-        fnargs.push_back(node.args[i]);
-        Node arg = fnargs.back();
-        if (arg.val == "=" || arg.val == "set") {
-            if (arg.args[0].val == "as")
-                as = arg.args[1].val;
-            if (arg.args[0].val == "call" && arg.args[1].val == "code")
-                op = "callcode";
-            if (arg.args[0].val == "gas")
-                kwargs["gas"] = arg.args[1];
-            if (arg.args[0].val == "value")
-                kwargs["value"] = arg.args[1];
-            if (arg.args[0].val == "outsz")
-                kwargs["outsz"] = arg.args[1];
-        }
-    }
-    if (dotOwner.val == "self") {
-        if (as.size()) err("Cannot use \"as\" when calling self!", m);
-        as = dotOwner.val;
-    }
-    // Determine the funId and sig assuming the "as" keyword was used
-    int funId = 0;
-    std::string sig;
-    if (as.size() > 0 && aux.localExterns.count(as)) {
-        if (!aux.localExterns[as].count(dotMember))
-            err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
-        funId = aux.localExterns[as][dotMember];
-        sig = aux.localExternSigs[as][dotMember];
-    }
-    // Determine the funId and sig otherwise
-    else if (!as.size()) {
-        if (!aux.globalExterns.count(dotMember))
-            err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
-        std::string key = unsignedToDecimal(aux.globalExterns[dotMember]);
-        funId = aux.globalExterns[dotMember];
-        sig = aux.globalExternSigs[dotMember];
-    }
-    else err("Invalid call: "+printSimple(dotOwner)+"."+dotMember, m);
-    // Pack arguments
-    kwargs["data"] = packArguments(fnargs, sig, funId, m);
-    kwargs["to"] = dotOwner;
-    Node main;
-    // Pack output
-    if (!kwargs.count("outsz")) {
-        main = parseLLL(
-            "(with _data $data (seq "
-                "(pop (~"+op+" $gas $to $value (access _data 0) (access _data 1) (ref $dataout) 32))"
-                "(get $dataout)))");
-    }
-    else {
-        main = parseLLL(
-            "(with _data $data (with _outsz (mul 32 $outsz) (with _out (alloc _outsz) (seq "
-                "(pop (~"+op+" $gas $to $value (access _data 0) (access _data 1) _out _outsz))"
-                "(get _out)))))");
-    }
-    // Set up main call
-
-    Node o = subst(main, kwargs, prefix, m);
-    return o;
-}
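
For reference, the outsz branch above instantiates this LLL template (the same string, reformatted; $data is the (start, size) pair produced by packArguments, $gas defaults to (- (gas) 25) and $value to 0):

(with _data $data
  (with _outsz (mul 32 $outsz)
    (with _out (alloc _outsz)
      (seq
        (pop (~call $gas $to $value
                    (access _data 0) (access _data 1)
                    _out _outsz))
        (get _out)))))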
-
-// Transform an access of the form self.bob, self.users[5], etc into
-// a storage access
-//
-// There exist two types of objects: finite objects, and infinite
-// objects. Finite objects are packed optimally tightly into storage
-// accesses; for example:
-//
-// data obj[100](a, b[2][4], c)
-//
-// obj[0].a -> 0
-// obj[0].b[0][0] -> 1
-// obj[0].b[1][3] -> 8
-// obj[45].c -> 459
-//
-// Infinite objects are accessed by sha3([v1, v2, v3 ... ]), where
-// the values are a list of array indices and keyword indices, for
-// example:
-// data obj[](a, b[2][4], c)
-// data obj2[](a, b[][], c)
-//
-// obj[0].a -> sha3([0, 0, 0])
-// obj[5].b[1][3] -> sha3([0, 5, 1, 1, 3])
-// obj[45].c -> sha3([0, 45, 2])
-// obj2[0].a -> sha3([1, 0, 0])
-// obj2[5].b[1][3] -> sha3([1, 5, 1, 1, 3])
-// obj2[45].c -> sha3([1, 45, 2])
-Node storageTransform(Node node, preprocessAux aux,
-                      bool mapstyle=false, bool ref=false) {
-    Metadata m = node.metadata;
-    // Get a list of all of the "access parameters" used in order
-    // eg. self.users[5].cow[4][m[2]][woof] -> 
-    //         [--self, --users, 5, --cow, 4, m[2], woof]
-    std::vector<Node> hlist = listfyStorageAccess(node);
-    // For infinite arrays, the terms array will just provide a list
-    // of indices. For finite arrays, it's a list of index*coefficient
-    std::vector<Node> terms;
-    std::string offset = "0";
-    std::string prefix = "";
-    std::string varPrefix = "_temp"+mkUniqueToken()+"_";
-    int c = 0;
-    std::vector<std::string> coefficients;
-    coefficients.push_back("");
-    for (unsigned i = 1; i < hlist.size(); i++) {
-        // We pre-add the -- flag to parameter-like terms. For example,
-        // self.users[m] -> [--self, --users, m]
-        // self.users.m -> [--self, --users, --m]
-        if (hlist[i].val.substr(0, 2) == "--") {
-            prefix += hlist[i].val.substr(2) + ".";
-            std::string tempPrefix = prefix.substr(0, prefix.size()-1);
-            if (!aux.storageVars.offsets.count(tempPrefix))
-                return node;
-            if (c < (signed)coefficients.size() - 1)
-                err("Too few array index lookups", m);
-            if (c > (signed)coefficients.size() - 1)
-                err("Too many array index lookups", m);
-            coefficients = aux.storageVars.coefficients[tempPrefix];
-            // If the size of an object exceeds 2^176, we make it an infinite
-            // array
-            if (decimalGt(coefficients.back(), tt176) && !mapstyle)
-                return storageTransform(node, aux, true, ref);
-            offset = decimalAdd(offset, aux.storageVars.offsets[tempPrefix]);
-            c = 0;
-            if (mapstyle)
-                terms.push_back(token(unsignedToDecimal(
-                    aux.storageVars.indices[tempPrefix])));
-        }
-        else if (mapstyle) {
-            terms.push_back(hlist[i]);
-            c += 1;
-        }
-        else {
-            if (c > (signed)coefficients.size() - 2)
-                err("Too many array index lookups", m);
-            terms.push_back(
-                astnode("mul", 
-                        hlist[i],
-                        token(coefficients[coefficients.size() - 2 - c], m),
-                        m));
-            c += 1;
-        }
-    }
-    if (aux.storageVars.nonfinal.count(prefix.substr(0, prefix.size()-1)))
-        err("Storage variable access not deep enough", m);
-    if (c < (signed)coefficients.size() - 1) {
-        err("Too few array index lookups", m);
-    }
-    if (c > (signed)coefficients.size() - 1) {
-        err("Too many array index lookups", m);
-    }
-    Node o;
-    if (mapstyle) {
-        std::string t = "_temp_"+mkUniqueToken();
-        std::vector<Node> sub;
-        for (unsigned i = 0; i < terms.size(); i++)
-            sub.push_back(asn("mstore",
-                              asn("add",
-                                  tkn(utd(i * 32), m),
-                                  asn("get", tkn(t+"pos", m), m),
-                                  m),
-                              terms[i],
-                              m));
-        sub.push_back(tkn(t+"pos", m));
-        Node main = asn("with",
-                        tkn(t+"pos", m),
-                        asn("alloc", tkn(utd(terms.size() * 32), m), m),
-                        asn("seq", sub, m),
-                        m);
-        Node sz = token(utd(terms.size() * 32), m);
-        o = astnode("~sha3",
-                    main,
-                    sz,
-                    m);
-    }
-    else {
-        // We add up all the index*coefficients
-        Node out = token(offset, node.metadata);
-        for (unsigned i = 0; i < terms.size(); i++) {
-            std::vector<Node> temp;
-            temp.push_back(out);
-            temp.push_back(terms[i]);
-            out = astnode("add", temp, node.metadata);
-        }
-        o = out;
-    }
-    if (ref) return o;
-    else return astnode("sload", o, node.metadata);
-}
-
-
-// Recursively applies rewrite rules
-std::pair<Node, bool> apply_rules_iter(preprocessResult pr) {
-    bool changed = false;
-    Node node = pr.first;
-    // If the rewrite rules have not yet been parsed, parse them
-    if (!nodeMacros.size()) {
-        for (int i = 0; i < 9999; i++) {
-            if (macros[i][0] == "---END---") break;
-            nodeMacros.push_back(rewriteRule(
-                parseLLL(macros[i][0]),
-                parseLLL(macros[i][1])
-            ));
-        }
-    }
-    // Assignment transformations
-    for (int i = 0; i < 9999; i++) {
-        if (setters[i][0] == "---END---") break;
-        if (node.val == setters[i][0]) {
-            node = astnode("=",
-                           node.args[0],
-                           astnode(setters[i][1],
-                                   node.args[0],
-                                   node.args[1],
-                                   node.metadata),
-                           node.metadata);
-        }
-    }
-    // Do nothing to macros
-    if (node.val == "macro") {
-        return std::pair<Node, bool>(node, changed);
-    }
-    // Ignore comments
-    if (node.val == "comment") {
-        return std::pair<Node, bool>(node, changed);
-    }
-    // Special storage transformation
-    if (isNodeStorageVariable(node)) {
-        node = storageTransform(node, pr.second);
-        changed = true;
-    }
-    if (node.val == "ref" && isNodeStorageVariable(node.args[0])) {
-        node = storageTransform(node.args[0], pr.second, false, true);
-        changed = true;
-    }
-    if (node.val == "=" && isNodeStorageVariable(node.args[0])) {
-        Node t = storageTransform(node.args[0], pr.second);
-        if (t.val == "sload") {
-            std::vector<Node> o;
-            o.push_back(t.args[0]);
-            o.push_back(node.args[1]);
-            node = astnode("sstore", o, node.metadata);
-        }
-        changed = true;
-    }
-    // Main code
-	unsigned pos = 0;
-    std::string prefix = "_temp"+mkUniqueToken()+"_";
-    while(1) {
-        if (synonyms[pos][0] == "---END---") {
-            break;
-        }
-        else if (node.type == ASTNODE && node.val == synonyms[pos][0]) {
-            node.val = synonyms[pos][1];
-            changed = true;
-        }
-        pos++;
-    }
-    for (pos = 0; pos < nodeMacros.size() + pr.second.customMacros.size(); pos++) {
-        rewriteRule macro = pos < nodeMacros.size() 
-                ? nodeMacros[pos] 
-                : pr.second.customMacros[pos - nodeMacros.size()];
-        matchResult mr = match(macro.pattern, node);
-        if (mr.success) {
-            node = subst(macro.substitution, mr.map, prefix, node.metadata);
-            std::pair<Node, bool> o =
-                 apply_rules_iter(preprocessResult(node, pr.second));
-            o.second = true;
-            return o;
-        }
-    }
-    // Special transformations
-    if (node.val == "outer") {
-        node = apply_rules(preprocess(node.args[0]));
-        changed = true;
-    }
-    if (node.val == "array_lit") {
-        node = array_lit_transform(node);
-        changed = true;
-    }
-    if (node.val == "fun" && node.args[0].val == ".") {
-        node = dotTransform(node, pr.second);
-        changed = true;
-    }
-    if (node.type == ASTNODE) {
-		unsigned i = 0;
-        if (node.val == "set" || node.val == "ref" 
-                || node.val == "get" || node.val == "with") {
-            if (node.args[0].type == TOKEN && node.args[0].val.size() > 0
-                    && node.args[0].val[0] != '\'' && node.args[0].val[0] != '$') {
-                node.args[0].val = "'" + node.args[0].val;
-                changed = true;
-            }
-            i = 1;
-        }
-        else if (node.val == "arglen") {
-            node.val = "get";
-            node.args[0].val = "'_len_" + node.args[0].val;
-            i = 1;
-            changed = true;
-        }
-        for (; i < node.args.size(); i++) {
-            std::pair<Node, bool> r =
-                apply_rules_iter(preprocessResult(node.args[i], pr.second));
-            node.args[i] = r.first;
-            changed = changed || r.second;
-        }
-    }
-    else if (node.type == TOKEN && !isNumberLike(node)) {
-        if (node.val.size() >= 2
-                && node.val[0] == '"'
-                && node.val[node.val.size() - 1] == '"') {
-            std::string bin = node.val.substr(1, node.val.size() - 2);
-            unsigned sz = bin.size();
-            std::vector<Node> o;
-            for (unsigned i = 0; i < sz; i += 32) {
-                std::string t = binToNumeric(bin.substr(i, 32));
-                if ((sz - i) < 32 && (sz - i) > 0) {
-                    // Left-align the final partial word by padding its
-                    // low-order bytes with zeroes, then end the loop
-                    while ((sz - i) < 32) {
-                        t = decimalMul(t, "256");
-                        i--;
-                    }
-                    i = sz;
-                }
-                o.push_back(token(t, node.metadata));
-            }
-            node = astnode("array_lit", o, node.metadata);
-            std::pair<Node, bool> r = 
-                apply_rules_iter(preprocessResult(node, pr.second));
-            node = r.first;
-            changed = true;
-        }
-        else if (node.val.size() && node.val[0] != '\'' && node.val[0] != '$') {
-            node.val = "'" + node.val;
-            std::vector<Node> args;
-            args.push_back(node);
-            std::string v = node.val.substr(1);
-            node = astnode("get", args, node.metadata);
-            changed = true;
-        }
-    }
-    return std::pair<Node, bool>(node, changed);
-}
-
-Node apply_rules(preprocessResult pr) {
-    for (unsigned i = 0; i < pr.second.customMacros.size(); i++) {
-        pr.second.customMacros[i].pattern =
-            apply_rules(preprocessResult(pr.second.customMacros[i].pattern, preprocessAux()));
-    }
-    while (1) {
-        //std::cerr << printAST(pr.first) << 
-        // " " << pr.second.customMacros.size() << "\n";
-        std::pair<Node, bool> r = apply_rules_iter(pr);
-        if (!r.second) {
-            return r.first;
-        }
-        pr.first = r.first;
-    }
-}
-
-Node validate(Node inp) {
-    Metadata m = inp.metadata;
-    if (inp.type == ASTNODE) {
-        int i = 0;
-        while(validFunctions[i][0] != "---END---") {
-            if (inp.val == validFunctions[i][0]) {
-                std::string sz = unsignedToDecimal(inp.args.size());
-                if (decimalGt(validFunctions[i][1], sz)) {
-                    err("Too few arguments for "+inp.val, inp.metadata);   
-                }
-                if (decimalGt(sz, validFunctions[i][2])) {
-                    err("Too many arguments for "+inp.val, inp.metadata);   
-                }
-            }
-            i++;
-        }
-    }
-	for (unsigned i = 0; i < inp.args.size(); i++) validate(inp.args[i]);
-    return inp;
-}
-
-Node postValidate(Node inp) {
-    // This allows people to use ~x as a way of having functions with the same
-    // name and arity as macros; the idea is that ~x is a "final" form, and 
-    // should not be remacroed, but it is converted back at the end
-    if (inp.val.size() > 0 && inp.val[0] == '~') {
-        inp.val = inp.val.substr(1);
-    }
-    if (inp.type == ASTNODE) {
-        if (inp.val == ".")
-            err("Invalid object member (ie. a foo.bar not mapped to anything)",
-                inp.metadata);
-        else if (opcode(inp.val) >= 0) {
-            if ((signed)inp.args.size() < opinputs(inp.val))
-                err("Too few arguments for "+inp.val, inp.metadata);
-            if ((signed)inp.args.size() > opinputs(inp.val))
-                err("Too many arguments for "+inp.val, inp.metadata);
-        }
-        else if (isValidLLLFunc(inp.val, inp.args.size())) {
-            // do nothing
-        }
-        else err ("Invalid argument count or LLL function: "+inp.val, inp.metadata);
-        for (unsigned i = 0; i < inp.args.size(); i++) {
-            inp.args[i] = postValidate(inp.args[i]);
-        }
-    }
-    return inp;
-}
-
-Node rewrite(Node inp) {
-    return postValidate(optimize(apply_rules(preprocess(inp))));
-}
-
-Node rewriteChunk(Node inp) {
-    return postValidate(optimize(apply_rules(
-                        preprocessResult(
-                        validate(inp), preprocessAux()))));
-}
-
-using namespace std;
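
The finite-object layout implemented by `storageTransform` above reduces to a dot product: each array index is scaled by its level's coefficient and summed with the member's base offset. A minimal Go sketch of that arithmetic (helper names are illustrative only, not part of this tree), reproducing the `data obj[100](a, b[2][4], c)` example from the comment:

```go
package main

import "fmt"

// slot computes a finite-object storage slot: the sum of each index
// times its level coefficient, plus the member's base offset.
func slot(indices, coefficients []int, offset int) int {
	s := offset
	for i, idx := range indices {
		s += idx * coefficients[i]
	}
	return s
}

func main() {
	// data obj[100](a, b[2][4], c): one element occupies
	// 1 (a) + 2*4 (b) + 1 (c) = 10 slots, so obj[i] scales by 10.
	fmt.Println(slot([]int{0}, []int{10}, 0))             // obj[0].a -> 0
	fmt.Println(slot([]int{0, 0, 0}, []int{10, 4, 1}, 1)) // obj[0].b[0][0] -> 1
	fmt.Println(slot([]int{0, 1, 3}, []int{10, 4, 1}, 1)) // obj[0].b[1][3] -> 8
	fmt.Println(slot([]int{45}, []int{10}, 9))            // obj[45].c -> 459
}
```
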
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h
deleted file mode 100644
index 716815cee54fff44f99464c8139b25c51da829de..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriter.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef ETHSERP_REWRITER
-#define ETHSERP_REWRITER
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Applies rewrite rules
-Node rewrite(Node inp);
-
-// Applies rewrite rules without adding the outer wrapper
-Node rewriteChunk(Node inp);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp
deleted file mode 100644
index 0d810bdbc79ed2ff00e1d4a40af3f1815822f702..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.cpp
+++ /dev/null
@@ -1,211 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "lllparser.h"
-#include "bignum.h"
-#include "rewriteutils.h"
-#include "optimize.h"
-
-// Valid functions and their min and max argument counts
-std::string validFunctions[][3] = {
-    { "if", "2", "3" },
-    { "unless", "2", "2" },
-    { "while", "2", "2" },
-    { "until", "2", "2" },
-    { "alloc", "1", "1" },
-    { "array", "1", "1" },
-    { "call", "2", tt256 },
-    { "callcode", "2", tt256 },
-    { "create", "1", "4" },
-    { "getch", "2", "2" },
-    { "setch", "3", "3" },
-    { "sha3", "1", "2" },
-    { "return", "1", "2" },
-    { "inset", "1", "1" },
-    { "min", "2", "2" },
-    { "max", "2", "2" },
-    { "array_lit", "0", tt256 },
-    { "seq", "0", tt256 },
-    { "log", "1", "6" },
-    { "outer", "1", "1" },
-    { "set", "2", "2" },
-    { "get", "1", "1" },
-    { "ref", "1", "1" },
-    { "declare", "1", tt256 },
-    { "with", "3", "3" },
-    { "outer", "1", "1" },
-    { "mcopy", "3", "3" },
-    { "unsafe_mcopy", "3", "3" },
-    { "save", "3", "3" },
-    { "load", "2", "2" },
-    { "---END---", "", "" } //Keep this line at the end of the list
-};
-
-std::map<std::string, bool> vfMap;
-
-// Is a function name one of the valid functions above?
-bool isValidFunctionName(std::string f) {
-    if (vfMap.size() == 0) {
-        for (int i = 0; ; i++) {
-            if (validFunctions[i][0] == "---END---") break;
-            vfMap[validFunctions[i][0]] = true;
-        }
-    }
-    return vfMap.count(f);
-}
-
-// Cool function for debug purposes (named cerrStringList to make
-// all prints searchable via 'cerr')
-void cerrStringList(std::vector<std::string> s, std::string suffix) {
-    for (unsigned i = 0; i < s.size(); i++) std::cerr << s[i] << " ";
-    std::cerr << suffix << "\n";
-}
-
-// Convert a storage access into its ordered list of access parameters,
-// with "--" marking name-like (dot-member) terms:
-// self.cow -> [--self, --cow]
-// self.horse[0] -> [--self, --horse, 0]
-// self.a[6][7][self.storage[3]].chicken[9] ->
-//     [--self, --a, 6, 7, self.storage[3], --chicken, 9]
-std::vector<Node> listfyStorageAccess(Node node) {
-    std::vector<Node> out;
-    std::vector<Node> nodez;
-    nodez.push_back(node);
-    while (1) {
-        if (nodez.back().type == TOKEN) {
-            out.push_back(token("--" + nodez.back().val, node.metadata));
-            std::vector<Node> outrev;
-            for (int i = (signed)out.size() - 1; i >= 0; i--) {
-                outrev.push_back(out[i]);
-            }
-            return outrev;
-        }
-        if (nodez.back().val == ".")
-            nodez.back().args[1].val = "--" + nodez.back().args[1].val;
-        if (nodez.back().args.size() == 0)
-            err("Error parsing storage variable statement", node.metadata);
-        if (nodez.back().args.size() == 1)
-            out.push_back(token(tt256m1, node.metadata));
-        else
-            out.push_back(nodez.back().args[1]);
-        nodez.push_back(nodez.back().args[0]);
-    }
-}
-
-// Is the given node something of the form
-// self.cow
-// self.horse[0]
-// self.a[6][7][self.storage[3]].chicken[9]
-bool isNodeStorageVariable(Node node) {
-    std::vector<Node> nodez;
-    nodez.push_back(node);
-    while (1) {
-        if (nodez.back().type == TOKEN) return false;
-        if (nodez.back().args.size() == 0) return false;
-        if (nodez.back().val != "." && nodez.back().val != "access")
-            return false;
-        if (nodez.back().args[0].val == "self") return true;
-        nodez.push_back(nodez.back().args[0]);
-    }
-}
-
-// Main pattern matching routine, for those patterns that can be expressed
-// using our standard mini-language above
-//
-// Returns two values. First, a boolean to determine whether the node matches
-// the pattern, second, if the node does match then a map mapping variables
-// in the pattern to nodes
-matchResult match(Node p, Node n) {
-    matchResult o;
-    o.success = false;
-    if (p.type == TOKEN) {
-        if (p.val == n.val && n.type == TOKEN) o.success = true;
-        else if (p.val[0] == '$' || p.val[0] == '@') {
-            o.success = true;
-            o.map[p.val.substr(1)] = n;
-        }
-    }
-    else if (n.type==TOKEN || p.val!=n.val || p.args.size()!=n.args.size()) {
-        // do nothing
-    }
-    else {
-		for (unsigned i = 0; i < p.args.size(); i++) {
-            matchResult oPrime = match(p.args[i], n.args[i]);
-            if (!oPrime.success) {
-                o.success = false;
-                return o;
-            }
-            for (std::map<std::string, Node>::iterator it = oPrime.map.begin();
-                 it != oPrime.map.end();
-                 it++) {
-                o.map[(*it).first] = (*it).second;
-            }
-        }
-        o.success = true;
-    }
-    return o;
-}
-
-
-// Fills in the pattern with a dictionary mapping variable names to
-// nodes (these dicts are generated by match). Match and subst together
-// create a full pattern-matching engine. 
-Node subst(Node pattern,
-           std::map<std::string, Node> dict,
-           std::string varflag,
-           Metadata m) {
-    // Swap out patterns at the token level
-    if (pattern.metadata.ln == -1)
-        pattern.metadata = m;
-    if (pattern.type == TOKEN && 
-            pattern.val[0] == '$') {
-        if (dict.count(pattern.val.substr(1))) {
-            return dict[pattern.val.substr(1)];
-        }
-        else {
-            return token(varflag + pattern.val.substr(1), m);
-        }
-    }
-    // Other tokens are untouched
-    else if (pattern.type == TOKEN) {
-        return pattern;
-    }
-    // Substitute recursively for ASTs
-    else {
-        std::vector<Node> args;
-		for (unsigned i = 0; i < pattern.args.size(); i++) {
-            args.push_back(subst(pattern.args[i], dict, varflag, m));
-        }
-        return asn(pattern.val, args, m);
-    }
-}
-
-// Transforms a sequence containing two-argument with statements
-// into a statement containing those statements in nested form
-Node withTransform (Node source) {
-    Node o = token("--");
-    Metadata m = source.metadata;
-    std::vector<Node> args;
-    for (int i = source.args.size() - 1; i >= 0; i--) {
-        Node a = source.args[i];
-        if (a.val == "with" && a.args.size() == 2) {
-            std::vector<Node> flipargs;
-            for (int j = args.size() - 1; j >= 0; j--)
-                flipargs.push_back(args[j]);
-            if (o.val != "--")
-                flipargs.push_back(o);
-            o = asn("with", a.args[0], a.args[1], asn("seq", flipargs, m), m);
-            args = std::vector<Node>();
-        }
-        else {
-            args.push_back(a);
-        }
-    }
-    std::vector<Node> flipargs;
-    for (int j = args.size() - 1; j >= 0; j--)
-        flipargs.push_back(args[j]);
-    if (o.val != "--")
-        flipargs.push_back(o);
-    return asn("seq", flipargs, m);
-}
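
`match` and `subst` above form a small pattern-matching engine: `$`-prefixed tokens in a pattern bind whole subtrees, and substitution splices the bound subtrees into a rewrite template. A simplified Go analogue (no `@` bindings, metadata, or temp-variable flags; the `unless` rewrite is illustrative only):

```go
package main

import "fmt"

// Node mirrors the C++ Node: a token (no Args) or an AST node.
type Node struct {
	Val  string
	Args []Node
}

// match binds $-prefixed pattern tokens to subtrees of n and reports
// whether the pattern matched.
func match(p, n Node, binds map[string]Node) bool {
	if len(p.Args) == 0 && len(p.Val) > 0 && p.Val[0] == '$' {
		binds[p.Val[1:]] = n
		return true
	}
	if p.Val != n.Val || len(p.Args) != len(n.Args) {
		return false
	}
	for i := range p.Args {
		if !match(p.Args[i], n.Args[i], binds) {
			return false
		}
	}
	return true
}

// subst fills a template, replacing each $x token with its bound subtree.
func subst(pattern Node, binds map[string]Node) Node {
	if len(pattern.Args) == 0 && len(pattern.Val) > 0 && pattern.Val[0] == '$' {
		return binds[pattern.Val[1:]]
	}
	out := Node{Val: pattern.Val}
	for _, a := range pattern.Args {
		out.Args = append(out.Args, subst(a, binds))
	}
	return out
}

func main() {
	// Rewrite (unless $c $b) -> (if (iszero $c) $b).
	pat := Node{Val: "unless", Args: []Node{{Val: "$c"}, {Val: "$b"}}}
	tmpl := Node{Val: "if", Args: []Node{
		{Val: "iszero", Args: []Node{{Val: "$c"}}}, {Val: "$b"}}}
	in := Node{Val: "unless", Args: []Node{{Val: "x"}, {Val: "y"}}}
	binds := map[string]Node{}
	if match(pat, in, binds) {
		fmt.Printf("%+v\n", subst(tmpl, binds))
	}
}
```
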
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h
deleted file mode 100644
index 8abf44a9fbbfaf65d7ba424ab20a917276c71cf0..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/rewriteutils.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#ifndef ETHSERP_REWRITEUTILS
-#define ETHSERP_REWRITEUTILS
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// Valid functions and their min and max argument counts
-extern std::string validFunctions[][3];
-
-extern std::map<std::string, bool> vfMap;
-
-bool isValidFunctionName(std::string f);
-
-// Converts deep array access into ordered list of the arguments
-// along the descent
-std::vector<Node> listfyStorageAccess(Node node);
-
-// Cool function for debug purposes (named cerrStringList to make
-// all prints searchable via 'cerr')
-void cerrStringList(std::vector<std::string> s, std::string suffix="");
-
-// Is the given node something of the form
-// self.cow
-// self.horse[0]
-// self.a[6][7][self.storage[3]].chicken[9]
-bool isNodeStorageVariable(Node node);
-
-// Applies rewrite rules without adding the outer wrapper
-Node rewriteChunk(Node inp);
-
-// Match result storing object
-struct matchResult {
-    bool success;
-    std::map<std::string, Node> map;
-};
-
-// Match node to pattern
-matchResult match(Node p, Node n);
-
-// Substitute node using pattern
-Node subst(Node pattern,
-           std::map<std::string, Node> dict,
-           std::string varflag,
-           Metadata m);
-
-Node withTransform(Node source);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py
deleted file mode 100644
index 8d6bedfe35b6b5ece032c9dcfe858461f5a582d4..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/serpent.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import serpent_pyext as pyext
-import sys
-import re
-
-VERSION = '1.7.7'
-
-
-class Metadata(object):
-    def __init__(self, li):
-        self.file = li[0]
-        self.ln = li[1]
-        self.ch = li[2]
-
-    def out(self):
-        return [self.file, self.ln, self.ch]
-
-
-class Token(object):
-    def __init__(self, val, metadata):
-        self.val = val
-        self.metadata = Metadata(metadata)
-
-    def out(self):
-        return [0, self.val, self.metadata.out()]
-
-    def __repr__(self):
-        return str(self.val)
-
-
-class Astnode(object):
-    def __init__(self, val, args, metadata):
-        self.val = val
-        self.args = map(node, args)
-        self.metadata = Metadata(metadata)
-
-    def out(self):
-        o = [1, self.val, self.metadata.out()]+[x.out() for x in self.args]
-        return o
-
-    def __repr__(self):
-        o = '(' + self.val
-        subs = map(repr, self.args)
-        k = 0
-        out = " "
-        while k < len(subs) and o != "(seq":
-            if '\n' in subs[k] or len(out + subs[k]) >= 80:
-                break
-            out += subs[k] + " "
-            k += 1
-        if k < len(subs):
-            o += out + "\n  "
-            o += '\n  '.join('\n'.join(subs[k:]).split('\n'))
-            o += '\n)'
-        else:
-            o += out[:-1] + ')'
-        return o
-
-
-def node(li):
-    if li[0]:
-        return Astnode(li[1], li[3:], li[2])
-    else:
-        return Token(li[1], li[2])
-
-
-def take(x):
-    return pyext.parse_lll(x) if isinstance(x, (str, unicode)) else x.out()
-
-
-def takelist(x):
-    return map(take, parse(x).args if isinstance(x, (str, unicode)) else x)
-
-
-compile = lambda x: pyext.compile(x)
-compile_chunk = lambda x: pyext.compile_chunk(x)
-compile_to_lll = lambda x: node(pyext.compile_to_lll(x))
-compile_chunk_to_lll = lambda x: node(pyext.compile_chunk_to_lll(x))
-compile_lll = lambda x: pyext.compile_lll(take(x))
-parse = lambda x: node(pyext.parse(x))
-rewrite = lambda x: node(pyext.rewrite(take(x)))
-rewrite_chunk = lambda x: node(pyext.rewrite_chunk(take(x)))
-pretty_compile = lambda x: map(node, pyext.pretty_compile(x))
-pretty_compile_chunk = lambda x: map(node, pyext.pretty_compile_chunk(x))
-pretty_compile_lll = lambda x: map(node, pyext.pretty_compile_lll(take(x)))
-serialize = lambda x: pyext.serialize(takelist(x))
-deserialize = lambda x: map(node, pyext.deserialize(x))
-
-is_numeric = lambda x: isinstance(x, (int, long))
-is_string = lambda x: isinstance(x, (str, unicode))
-tobytearr = lambda n, L: [] if L == 0 else tobytearr(n / 256, L - 1)+[n % 256]
-
-
-# A set of methods for detecting raw values (numbers and strings) and
-# converting them to integers
-def frombytes(b):
-    return 0 if len(b) == 0 else ord(b[-1]) + 256 * frombytes(b[:-1])
-
-
-def fromhex(b):
-    hexord = lambda x: '0123456789abcdef'.find(x)
-    return 0 if len(b) == 0 else hexord(b[-1]) + 16 * fromhex(b[:-1])
-
-
-def numberize(b):
-    if is_numeric(b):
-        return b
-    elif b[0] in ["'", '"']:
-        return frombytes(b[1:-1])
-    elif b[:2] == '0x':
-        return fromhex(b[2:])
-    elif re.match('^[0-9]*$', b):
-        return int(b)
-    elif len(b) == 40:
-        return fromhex(b)
-    else:
-        raise Exception("Cannot identify data type: %r" % b)
-
-
-def enc(n):
-    if is_numeric(n):
-        return ''.join(map(chr, tobytearr(n, 32)))
-    elif is_string(n) and len(n) == 40:
-        return '\x00' * 12 + n.decode('hex')
-    elif is_string(n):
-        return '\x00' * (32 - len(n)) + n
-    elif n is True:
-        return '\x00' * 31 + '\x01'
-    elif n is False or n is None:
-        return '\x00' * 32
-
-
-def encode_datalist(*args):
-    if isinstance(args, (tuple, list)):
-        return ''.join(map(enc, args))
-    elif not len(args) or args[0] == '':
-        return ''
-    else:
-        # Assume you're getting in numbers or addresses or 0x...
-        return ''.join(map(enc, map(numberize, args)))
-
-
-def decode_datalist(arr):
-    if isinstance(arr, list):
-        arr = ''.join(map(chr, arr))
-    o = []
-    for i in range(0, len(arr), 32):
-        o.append(frombytes(arr[i:i + 32]))
-    return o
-
-
-def encode_abi(funid, *args):
-    len_args = ''
-    normal_args = ''
-    var_args = ''
-    for arg in args:
-        if isinstance(arg, str) and len(arg) and \
-                arg[0] == '"' and arg[-1] == '"':
-            len_args += enc(numberize(len(arg[1:-1])))
-            var_args += arg[1:-1]
-        elif isinstance(arg, list):
-            for a in arg:
-                var_args += enc(numberize(a))
-            len_args += enc(numberize(len(arg)))
-        else:
-            normal_args += enc(numberize(arg))
-    return chr(int(funid)) + len_args + normal_args + var_args
-
-
-def decode_abi(arr, *lens):
-    o = []
-    pos = 1
-    i = 0
-    if len(lens) == 1 and isinstance(lens[0], list):
-        lens = lens[0]
-    while pos < len(arr):
-        bytez = int(lens[i]) if i < len(lens) else 32
-        o.append(frombytes(arr[pos: pos + bytez]))
-        i, pos = i + 1, pos + bytez
-    return o
-
-
-def main():
-    if len(sys.argv) == 1:
-        print "serpent <command> <arg1> <arg2> ..."
-    else:
-        if sys.argv[1] == '-s':
-            cmd = sys.argv[2]
-            args = [sys.stdin.read()] + sys.argv[3:]
-        elif sys.argv[1] == '-v':
-            print VERSION
-            sys.exit()
-        else:
-            cmd = sys.argv[1]
-            args = sys.argv[2:]
-        if cmd in ['deserialize', 'decode_datalist', 'decode_abi']:
-            args[0] = args[0].strip().decode('hex')
-        o = globals()[cmd](*args)
-        if isinstance(o, (Token, Astnode, list)):
-            print repr(o)
-        else:
-            print o.encode('hex')
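
The wire format produced by `encode_abi` in the deleted serpent.py is: one funid byte, then a 32-byte length word per variable-size argument, then the fixed-size arguments as 32-byte words, then the raw variable-size data. A rough Go transcription of that layout (helper names hypothetical; this predates the modern Ethereum ABI):

```go
package main

import (
	"fmt"
	"math/big"
)

// enc left-pads an integer into a 32-byte word, mirroring serpent.py's
// enc() (values wider than 32 bytes are out of scope for this sketch).
func enc(n *big.Int) []byte {
	b := n.Bytes()
	out := make([]byte, 32)
	copy(out[32-len(b):], b)
	return out
}

// encodeABI mirrors encode_abi(): funid byte, length words for the
// variable-size args, fixed-size args, then the raw variable data.
func encodeABI(funid byte, fixed []*big.Int, variable [][]byte) []byte {
	out := []byte{funid}
	for _, v := range variable {
		out = append(out, enc(big.NewInt(int64(len(v))))...)
	}
	for _, n := range fixed {
		out = append(out, enc(n)...)
	}
	for _, v := range variable {
		out = append(out, v...)
	}
	return out
}

func main() {
	blob := encodeABI(1, []*big.Int{big.NewInt(42)}, [][]byte{[]byte("hello")})
	fmt.Printf("%x\n", blob) // 01, a 5-length word, a 42 word, then "hello"
}
```
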
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py
deleted file mode 100644
index 5fdc1c16aad00179cacd721e09a267f37ec3cb2f..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/setup.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from setuptools import setup, Extension
-
-import os
-from distutils.sysconfig import get_config_vars
-
-(opt,) = get_config_vars('OPT')
-os.environ['OPT'] = " ".join(
-    flag for flag in opt.split() if flag != '-Wstrict-prototypes'
-)
-
-setup(
-    # Name of this package
-    name="ethereum-serpent",
-
-    # Package version
-    version='1.7.7',
-
-    description='Serpent compiler',
-    maintainer='Vitalik Buterin',
-    maintainer_email='v@buterin.com',
-    license='WTFPL',
-    url='http://www.ethereum.org/',
-
-    # Describes how to build the actual extension module from C source files.
-    ext_modules=[
-        Extension(
-            'serpent_pyext',         # Python name of the module
-            ['bignum.cpp', 'util.cpp', 'tokenize.cpp',
-             'lllparser.cpp', 'parser.cpp', 'functions.cpp',
-             'optimize.cpp', 'opcodes.cpp',
-             'rewriteutils.cpp', 'preprocess.cpp', 'rewriter.cpp',
-             'compiler.cpp', 'funcs.cpp', 'pyserpent.cpp']
-        )],
-    py_modules=[
-        'serpent',
-        'pyserpent'
-    ],
-    scripts=[
-        'serpent.py'
-    ],
-    entry_points={
-        'console_scripts': [
-            'serpent = serpent:main',
-        ],
-    }
-    )
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp
deleted file mode 100644
index b60cc8a44aaa998a2cac4ec9cd6dd1988a8f47aa..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-// These appear as independent tokens even if inside a stream of symbols
-const std::string atoms[] = { "#", "//", "(", ")", "[", "]", "{", "}" };
-const int numAtoms = 8;
-
-// Is the char alphanumeric, a space, a bracket, a quote, a symbol?
-int chartype(char c) {
-    if (c >= '0' && c <= '9') return ALPHANUM;
-    else if (c >= 'a' && c <= 'z') return ALPHANUM;
-    else if (c >= 'A' && c <= 'Z') return ALPHANUM;
-	else if (std::string("~_$@").find(c) != std::string::npos) return ALPHANUM;
-    else if (c == '\t' || c == ' ' || c == '\n' || c == '\r') return SPACE;
-	else if (std::string("()[]{}").find(c) != std::string::npos) return BRACK;
-    else if (c == '"') return DQUOTE;
-    else if (c == '\'') return SQUOTE;
-    else return SYMB;
-}
-
-// "y = f(45,124)/3" -> [ "y", "f", "(", "45", ",", "124", ")", "/", "3"]
-std::vector<Node> tokenize(std::string inp, Metadata metadata, bool lispMode) {
-    int curtype = SPACE;
-	unsigned pos = 0;
-    int lastNewline = 0;
-    metadata.ch = 0;
-    std::string cur;
-    std::vector<Node> out;
-
-    inp += " ";
-    while (pos < inp.length()) {
-        int headtype = chartype(inp[pos]);
-        if (lispMode) {
-            if (inp[pos] == '\'') headtype = ALPHANUM;
-        }
-        // Are we inside a quote?
-        if (curtype == SQUOTE || curtype == DQUOTE) {
-            // Close quote
-            if (headtype == curtype) {
-                cur += inp[pos];
-                out.push_back(token(cur, metadata));
-                cur = "";
-                metadata.ch = pos - lastNewline;
-                curtype = SPACE;
-                pos += 1;
-            }
-            // Hex escape, e.g. \xc3
-            else if (inp.length() >= pos + 4 && inp.substr(pos, 2) == "\\x") {
-                cur += (std::string("0123456789abcdef").find(inp[pos+2]) * 16
-                        + std::string("0123456789abcdef").find(inp[pos+3]));
-                pos += 4;
-            }
-            // Newline
-            else if (inp.substr(pos, 2) == "\\n") {
-                cur += '\n';
-                pos += 2;
-            }
-            // Backslash escape
-            else if (inp.length() >= pos + 2 && inp[pos] == '\\') {
-                cur += inp[pos + 1];
-                pos += 2;
-            }
-            // Normal character
-            else {
-                cur += inp[pos];
-                pos += 1;
-            }
-        }
-        else {
-            // Handle atoms ( '//', '#',  brackets )
-            for (int i = 0; i < numAtoms; i++) {
-                int split = cur.length() - atoms[i].length();
-                if (split >= 0 && cur.substr(split) == atoms[i]) {
-                    if (split > 0) {
-                        out.push_back(token(cur.substr(0, split), metadata));
-                    }
-                    metadata.ch += split;
-                    out.push_back(token(cur.substr(split), metadata));
-                    metadata.ch = pos - lastNewline;
-                    cur = "";
-                    curtype = SPACE;
-                }
-            }
-            // Split off a trailing minus sign or exclamation mark
-            if (cur.length() > 1 && (cur.substr(cur.length() - 1) == "-"
-                                  || cur.substr(cur.length() - 1) == "!")) {
-                out.push_back(token(cur.substr(0, cur.length() - 1), metadata));
-                out.push_back(token(cur.substr(cur.length() - 1), metadata));
-                cur = "";
-            }
-            // Boundary between different char types
-            if (headtype != curtype) {
-                if (curtype != SPACE && cur != "") {
-                    out.push_back(token(cur, metadata));
-                }
-                metadata.ch = pos - lastNewline;
-                cur = "";
-            }
-            cur += inp[pos];
-            curtype = headtype;
-            pos += 1;
-        }
-        if (inp[pos] == '\n') {
-            lastNewline = pos;
-            metadata.ch = 0;
-            metadata.ln += 1;
-        }
-    }
-    return out;
-}
-
-
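
The tokenizer above mostly splits on character-class boundaries (alphanumeric runs, symbol runs, whitespace), with brackets always emitted as single tokens. Ignoring quotes, escapes, and the atom/minus special cases, the core loop can be sketched in Go as:

```go
package main

import (
	"fmt"
	"strings"
)

// class buckets characters roughly as chartype() above does,
// collapsed to the classes this sketch needs.
func class(c rune) int {
	switch {
	case c >= '0' && c <= '9', c >= 'a' && c <= 'z',
		c >= 'A' && c <= 'Z', strings.ContainsRune("~_$@", c):
		return 0 // alphanumeric run
	case c == ' ' || c == '\t' || c == '\n' || c == '\r':
		return 1 // whitespace (never emitted)
	case strings.ContainsRune("()[]{}", c):
		return 2 // bracket: always its own token
	default:
		return 3 // symbol run
	}
}

// tokenize splits on class boundaries; brackets always split.
func tokenize(inp string) []string {
	var out []string
	cur, curClass := "", 1
	for _, c := range inp {
		cl := class(c)
		if cl != curClass || cl == 2 {
			if cur != "" && curClass != 1 {
				out = append(out, cur)
			}
			cur = ""
		}
		cur += string(c)
		curClass = cl
	}
	if cur != "" && curClass != 1 {
		out = append(out, cur)
	}
	return out
}

func main() {
	fmt.Println(tokenize("y = f(45,124)/3"))
	// [y = f ( 45 , 124 ) / 3]
}
```
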
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h
deleted file mode 100644
index 04a42f3c6856d5a225a963dacaaa326369d05ee0..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/tokenize.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef ETHSERP_TOKENIZE
-#define ETHSERP_TOKENIZE
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-
-int chartype(char c);
-
-std::vector<Node> tokenize(std::string inp,
-                           Metadata meta=Metadata(),
-                           bool lispMode=false);
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp
deleted file mode 100644
index 56f642fc8a9463f17e8fce7300c1e8fd8e234d44..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include "util.h"
-#include "bignum.h"
-#include <fstream>
-#include <cerrno>
-
-//Token or value node constructor
-Node token(std::string val, Metadata met) {
-    Node o;
-    o.type = TOKEN;
-    o.val = val;
-    o.metadata = met;
-    return o;
-}
-
-//AST node constructor
-Node astnode(std::string val, std::vector<Node> args, Metadata met) {
-    Node o;
-    o.type = ASTNODE;
-    o.val = val;
-    o.args = args;
-    o.metadata = met;
-    return o;
-}
-
-//AST node constructors for a specific number of children
-Node astnode(std::string val, Metadata met) {
-    std::vector<Node> args;
-    return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Metadata met) {
-    std::vector<Node> args;
-    args.push_back(a);
-    return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Metadata met) {
-    std::vector<Node> args;
-    args.push_back(a);
-    args.push_back(b);
-    return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Node c, Metadata met) {
-    std::vector<Node> args;
-    args.push_back(a);
-    args.push_back(b);
-    args.push_back(c);
-    return astnode(val, args, met);
-}
-
-Node astnode(std::string val, Node a, Node b, Node c, Node d, Metadata met) {
-    std::vector<Node> args;
-    args.push_back(a);
-    args.push_back(b);
-    args.push_back(c);
-    args.push_back(d);
-    return astnode(val, args, met);
-}
-
-
-// Print token list
-std::string printTokens(std::vector<Node> tokens) {
-    std::string s = "";
-	for (unsigned i = 0; i < tokens.size(); i++) {
-        s += tokens[i].val + " ";
-    }
-    return s;
-}
-
-// Prints a lisp AST on one line
-std::string printSimple(Node ast) {
-    if (ast.type == TOKEN) return ast.val;
-    std::string o = "(" + ast.val;
-	for (unsigned i = 0; i < ast.args.size(); i++) {
-        o += " " + printSimple(ast.args[i]);
-    }
-    return o + ")";
-}
-
-// Number of tokens in a tree
-int treeSize(Node prog) {
-    if (prog.type == TOKEN) return 1;
-    int o = 0;
-	for (unsigned i = 0; i < prog.args.size(); i++) o += treeSize(prog.args[i]);
-    return o;
-}
-
-// Pretty-prints a lisp AST
-std::string printAST(Node ast, bool printMetadata) {
-    if (ast.type == TOKEN) return ast.val;
-    std::string o = "(";
-    if (printMetadata) {
-         o += ast.metadata.file + " ";
-         o += unsignedToDecimal(ast.metadata.ln) + " ";
-         o += unsignedToDecimal(ast.metadata.ch) + ": ";
-    }
-    o += ast.val;
-    std::vector<std::string> subs;
-	for (unsigned i = 0; i < ast.args.size(); i++) {
-        subs.push_back(printAST(ast.args[i], printMetadata));
-    }
-	unsigned k = 0;
-    std::string out = " ";
-    // As many arguments as possible go on the same line as the function,
-    // except when seq is used
-    while (k < subs.size() && o != "(seq") {
-		if (subs[k].find("\n") != std::string::npos || (out + subs[k]).length() >= 80) break;
-        out += subs[k] + " ";
-        k += 1;
-    }
-    // All remaining arguments go on their own lines
-    if (k < subs.size()) {
-        o += out + "\n";
-        std::vector<std::string> subsSliceK;
-		for (unsigned i = k; i < subs.size(); i++) subsSliceK.push_back(subs[i]);
-        o += indentLines(joinLines(subsSliceK));
-        o += "\n)";
-    }
-    else {
-        o += out.substr(0, out.size() - 1) + ")";
-    }
-    return o;
-}
-
-// Splits text by line
-std::vector<std::string> splitLines(std::string s) {
-	unsigned pos = 0;
-    int lastNewline = 0;
-    std::vector<std::string> o;
-    while (pos < s.length()) {
-        if (s[pos] == '\n') {
-            o.push_back(s.substr(lastNewline, pos - lastNewline));
-            lastNewline = pos + 1;
-        }
-        pos = pos + 1;
-    }
-    o.push_back(s.substr(lastNewline));
-    return o;
-}
-
-// Inverse of splitLines
-std::string joinLines(std::vector<std::string> lines) {
-    std::string o = "\n";
-	for (unsigned i = 0; i < lines.size(); i++) {
-        o += lines[i] + "\n";
-    }
-    return o.substr(1, o.length() - 2);
-}
-
-// Indent all lines by 4 spaces
-std::string indentLines(std::string inp) {
-    std::vector<std::string> lines = splitLines(inp);
-	for (unsigned i = 0; i < lines.size(); i++) lines[i] = "    "+lines[i];
-    return joinLines(lines);
-}
-
-// Converts binary to simple numeric format
-std::string binToNumeric(std::string inp) {
-    std::string o = "0";
-	for (unsigned i = 0; i < inp.length(); i++) {
-        o = decimalAdd(decimalMul(o,"256"), unsignedToDecimal((unsigned char)inp[i]));
-    }
-    return o;
-}
-
-// Converts string to simple numeric format
-std::string strToNumeric(std::string inp) {
-    std::string o = "0";
-    if (inp == "") {
-        o = "";
-    }
-    else if (inp.substr(0,2) == "0x") {
-        for (unsigned i = 2; i < inp.length(); i++) {
-            size_t dig = std::string("0123456789abcdef0123456789ABCDEF").find(inp[i]);
-            // find() yields npos for a non-hex character; reject it before
-            // reducing the doubled (lower/upper) alphabet mod 16
-            if (dig == std::string::npos) return "";
-            o = decimalAdd(decimalMul(o, "16"), unsignedToDecimal(dig % 16));
-        }
-    }
-    else {
-        bool isPureNum = true;
-		for (unsigned i = 0; i < inp.length(); i++) {
-            isPureNum = isPureNum && inp[i] >= '0' && inp[i] <= '9';
-        }
-        o = isPureNum ? inp : "";
-    }
-    return o;
-}
-
-// Does the node contain a number (e.g. 124, 0xf012c, "george")
-bool isNumberLike(Node node) {
-    if (node.type == ASTNODE) return false;
-    return strToNumeric(node.val) != "";
-}
-
-//Normalizes number representations
-Node nodeToNumeric(Node node) {
-    std::string o = strToNumeric(node.val);
-    return token(o == "" ? node.val : o, node.metadata);
-}
-
-Node tryNumberize(Node node) {
-    if (node.type == TOKEN && isNumberLike(node)) return nodeToNumeric(node);
-    return node;
-}
-
-//Converts a value to an array of byte number nodes
-std::vector<Node> toByteArr(std::string val, Metadata metadata, int minLen) {
-    std::vector<Node> o;
-    int L = 0;
-    while (val != "0" || L < minLen) {
-        o.push_back(token(decimalMod(val, "256"), metadata));
-        val = decimalDiv(val, "256");
-        L++;
-    }
-    std::vector<Node> o2;
-    for (int i = o.size() - 1; i >= 0; i--) o2.push_back(o[i]);
-    return o2;
-}
-
-int counter = 0;
-
-//Makes a unique token
-std::string mkUniqueToken() {
-    counter++;
-    return unsignedToDecimal(counter);
-}
-
-//Does a file exist? http://stackoverflow.com/questions/12774207
-bool exists(std::string fileName) {
-    std::ifstream infile(fileName.c_str());
-    return infile.good();
-}
-
-//Reads a file: http://stackoverflow.com/questions/2602013
-std::string get_file_contents(std::string filename)
-{
-  std::ifstream in(filename.c_str(), std::ios::in | std::ios::binary);
-  if (in)
-  {
-    std::string contents;
-    in.seekg(0, std::ios::end);
-    contents.resize(in.tellg());
-    in.seekg(0, std::ios::beg);
-    in.read(&contents[0], contents.size());
-    in.close();
-    return(contents);
-  }
-  throw(errno);
-}
-
-//Report error
-void err(std::string errtext, Metadata met) {
-    std::string err = "Error (file \"" + met.file + "\", line " +
-        unsignedToDecimal(met.ln + 1) + ", char " + unsignedToDecimal(met.ch) +
-        "): " + errtext;
-    std::cerr << err << "\n";
-    throw(err);
-}
-
-//Bin to hex
-std::string binToHex(std::string inp) {
-    std::string o = "";
-	for (unsigned i = 0; i < inp.length(); i++) {
-        unsigned char v = inp[i];
-        o += std::string("0123456789abcdef").substr(v/16, 1)
-           + std::string("0123456789abcdef").substr(v%16, 1);
-    }
-    return o;
-}
-
-//Hex to bin
-std::string hexToBin(std::string inp) {
-    std::string o = "";
-	for (unsigned i = 0; i+1 < inp.length(); i+=2) {
-        char v = (char)(std::string("0123456789abcdef").find(inp[i]) * 16 +
-                std::string("0123456789abcdef").find(inp[i+1]));
-        o += v;
-    }
-    return o;
-}
-
-//Lower to upper
-std::string upperCase(std::string inp) {
-    std::string o = "";
-	for (unsigned i = 0; i < inp.length(); i++) {
-        if (inp[i] >= 'a' && inp[i] <= 'z') o += inp[i] - ('a' - 'A');
-        else o += inp[i];
-    }
-    return o;
-}
-
-//Three-int vector
-std::vector<int> triple(int a, int b, int c) {
-    std::vector<int> v;
-    v.push_back(a);
-    v.push_back(b);
-    v.push_back(c);
-    return v;
-}
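
`binToNumeric` above is plain base-256 accumulation over the bytes of a string; in Go the same result falls out of math/big directly. A minimal sketch:

```go
package main

import (
	"fmt"
	"math/big"
)

// binToNumeric interprets a byte string as a big-endian base-256
// number and returns its decimal representation, like the helper above.
func binToNumeric(inp string) string {
	return new(big.Int).SetBytes([]byte(inp)).String()
}

func main() {
	fmt.Println(binToNumeric("george")) // 113685359126373
}
```
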
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h b/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h
deleted file mode 100644
index f7d6744f9e32c4ad6c05e48ce3599d15648c7c10..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/serpent/util.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#ifndef ETHSERP_UTIL
-#define ETHSERP_UTIL
-
-#include <stdio.h>
-#include <iostream>
-#include <vector>
-#include <map>
-#include <fstream>
-#include <cerrno>
-
-const int TOKEN = 0,
-          ASTNODE = 1,
-          SPACE = 2,
-          BRACK = 3,
-          SQUOTE = 4,
-          DQUOTE = 5,
-          SYMB = 6,
-          ALPHANUM = 7,
-          LPAREN = 8,
-          RPAREN = 9,
-          COMMA = 10,
-          COLON = 11,
-          UNARY_OP = 12,
-          BINARY_OP = 13,
-          COMPOUND = 14,
-          TOKEN_SPLITTER = 15;
-
-// Stores metadata about each token
-class Metadata {
-    public:
-        Metadata(std::string File="main", int Ln=-1, int Ch=-1) {
-            file = File;
-            ln = Ln;
-            ch = Ch;
-            fixed = false;
-        }
-        std::string file;
-        int ln;
-        int ch;
-        bool fixed;
-};
-
-std::string mkUniqueToken();
-
-// type can be TOKEN or ASTNODE
-struct Node {
-    int type;
-    std::string val;
-    std::vector<Node> args;
-    Metadata metadata;
-};
-Node token(std::string val, Metadata met=Metadata());
-Node astnode(std::string val, std::vector<Node> args, Metadata met=Metadata());
-Node astnode(std::string val, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b, Node c, Metadata met=Metadata());
-Node astnode(std::string val, Node a, Node b,
-             Node c, Node d, Metadata met=Metadata());
-
-// Number of tokens in a tree
-int treeSize(Node prog);
-
-// Print token list
-std::string printTokens(std::vector<Node> tokens);
-
-// Prints a lisp AST on one line
-std::string printSimple(Node ast);
-
-// Pretty-prints a lisp AST
-std::string printAST(Node ast, bool printMetadata=false);
-
-// Splits text by line
-std::vector<std::string> splitLines(std::string s);
-
-// Inverse of splitLines
-std::string joinLines(std::vector<std::string> lines);
-
-// Indent all lines by 4 spaces
-std::string indentLines(std::string inp);
-
-// Converts binary to simple numeric format
-std::string binToNumeric(std::string inp);
-
-// Converts string to simple numeric format
-std::string strToNumeric(std::string inp);
-
-// Does the node contain a number (e.g. 124, 0xf012c, "george")
-bool isNumberLike(Node node);
-
-//Normalizes number representations
-Node nodeToNumeric(Node node);
-
-//If a node is numeric, normalize its representation
-Node tryNumberize(Node node);
-
-//Converts a value to an array of byte number nodes
-std::vector<Node> toByteArr(std::string val, Metadata metadata, int minLen=1);
-
-//Reads a file
-std::string get_file_contents(std::string filename);
-
-//Does a file exist?
-bool exists(std::string fileName);
-
-//Report error
-void err(std::string errtext, Metadata met);
-
-//Bin to hex
-std::string binToHex(std::string inp);
-
-//Hex to bin
-std::string hexToBin(std::string inp);
-
-//Lower to upper
-std::string upperCase(std::string inp);
-
-//Three-int vector
-std::vector<int> triple(int a, int b, int c);
-
-#define asn astnode
-#define tkn token
-#define msi std::map<std::string, int>
-#define msn std::map<std::string, Node>
-#define mss std::map<std::string, std::string>
-
-#endif
diff --git a/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go b/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go
deleted file mode 100644
index 2f2d17784c499508093954d4a0695cf43b1de275..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/ethereum/serpent-go/tests/main.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	"github.com/ethereum/serpent-go"
-)
-
-func main() {
-	out, _ := serpent.Compile(`
-// Namecoin
-if !contract.storage[msg.data[0]]: # Is the key not yet taken?
-    # Then take it!
-    contract.storage[msg.data[0]] = msg.data[1]
-    return(1)
-else:
-    return(0) // Otherwise do nothing
-	`)
-
-	fmt.Printf("%x\n", out)
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
index 0d7911ecab13cc9cffc443334ddc1c49635e9c41..ccf390c9cff4bd120180dd47b5b58e9a0c00b102 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -8,65 +8,84 @@ package leveldb
 
 import (
 	"encoding/binary"
-	"errors"
+	"fmt"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
 )
 
-var (
-	errBatchTooShort  = errors.New("leveldb: batch is too short")
-	errBatchBadRecord = errors.New("leveldb: bad record in batch")
-)
+type ErrBatchCorrupted struct {
+	Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+	return errors.NewErrCorrupted(nil, &ErrBatchCorrupted{reason})
+}
 
-const kBatchHdrLen = 8 + 4
+const (
+	batchHdrLen  = 8 + 4
+	batchGrowRec = 3000
+)
 
-type batchReplay interface {
-	put(key, value []byte, seq uint64)
-	delete(key []byte, seq uint64)
+type BatchReplay interface {
+	Put(key, value []byte)
+	Delete(key []byte)
 }
 
 // Batch is a write batch.
 type Batch struct {
-	buf        []byte
+	data       []byte
 	rLen, bLen int
 	seq        uint64
 	sync       bool
 }
 
 func (b *Batch) grow(n int) {
-	off := len(b.buf)
+	off := len(b.data)
 	if off == 0 {
-		// include headers
-		off = kBatchHdrLen
-		n += off
+		off = batchHdrLen
+		if b.data != nil {
+			b.data = b.data[:off]
+		}
 	}
-	if cap(b.buf)-off >= n {
-		return
+	if cap(b.data)-off < n {
+		if b.data == nil {
+			b.data = make([]byte, off, off+n)
+		} else {
+			odata := b.data
+			div := 1
+			if b.rLen > batchGrowRec {
+				div = b.rLen / batchGrowRec
+			}
+			b.data = make([]byte, off, off+n+(off-batchHdrLen)/div)
+			copy(b.data, odata)
+		}
 	}
-	buf := make([]byte, 2*cap(b.buf)+n)
-	copy(buf, b.buf)
-	b.buf = buf[:off]
 }
 
-func (b *Batch) appendRec(t vType, key, value []byte) {
+func (b *Batch) appendRec(kt kType, key, value []byte) {
 	n := 1 + binary.MaxVarintLen32 + len(key)
-	if t == tVal {
+	if kt == ktVal {
 		n += binary.MaxVarintLen32 + len(value)
 	}
 	b.grow(n)
-	off := len(b.buf)
-	buf := b.buf[:off+n]
-	buf[off] = byte(t)
+	off := len(b.data)
+	data := b.data[:off+n]
+	data[off] = byte(kt)
 	off += 1
-	off += binary.PutUvarint(buf[off:], uint64(len(key)))
-	copy(buf[off:], key)
+	off += binary.PutUvarint(data[off:], uint64(len(key)))
+	copy(data[off:], key)
 	off += len(key)
-	if t == tVal {
-		off += binary.PutUvarint(buf[off:], uint64(len(value)))
-		copy(buf[off:], value)
+	if kt == ktVal {
+		off += binary.PutUvarint(data[off:], uint64(len(value)))
+		copy(data[off:], value)
 		off += len(value)
 	}
-	b.buf = buf[:off]
+	b.data = data[:off]
 	b.rLen++
 	//  Include 8-byte ikey header
 	b.bLen += len(key) + len(value) + 8
@@ -75,18 +94,51 @@ func (b *Batch) appendRec(t vType, key, value []byte) {
 // Put appends 'put operation' of the given key/value pair to the batch.
 // It is safe to modify the contents of the argument after Put returns.
 func (b *Batch) Put(key, value []byte) {
-	b.appendRec(tVal, key, value)
+	b.appendRec(ktVal, key, value)
 }
 
 // Delete appends 'delete operation' of the given key to the batch.
 // It is safe to modify the contents of the argument after Delete returns.
 func (b *Batch) Delete(key []byte) {
-	b.appendRec(tDel, key, nil)
+	b.appendRec(ktDel, key, nil)
+}
+
+// Dump dumps the batch contents. The returned slice can be loaded into
+// another batch using the Load method.
+// The returned slice shares the batch's internal buffer, so its contents
+// should not be modified.
+func (b *Batch) Dump() []byte {
+	return b.encode()
+}
+
+// Load loads given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+	return b.decode(0, data)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		switch kt {
+		case ktVal:
+			r.Put(key, value)
+		case ktDel:
+			r.Delete(key)
+		}
+	})
+}
+
+// Len returns the number of records in the batch.
+func (b *Batch) Len() int {
+	return b.rLen
 }
 
 // Reset resets the batch.
 func (b *Batch) Reset() {
-	b.buf = nil
+	b.data = b.data[:0]
 	b.seq = 0
 	b.rLen = 0
 	b.bLen = 0
@@ -97,24 +149,10 @@ func (b *Batch) init(sync bool) {
 	b.sync = sync
 }
 
-func (b *Batch) put(key, value []byte, seq uint64) {
-	if b.rLen == 0 {
-		b.seq = seq
-	}
-	b.Put(key, value)
-}
-
-func (b *Batch) delete(key []byte, seq uint64) {
-	if b.rLen == 0 {
-		b.seq = seq
-	}
-	b.Delete(key)
-}
-
 func (b *Batch) append(p *Batch) {
 	if p.rLen > 0 {
-		b.grow(len(p.buf) - kBatchHdrLen)
-		b.buf = append(b.buf, p.buf[kBatchHdrLen:]...)
+		b.grow(len(p.data) - batchHdrLen)
+		b.data = append(b.data, p.data[batchHdrLen:]...)
 		b.rLen += p.rLen
 	}
 	if p.sync {
@@ -122,95 +160,93 @@ func (b *Batch) append(p *Batch) {
 	}
 }
 
-func (b *Batch) len() int {
-	return b.rLen
-}
-
+// size returns the sum of key/value pair lengths plus an 8-byte ikey header per record.
 func (b *Batch) size() int {
 	return b.bLen
 }
 
 func (b *Batch) encode() []byte {
 	b.grow(0)
-	binary.LittleEndian.PutUint64(b.buf, b.seq)
-	binary.LittleEndian.PutUint32(b.buf[8:], uint32(b.rLen))
+	binary.LittleEndian.PutUint64(b.data, b.seq)
+	binary.LittleEndian.PutUint32(b.data[8:], uint32(b.rLen))
 
-	return b.buf
+	return b.data
 }
 
-func (b *Batch) decode(buf []byte) error {
-	if len(buf) < kBatchHdrLen {
-		return errBatchTooShort
+func (b *Batch) decode(prevSeq uint64, data []byte) error {
+	if len(data) < batchHdrLen {
+		return newErrBatchCorrupted("too short")
 	}
 
-	b.seq = binary.LittleEndian.Uint64(buf)
-	b.rLen = int(binary.LittleEndian.Uint32(buf[8:]))
+	b.seq = binary.LittleEndian.Uint64(data)
+	if b.seq < prevSeq {
+		return newErrBatchCorrupted("invalid sequence number")
+	}
+	b.rLen = int(binary.LittleEndian.Uint32(data[8:]))
+	if b.rLen < 0 {
+		return newErrBatchCorrupted("invalid records length")
+	}
 	// No need to be precise at this point, it won't be used anyway
-	b.bLen = len(buf) - kBatchHdrLen
-	b.buf = buf
+	b.bLen = len(data) - batchHdrLen
+	b.data = data
 
 	return nil
 }
 
-func (b *Batch) decodeRec(f func(i int, t vType, key, value []byte)) error {
-	off := kBatchHdrLen
+func (b *Batch) decodeRec(f func(i int, kt kType, key, value []byte)) (err error) {
+	off := batchHdrLen
 	for i := 0; i < b.rLen; i++ {
-		if off >= len(b.buf) {
-			return errors.New("leveldb: invalid batch record length")
+		if off >= len(b.data) {
+			return newErrBatchCorrupted("invalid records length")
 		}
 
-		t := vType(b.buf[off])
-		if t > tVal {
-			return errors.New("leveldb: invalid batch record type in batch")
+		kt := kType(b.data[off])
+		if kt > ktVal {
+			return newErrBatchCorrupted("bad record: invalid type")
 		}
 		off += 1
 
-		x, n := binary.Uvarint(b.buf[off:])
+		x, n := binary.Uvarint(b.data[off:])
 		off += n
-		if n <= 0 || off+int(x) > len(b.buf) {
-			return errBatchBadRecord
+		if n <= 0 || off+int(x) > len(b.data) {
+			return newErrBatchCorrupted("bad record: invalid key length")
 		}
-		key := b.buf[off : off+int(x)]
+		key := b.data[off : off+int(x)]
 		off += int(x)
-
 		var value []byte
-		if t == tVal {
-			x, n := binary.Uvarint(b.buf[off:])
+		if kt == ktVal {
+			x, n := binary.Uvarint(b.data[off:])
 			off += n
-			if n <= 0 || off+int(x) > len(b.buf) {
-				return errBatchBadRecord
+			if n <= 0 || off+int(x) > len(b.data) {
+				return newErrBatchCorrupted("bad record: invalid value length")
 			}
-			value = b.buf[off : off+int(x)]
+			value = b.data[off : off+int(x)]
 			off += int(x)
 		}
 
-		f(i, t, key, value)
+		f(i, kt, key, value)
 	}
 
 	return nil
 }
 
-func (b *Batch) replay(to batchReplay) error {
-	return b.decodeRec(func(i int, t vType, key, value []byte) {
-		switch t {
-		case tVal:
-			to.put(key, value, b.seq+uint64(i))
-		case tDel:
-			to.delete(key, b.seq+uint64(i))
-		}
-	})
-}
-
 func (b *Batch) memReplay(to *memdb.DB) error {
-	return b.decodeRec(func(i int, t vType, key, value []byte) {
-		ikey := newIKey(key, b.seq+uint64(i), t)
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		ikey := newIkey(key, b.seq+uint64(i), kt)
 		to.Put(ikey, value)
 	})
 }
 
+func (b *Batch) memDecodeAndReplay(prevSeq uint64, data []byte, to *memdb.DB) error {
+	if err := b.decode(prevSeq, data); err != nil {
+		return err
+	}
+	return b.memReplay(to)
+}
+
 func (b *Batch) revertMemReplay(to *memdb.DB) error {
-	return b.decodeRec(func(i int, t vType, key, value []byte) {
-		ikey := newIKey(key, b.seq+uint64(i), t)
+	return b.decodeRec(func(i int, kt kType, key, value []byte) {
+		ikey := newIkey(key, b.seq+uint64(i), kt)
 		to.Delete(ikey)
 	})
 }
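
The batch.go changes above replace the unexported replay/len methods with an exported surface: the BatchReplay interface plus Replay, Dump, Load, and Len. A small usage sketch against the upstream import path (error handling abbreviated):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// printReplay implements the new BatchReplay interface.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("put %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("del %s\n", key) }

func main() {
	b := new(leveldb.Batch)
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k2"))

	// Dump/Load round-trip the batch's internal encoding; the dumped
	// slice aliases the batch buffer, so it must not be modified.
	b2 := new(leveldb.Batch)
	if err := b2.Load(b.Dump()); err != nil {
		panic(err)
	}
	fmt.Println("records:", b2.Len()) // 2

	if err := b2.Replay(printReplay{}); err != nil {
		panic(err)
	}
}
```
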
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
index 19b749b8ffdb55d7a1c5eb0e0e508b034392b4b6..7fc842f4fedd6f8dbd9902945d6098d4d556b6f4 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
@@ -15,7 +15,7 @@ import (
 )
 
 type tbRec struct {
-	t          vType
+	kt         kType
 	key, value []byte
 }
 
@@ -23,39 +23,39 @@ type testBatch struct {
 	rec []*tbRec
 }
 
-func (p *testBatch) put(key, value []byte, seq uint64) {
-	p.rec = append(p.rec, &tbRec{tVal, key, value})
+func (p *testBatch) Put(key, value []byte) {
+	p.rec = append(p.rec, &tbRec{ktVal, key, value})
 }
 
-func (p *testBatch) delete(key []byte, seq uint64) {
-	p.rec = append(p.rec, &tbRec{tDel, key, nil})
+func (p *testBatch) Delete(key []byte) {
+	p.rec = append(p.rec, &tbRec{ktDel, key, nil})
 }
 
 func compareBatch(t *testing.T, b1, b2 *Batch) {
 	if b1.seq != b2.seq {
 		t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq)
 	}
-	if b1.len() != b2.len() {
-		t.Fatalf("invalid record length want %d, got %d", b1.len(), b2.len())
+	if b1.Len() != b2.Len() {
+		t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len())
 	}
 	p1, p2 := new(testBatch), new(testBatch)
-	err := b1.replay(p1)
+	err := b1.Replay(p1)
 	if err != nil {
 		t.Fatal("error when replaying batch 1: ", err)
 	}
-	err = b2.replay(p2)
+	err = b2.Replay(p2)
 	if err != nil {
 		t.Fatal("error when replaying batch 2: ", err)
 	}
 	for i := range p1.rec {
 		r1, r2 := p1.rec[i], p2.rec[i]
-		if r1.t != r2.t {
-			t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.t, r2.t)
+		if r1.kt != r2.kt {
+			t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt)
 		}
 		if !bytes.Equal(r1.key, r2.key) {
 			t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key))
 		}
-		if r1.t == tVal {
+		if r1.kt == ktVal {
 			if !bytes.Equal(r1.value, r2.value) {
 				t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value))
 			}
@@ -75,7 +75,7 @@ func TestBatch_EncodeDecode(t *testing.T) {
 	b1.Delete([]byte("k"))
 	buf := b1.encode()
 	b2 := new(Batch)
-	err := b2.decode(buf)
+	err := b2.decode(0, buf)
 	if err != nil {
 		t.Error("error when decoding batch: ", err)
 	}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..0dd60fd829bb83fbe497a9838e6c73e462729c6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench2_test.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package leveldb
+
+import (
+	"sync/atomic"
+	"testing"
+)
+
+func BenchmarkDBReadConcurrent(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+	defer p.close()
+
+	b.ResetTimer()
+	b.SetBytes(116)
+
+	b.RunParallel(func(pb *testing.PB) {
+		iter := p.newIter()
+		defer iter.Release()
+		for pb.Next() && iter.Next() {
+		}
+	})
+}
+
+func BenchmarkDBReadConcurrent2(b *testing.B) {
+	p := openDBBench(b, false)
+	p.populate(b.N)
+	p.fill()
+	p.gc()
+	defer p.close()
+
+	b.ResetTimer()
+	b.SetBytes(116)
+
+	var dir uint32
+	b.RunParallel(func(pb *testing.PB) {
+		iter := p.newIter()
+		defer iter.Release()
+		if atomic.AddUint32(&dir, 1)%2 == 0 {
+			for pb.Next() && iter.Next() {
+			}
+		} else {
+			if pb.Next() && iter.Last() {
+				for pb.Next() && iter.Prev() {
+				}
+			}
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
index ea6801a89fa1673b55c36d78eaf60ac1fc8b037c..91b426709d59a8fb0860cec31ba601f9daca3605 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
@@ -170,7 +170,7 @@ func (p *dbBench) writes(perBatch int) {
 	b.SetBytes(116)
 }
 
-func (p *dbBench) drop() {
+func (p *dbBench) gc() {
 	p.keys, p.values = nil, nil
 	runtime.GC()
 }
@@ -249,6 +249,9 @@ func (p *dbBench) newIter() iterator.Iterator {
 }
 
 func (p *dbBench) close() {
+	if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
+		p.b.Log("Block pool stats: ", bp)
+	}
 	p.db.Close()
 	p.stor.Close()
 	os.RemoveAll(benchDB)
@@ -331,7 +334,7 @@ func BenchmarkDBRead(b *testing.B) {
 	p := openDBBench(b, false)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -362,7 +365,7 @@ func BenchmarkDBReadUncompressed(b *testing.B) {
 	p := openDBBench(b, true)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -379,7 +382,7 @@ func BenchmarkDBReadTable(b *testing.B) {
 	p.populate(b.N)
 	p.fill()
 	p.reopen()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -395,7 +398,7 @@ func BenchmarkDBReadReverse(b *testing.B) {
 	p := openDBBench(b, false)
 	p.populate(b.N)
 	p.fill()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
@@ -413,7 +416,7 @@ func BenchmarkDBReadReverseTable(b *testing.B) {
 	p.populate(b.N)
 	p.fill()
 	p.reopen()
-	p.drop()
+	p.gc()
 
 	iter := p.newIter()
 	b.ResetTimer()
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..175e2220323c8428957d1cad24aa9e8604f8a23e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/bench2_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package cache
+
+import (
+	"math/rand"
+	"testing"
+	"time"
+)
+
+func BenchmarkLRUCache(b *testing.B) {
+	c := NewCache(NewLRU(10000))
+
+	b.SetParallelism(10)
+	b.RunParallel(func(pb *testing.PB) {
+		r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+		for pb.Next() {
+			key := uint64(r.Intn(1000000))
+			c.Get(0, key, func() (int, Value) {
+				return 1, key
+			}).Release()
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
index 9b6a74977709ed71d86e96660bd3ce6c77c40806..c9670de5de6f70f4ba905df39776f67a42ff32d1 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -8,118 +8,669 @@
 package cache
 
 import (
+	"sync"
 	"sync/atomic"
+	"unsafe"
+
+	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-// SetFunc used by Namespace.Get method to create a cache object. SetFunc
-// may return ok false, in that case the cache object will not be created.
-type SetFunc func() (ok bool, value interface{}, charge int, fin SetFin)
+// Cacher provides an interface for implementing caching functionality.
+// An implementation must be goroutine-safe.
+type Cacher interface {
+	// Capacity returns cache capacity.
+	Capacity() int
 
-// SetFin will be called when corresponding cache object are released.
-type SetFin func()
+	// SetCapacity sets cache capacity.
+	SetCapacity(capacity int)
 
-// DelFin will be called when corresponding cache object are released.
-// DelFin will be called after SetFin. The exist is true if the corresponding
-// cache object is actually exist in the cache tree.
-type DelFin func(exist bool)
+	// Promote promotes the 'cache node'.
+	Promote(n *Node)
 
-// PurgeFin will be called when corresponding cache object are released.
-// PurgeFin will be called after SetFin. If PurgeFin present DelFin will
-// not be executed but passed to the PurgeFin, it is up to the caller
-// to call it or not.
-type PurgeFin func(ns, key uint64, delfin DelFin)
+	// Ban evicts the 'cache node' and prevents subsequent 'promote'.
+	Ban(n *Node)
 
-// Cache is a cache tree.
-type Cache interface {
-	// SetCapacity sets cache capacity.
-	SetCapacity(capacity int)
+	// Evict evicts the 'cache node'.
+	Evict(n *Node)
 
-	// GetNamespace gets or creates a cache namespace for the given id.
-	GetNamespace(id uint64) Namespace
+	// EvictNS evicts every 'cache node' in the given namespace.
+	EvictNS(ns uint64)
 
-	// Purge purges all cache namespaces, read Namespace.Purge method documentation.
-	Purge(fin PurgeFin)
+	// EvictAll evicts all 'cache node'.
+	EvictAll()
+
+	// Close closes the cacher.
+	Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
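+// CacheGetter wraps a Cache with a fixed namespace, so callers can Get
+// by key alone.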
+type CacheGetter struct {
+	Cache *Cache
+	NS    uint64
+}
 
-	// Zap zaps all cache namespaces, read Namespace.Zap method documentation.
-	Zap(closed bool)
+func (g *CacheGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+	return g.Cache.Get(g.NS, key, setFunc)
 }
 
-// Namespace is a cache namespace.
-type Namespace interface {
-	// Get gets cache object for the given key. The given SetFunc (if not nil) will
-	// be called if the given key does not exist.
-	// If the given key does not exist, SetFunc is nil or SetFunc return ok false, Get
-	// will return ok false.
-	Get(key uint64, setf SetFunc) (obj Object, ok bool)
+// The hash table implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu, Kunlong Zhang, and Michael Spear. ACM Symposium on Principles of Distributed Computing, Jul 2014.
 
-	// Get deletes cache object for the given key. If exist the cache object will
-	// be deleted later when all of its handles have been released (i.e. no one use
-	// it anymore) and the given DelFin (if not nil) will finally be  executed. If
-	// such cache object does not exist the given DelFin will be executed anyway.
-	//
-	// Delete returns true if such cache object exist.
-	Delete(key uint64, fin DelFin) bool
+const (
+	mInitialSize           = 1 << 4
+	mOverflowThreshold     = 1 << 5
+	mOverflowGrowThreshold = 1 << 7
+)
 
-	// Purge deletes all cache objects, read Delete method documentation.
-	Purge(fin PurgeFin)
+type mBucket struct {
+	mu     sync.Mutex
+	node   []*Node
+	frozen bool
+}
 
-	// Zap detaches the namespace from the cache tree and delete all its cache
-	// objects. The cache objects deletion and finalizers execution are happen
-	// immediately, even if its existing handles haven't yet been released.
-	// A zapped namespace can't never be filled again.
-	// If closed is false then the Get function will always call the given SetFunc
-	// if it is not nil, but resultant of the SetFunc will not be cached.
-	Zap(closed bool)
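+// freeze marks the bucket as frozen, forcing writers that raced with a
+// table resize to retry against the new head, and returns a stable
+// snapshot of the bucket's nodes for splitting or merging.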
+func (b *mBucket) freeze() []*Node {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if !b.frozen {
+		b.frozen = true
+	}
+	return b.node
 }
 
-// Object is a cache object.
-type Object interface {
-	// Release releases the cache object. Other methods should not be called
-	// after the cache object has been released.
-	Release()
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+	b.mu.Lock()
+
+	if b.frozen {
+		b.mu.Unlock()
+		return
+	}
+
+	// Scan the node.
+	for _, n := range b.node {
+		if n.hash == hash && n.ns == ns && n.key == key {
+			atomic.AddInt32(&n.ref, 1)
+			b.mu.Unlock()
+			return true, false, n
+		}
+	}
+
+	// Get only.
+	if noset {
+		b.mu.Unlock()
+		return true, false, nil
+	}
+
+	// Create node.
+	n = &Node{
+		r:    r,
+		hash: hash,
+		ns:   ns,
+		key:  key,
+		ref:  1,
+	}
+	// Add node to bucket.
+	b.node = append(b.node, n)
+	bLen := len(b.node)
+	b.mu.Unlock()
+
+	// Update counter.
+	grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+	if bLen > mOverflowThreshold {
+		grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+	}
 
-	// Value returns value of the cache object.
-	Value() interface{}
+	// Grow.
+	if grow && atomic.CompareAndSwapInt32(&h.resizeInProgress, 0, 1) {
+		nhLen := len(h.buckets) << 1
+		nh := &mNode{
+			buckets:         make([]unsafe.Pointer, nhLen),
+			mask:            uint32(nhLen) - 1,
+			pred:            unsafe.Pointer(h),
+			growThreshold:   int32(nhLen * mOverflowThreshold),
+			shrinkThreshold: int32(nhLen >> 1),
+		}
+		ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+		if !ok {
+			panic("BUG: failed swapping head")
+		}
+		go nh.initBuckets()
+	}
+
+	return true, true, n
 }
 
-// Namespace state.
-type nsState int
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+	b.mu.Lock()
 
-const (
-	nsEffective nsState = iota
-	nsZapped
-	nsClosed
-)
+	if b.frozen {
+		b.mu.Unlock()
+		return
+	}
 
-// Node state.
-type nodeState int
+	// Scan the node.
+	var (
+		n    *Node
+		bLen int
+	)
+	for i := range b.node {
+		n = b.node[i]
+		if n.ns == ns && n.key == key {
+			if atomic.LoadInt32(&n.ref) == 0 {
+				deleted = true
 
-const (
-	nodeEffective nodeState = iota
-	nodeEvicted
-	nodeRemoved
-)
+				// Call releaser.
+				if n.value != nil {
+					if r, ok := n.value.(util.Releaser); ok {
+						r.Release()
+					}
+					n.value = nil
+				}
+
+				// Remove node from bucket.
+				b.node = append(b.node[:i], b.node[i+1:]...)
+				bLen = len(b.node)
+			}
+			break
+		}
+	}
+	b.mu.Unlock()
 
-// Fake object.
-type fakeObject struct {
-	value interface{}
-	fin   func()
-	once  uint32
+	if deleted {
+		// Call OnDel.
+		for _, f := range n.onDel {
+			f()
+		}
+
+		// Update counter.
+		atomic.AddInt32(&r.size, int32(n.size)*-1)
+		shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+		if bLen >= mOverflowThreshold {
+			atomic.AddInt32(&h.overflow, -1)
+		}
+
+		// Shrink.
+		if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgress, 0, 1) {
+			nhLen := len(h.buckets) >> 1
+			nh := &mNode{
+				buckets:         make([]unsafe.Pointer, nhLen),
+				mask:            uint32(nhLen) - 1,
+				pred:            unsafe.Pointer(h),
+				growThreshold:   int32(nhLen * mOverflowThreshold),
+				shrinkThreshold: int32(nhLen >> 1),
+			}
+			ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+			if !ok {
+				panic("BUG: failed swapping head")
+			}
+			go nh.initBuckets()
+		}
+	}
+
+	return true, deleted
 }
 
-func (o *fakeObject) Value() interface{} {
-	if atomic.LoadUint32(&o.once) == 0 {
-		return o.value
+type mNode struct {
+	buckets         []unsafe.Pointer // []*mBucket
+	mask            uint32
+	pred            unsafe.Pointer // *mNode
+	resizeInProgress int32
+
+	overflow        int32
+	growThreshold   int32
+	shrinkThreshold int32
+}
+
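+// initBucket lazily populates bucket i from the predecessor table: on a
+// grow it splits one old bucket in two, on a shrink it merges two old
+// buckets, freezing the sources first so the move cannot race writers.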
+func (n *mNode) initBucket(i uint32) *mBucket {
+	if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+		return b
+	}
+
+	p := (*mNode)(atomic.LoadPointer(&n.pred))
+	if p != nil {
+		var node []*Node
+		if n.mask > p.mask {
+			// Grow.
+			pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+			if pb == nil {
+				pb = p.initBucket(i & p.mask)
+			}
+			m := pb.freeze()
+			// Split nodes.
+			for _, x := range m {
+				if x.hash&n.mask == i {
+					node = append(node, x)
+				}
+			}
+		} else {
+			// Shrink.
+			pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+			if pb0 == nil {
+				pb0 = p.initBucket(i)
+			}
+			pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+			if pb1 == nil {
+				pb1 = p.initBucket(i + uint32(len(n.buckets)))
+			}
+			m0 := pb0.freeze()
+			m1 := pb1.freeze()
+			// Merge nodes.
+			node = make([]*Node, 0, len(m0)+len(m1))
+			node = append(node, m0...)
+			node = append(node, m1...)
+		}
+		b := &mBucket{node: node}
+		if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+			if len(node) > mOverflowThreshold {
+				atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+			}
+			return b
+		}
+	}
+
+	return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+	for i := range n.buckets {
+		n.initBucket(uint32(i))
+	}
+	atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+	mu     sync.RWMutex
+	mHead  unsafe.Pointer // *mNode
+	nodes  int32
+	size   int32
+	cacher Cacher
+	closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+	h := &mNode{
+		buckets:         make([]unsafe.Pointer, mInitialSize),
+		mask:            mInitialSize - 1,
+		growThreshold:   int32(mInitialSize * mOverflowThreshold),
+		shrinkThreshold: 0,
+	}
+	for i := range h.buckets {
+		h.buckets[i] = unsafe.Pointer(&mBucket{})
+	}
+	r := &Cache{
+		mHead:  unsafe.Pointer(h),
+		cacher: cacher,
+	}
+	return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+	h := (*mNode)(atomic.LoadPointer(&r.mHead))
+	i := hash & h.mask
+	b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+	if b == nil {
+		b = h.initBucket(i)
+	}
+	return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+	for {
+		h, b := r.getBucket(n.hash)
+		done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+		if done {
+			return deleted
+		}
+	}
+}
+
+// Nodes returns the number of 'cache nodes' in the map.
+func (r *Cache) Nodes() int {
+	return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns the sum of 'cache node' sizes in the map.
+func (r *Cache) Size() int {
+	return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+	if r.cacher == nil {
+		return 0
+	}
+	return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+	if r.cacher != nil {
+		r.cacher.SetCapacity(capacity)
+	}
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the 'cache node' is not found and setFunc is not nil, Get will atomically
+// create the 'cache node' by calling setFunc. Otherwise Get returns nil.
+//
+// The returned 'cache handle' should be released after use by calling its
+// Release method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return nil
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+		if done {
+			if n != nil {
+				n.mu.Lock()
+				if n.value == nil {
+					if setFunc == nil {
+						n.mu.Unlock()
+						n.unref()
+						return nil
+					}
+
+					n.size, n.value = setFunc()
+					if n.value == nil {
+						n.size = 0
+						n.mu.Unlock()
+						n.unref()
+						return nil
+					}
+					atomic.AddInt32(&r.size, int32(n.size))
+				}
+				n.mu.Unlock()
+				if r.cacher != nil {
+					r.cacher.Promote(n)
+				}
+				return &Handle{unsafe.Pointer(n)}
+			}
+
+			break
+		}
 	}
 	return nil
 }
 
-func (o *fakeObject) Release() {
-	if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache map'. The ban
+// applies only to that particular 'cache node', so a recreated 'cache node'
+// will not be banned.
+//
+// If onDel is not nil, it will be executed immediately if no such 'cache
+// node' exists, or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return false
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, true)
+		if done {
+			if n != nil {
+				if onDel != nil {
+					n.mu.Lock()
+					n.onDel = append(n.onDel, onDel)
+					n.mu.Unlock()
+				}
+				if r.cacher != nil {
+					r.cacher.Ban(n)
+				}
+				n.unref()
+				return true
+			}
+
+			break
+		}
+	}
+
+	if onDel != nil {
+		onDel()
+	}
+
+	return false
+}
+
+// Evict evicts the 'cache node' with the given namespace and key. This
+// simply calls Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return false
+	}
+
+	hash := murmur32(ns, key, 0xf00)
+	for {
+		h, b := r.getBucket(hash)
+		done, _, n := b.get(r, h, hash, ns, key, true)
+		if done {
+			if n != nil {
+				if r.cacher != nil {
+					r.cacher.Evict(n)
+				}
+				n.unref()
+				return true
+			}
+
+			break
+		}
+	}
+
+	return false
+}
+
+// EvictNS evicts every 'cache node' in the given namespace. This simply
+// calls Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
+		return
+	}
+
+	if r.cacher != nil {
+		r.cacher.EvictNS(ns)
+	}
+}
+
+// EvictAll evicts every 'cache node'. This simply calls Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.closed {
 		return
 	}
-	if o.fin != nil {
-		o.fin()
-		o.fin = nil
+
+	if r.cacher != nil {
+		r.cacher.EvictAll()
+	}
+}
+
+// Close closes the 'cache map' and releases every 'cache node'.
+func (r *Cache) Close() error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if !r.closed {
+		r.closed = true
+
+		if r.cacher != nil {
+			if err := r.cacher.Close(); err != nil {
+				// The deferred unlock above keeps this early return
+				// from leaking the lock.
+				return err
+			}
+		}
+
+		h := (*mNode)(r.mHead)
+		h.initBuckets()
+
+		for i := range h.buckets {
+			b := (*mBucket)(h.buckets[i])
+			for _, n := range b.node {
+				// Call releaser.
+				if n.value != nil {
+					if r, ok := n.value.(util.Releaser); ok {
+						r.Release()
+					}
+					n.value = nil
+				}
+
+				// Call OnDel.
+				for _, f := range n.onDel {
+					f()
+				}
+			}
+		}
 	}
+	return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+	r *Cache
+
+	hash    uint32
+	ns, key uint64
+
+	mu    sync.Mutex
+	size  int
+	value Value
+
+	ref   int32
+	onDel []func()
+
+	CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+	return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+	return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+	return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+	return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+	return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+	if atomic.AddInt32(&n.ref, 1) <= 1 {
+		panic("BUG: Node.GetHandle on zero ref")
+	}
+	return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+	if atomic.AddInt32(&n.ref, -1) == 0 {
+		n.r.delete(n)
+	}
+}
+
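+// unrefLocked is the Handle.Release path: it takes the cache's read lock
+// and skips deletion once the cache is closed, so a late Release cannot
+// race Close's teardown of the map.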
+func (n *Node) unrefLocked() {
+	if atomic.AddInt32(&n.ref, -1) == 0 {
+		n.r.mu.RLock()
+		if !n.r.closed {
+			n.r.delete(n)
+		}
+		n.r.mu.RUnlock()
+	}
+}
+
+type Handle struct {
+	n unsafe.Pointer // *Node
+}
+
+func (h *Handle) Value() Value {
+	n := (*Node)(atomic.LoadPointer(&h.n))
+	if n != nil {
+		return n.value
+	}
+	return nil
+}
+
+func (h *Handle) Release() {
+	nPtr := atomic.LoadPointer(&h.n)
+	if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+		n := (*Node)(nPtr)
+		n.unrefLocked()
+	}
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+	const (
+		m = uint32(0x5bd1e995)
+		r = 24
+	)
+
+	k1 := uint32(ns >> 32)
+	k2 := uint32(ns)
+	k3 := uint32(key >> 32)
+	k4 := uint32(key)
+
+	k1 *= m
+	k1 ^= k1 >> r
+	k1 *= m
+
+	k2 *= m
+	k2 ^= k2 >> r
+	k2 *= m
+
+	k3 *= m
+	k3 ^= k3 >> r
+	k3 *= m
+
+	k4 *= m
+	k4 ^= k4 >> r
+	k4 *= m
+
+	h := seed
+
+	h *= m
+	h ^= k1
+	h *= m
+	h ^= k2
+	h *= m
+	h ^= k3
+	h *= m
+	h ^= k4
+
+	h ^= h >> 13
+	h *= m
+	h ^= h >> 15
+
+	return h
 }
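To see how the rewritten API fits together, here is a minimal usage sketch. It assumes the vendored import path shown in this diff; the namespace and key values are arbitrary:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// A 'cache map' with no backing Cacher: a node lives only while a
	// Handle still references it.
	c := cache.NewCache(nil)

	// On a miss, Get atomically creates the node via setFunc.
	h := c.Get(0, 42, func() (int, cache.Value) {
		return 1, "hello"
	})
	fmt.Println(h.Value()) // hello

	// With a nil setFunc, Get is a pure lookup.
	if h2 := c.Get(0, 42, nil); h2 != nil {
		fmt.Println(h2.Value()) // hello
		h2.Release()
	}

	// Releasing the last handle lets the node be deleted.
	h.Release()
	c.Close()
}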
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
index 07a9939b24802a26ddfc2abd12cc55aee319e6ea..c2a50156f0472c99d3b60022781435fc18cf8947 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
@@ -8,17 +8,289 @@ package cache
 
 import (
 	"math/rand"
+	"runtime"
+	"sync"
+	"sync/atomic"
 	"testing"
+	"time"
+	"unsafe"
 )
 
-func set(ns Namespace, key uint64, value interface{}, charge int, fin func()) Object {
-	obj, _ := ns.Get(key, func() (bool, interface{}, int, SetFin) {
-		return true, value, charge, fin
+type int32o int32
+
+func (o *int32o) acquire() {
+	if atomic.AddInt32((*int32)(o), 1) != 1 {
+		panic("BUG: invalid ref")
+	}
+}
+
+func (o *int32o) Release() {
+	if atomic.AddInt32((*int32)(o), -1) != 0 {
+		panic("BUG: invalid ref")
+	}
+}
+
+type releaserFunc struct {
+	fn    func()
+	value Value
+}
+
+func (r releaserFunc) Release() {
+	if r.fn != nil {
+		r.fn()
+	}
+}
+
+func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
+	return c.Get(ns, key, func() (int, Value) {
+		if relf != nil {
+			return charge, releaserFunc{relf, value}
+		} else {
+			return charge, value
+		}
+	})
+}
+
+func TestCacheMap(t *testing.T) {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	nsx := []struct {
+		nobjects, nhandles, concurrent, repeat int
+	}{
+		{10000, 400, 50, 3},
+		{100000, 1000, 100, 10},
+	}
+
+	var (
+		objects [][]int32o
+		handles [][]unsafe.Pointer
+	)
+
+	for _, x := range nsx {
+		objects = append(objects, make([]int32o, x.nobjects))
+		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
+	}
+
+	c := NewCache(nil)
+
+	wg := new(sync.WaitGroup)
+	var done int32
+
+	for ns, x := range nsx {
+		for i := 0; i < x.concurrent; i++ {
+			wg.Add(1)
+			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
+				defer wg.Done()
+				r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+				for j := len(objects) * repeat; j >= 0; j-- {
+					key := uint64(r.Intn(len(objects)))
+					h := c.Get(uint64(ns), key, func() (int, Value) {
+						o := &objects[key]
+						o.acquire()
+						return 1, o
+					})
+					if v := h.Value().(*int32o); v != &objects[key] {
+						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
+					}
+					if objects[key] != 1 {
+						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
+					}
+					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
+						h.Release()
+					}
+				}
+			}(ns, i, x.repeat, objects[ns], handles[ns])
+		}
+
+		go func(handles []unsafe.Pointer) {
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+			for atomic.LoadInt32(&done) == 0 {
+				i := r.Intn(len(handles))
+				h := (*Handle)(atomic.LoadPointer(&handles[i]))
+				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
+					h.Release()
+				}
+				time.Sleep(time.Millisecond)
+			}
+		}(handles[ns])
+	}
+
+	go func() {
+		handles := make([]*Handle, 100000)
+		for atomic.LoadInt32(&done) == 0 {
+			for i := range handles {
+				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
+					return 1, 1
+				})
+			}
+			for _, h := range handles {
+				h.Release()
+			}
+		}
+	}()
+
+	wg.Wait()
+
+	atomic.StoreInt32(&done, 1)
+
+	for _, handles0 := range handles {
+		for i := range handles0 {
+			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
+			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
+				h.Release()
+			}
+		}
+	}
+
+	for ns, objects0 := range objects {
+		for i, o := range objects0 {
+			if o != 0 {
+				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
+			}
+		}
+	}
+}
+
+func TestCacheMap_NodesAndSize(t *testing.T) {
+	c := NewCache(nil)
+	if c.Nodes() != 0 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+	}
+	if c.Size() != 0 {
+		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+	}
+	set(c, 0, 1, 1, 1, nil)
+	set(c, 0, 2, 2, 2, nil)
+	set(c, 1, 1, 3, 3, nil)
+	set(c, 2, 1, 4, 1, nil)
+	if c.Nodes() != 4 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
+	}
+	if c.Size() != 7 {
+		t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
+	}
+}
+
+func TestLRUCache_Capacity(t *testing.T) {
+	c := NewCache(NewLRU(10))
+	if c.Capacity() != 10 {
+		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
+	}
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 2, nil).Release()
+	set(c, 1, 1, 3, 3, nil).Release()
+	set(c, 2, 1, 4, 1, nil).Release()
+	set(c, 2, 2, 5, 1, nil).Release()
+	set(c, 2, 3, 6, 1, nil).Release()
+	set(c, 2, 4, 7, 1, nil).Release()
+	set(c, 2, 5, 8, 1, nil).Release()
+	if c.Nodes() != 7 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
+	}
+	if c.Size() != 10 {
+		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
+	}
+	c.SetCapacity(9)
+	if c.Capacity() != 9 {
+		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
+	}
+	if c.Nodes() != 6 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
+	}
+	if c.Size() != 8 {
+		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
+	}
+}
+
+func TestCacheMap_NilValue(t *testing.T) {
+	c := NewCache(NewLRU(10))
+	h := c.Get(0, 0, func() (size int, value Value) {
+		return 1, nil
 	})
-	return obj
+	if h != nil {
+		t.Error("cache handle is non-nil")
+	}
+	if c.Nodes() != 0 {
+		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
+	}
+	if c.Size() != 0 {
+		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
+	}
 }
 
-func TestCache_HitMiss(t *testing.T) {
+func TestLRUCache_GetLatency(t *testing.T) {
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	const (
+		concurrentSet = 30
+		concurrentGet = 3
+		duration      = 3 * time.Second
+		delay         = 3 * time.Millisecond
+		maxkey        = 100000
+	)
+
+	var (
+		set, getHit, getAll        int32
+		getMaxLatency, getDuration int64
+	)
+
+	c := NewCache(NewLRU(5000))
+	wg := &sync.WaitGroup{}
+	until := time.Now().Add(duration)
+	for i := 0; i < concurrentSet; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+			for time.Now().Before(until) {
+				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
+					time.Sleep(delay)
+					atomic.AddInt32(&set, 1)
+					return 1, 1
+				}).Release()
+			}
+		}(i)
+	}
+	for i := 0; i < concurrentGet; i++ {
+		wg.Add(1)
+		go func(i int) {
+			defer wg.Done()
+			r := rand.New(rand.NewSource(time.Now().UnixNano()))
+			for {
+				mark := time.Now()
+				if mark.Before(until) {
+					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
+					latency := int64(time.Now().Sub(mark))
+					m := atomic.LoadInt64(&getMaxLatency)
+					if latency > m {
+						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
+					}
+					atomic.AddInt64(&getDuration, latency)
+					if h != nil {
+						atomic.AddInt32(&getHit, 1)
+						h.Release()
+					}
+					atomic.AddInt32(&getAll, 1)
+				} else {
+					break
+				}
+			}
+		}(i)
+	}
+
+	wg.Wait()
+	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
+	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
+		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
+
+	if getAvglatency > delay/3 {
+		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
+	}
+}
+
+func TestLRUCache_HitMiss(t *testing.T) {
 	cases := []struct {
 		key   uint64
 		value string
@@ -36,36 +308,37 @@ func TestCache_HitMiss(t *testing.T) {
 	}
 
 	setfin := 0
-	c := NewLRUCache(1000)
-	ns := c.GetNamespace(0)
+	c := NewCache(NewLRU(1000))
 	for i, x := range cases {
-		set(ns, x.key, x.value, len(x.value), func() {
+		set(c, 0, x.key, x.value, len(x.value), func() {
 			setfin++
 		}).Release()
 		for j, y := range cases {
-			r, ok := ns.Get(y.key, nil)
+			h := c.Get(0, y.key, nil)
 			if j <= i {
 				// should hit
-				if !ok {
+				if h == nil {
 					t.Errorf("case '%d' iteration '%d' is miss", i, j)
-				} else if r.Value().(string) != y.value {
-					t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+				} else {
+					if x := h.Value().(releaserFunc).value.(string); x != y.value {
+						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+					}
 				}
 			} else {
 				// should miss
-				if ok {
-					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, r.Value().(string))
+				if h != nil {
+					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
 				}
 			}
-			if ok {
-				r.Release()
+			if h != nil {
+				h.Release()
 			}
 		}
 	}
 
 	for i, x := range cases {
 		finalizerOk := false
-		ns.Delete(x.key, func(exist bool) {
+		c.Delete(0, x.key, func() {
 			finalizerOk = true
 		})
 
@@ -74,22 +347,24 @@ func TestCache_HitMiss(t *testing.T) {
 		}
 
 		for j, y := range cases {
-			r, ok := ns.Get(y.key, nil)
+			h := c.Get(0, y.key, nil)
 			if j > i {
 				// should hit
-				if !ok {
+				if h == nil {
 					t.Errorf("case '%d' iteration '%d' is miss", i, j)
-				} else if r.Value().(string) != y.value {
-					t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, r.Value().(string), y.value)
+				} else {
+					if x := h.Value().(releaserFunc).value.(string); x != y.value {
+						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
+					}
 				}
 			} else {
 				// should miss
-				if ok {
-					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, r.Value().(string))
+				if h != nil {
+					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
 				}
 			}
-			if ok {
-				r.Release()
+			if h != nil {
+				h.Release()
 			}
 		}
 	}
@@ -100,137 +375,180 @@ func TestCache_HitMiss(t *testing.T) {
 }
 
 func TestLRUCache_Eviction(t *testing.T) {
-	c := NewLRUCache(12)
-	ns := c.GetNamespace(0)
-	o1 := set(ns, 1, 1, 1, nil)
-	set(ns, 2, 2, 1, nil).Release()
-	set(ns, 3, 3, 1, nil).Release()
-	set(ns, 4, 4, 1, nil).Release()
-	set(ns, 5, 5, 1, nil).Release()
-	if r, ok := ns.Get(2, nil); ok { // 1,3,4,5,2
-		r.Release()
-	}
-	set(ns, 9, 9, 10, nil).Release() // 5,2,9
-
-	for _, x := range []uint64{9, 2, 5, 1} {
-		r, ok := ns.Get(x, nil)
-		if !ok {
-			t.Errorf("miss for key '%d'", x)
+	c := NewCache(NewLRU(12))
+	o1 := set(c, 0, 1, 1, 1, nil)
+	set(c, 0, 2, 2, 1, nil).Release()
+	set(c, 0, 3, 3, 1, nil).Release()
+	set(c, 0, 4, 4, 1, nil).Release()
+	set(c, 0, 5, 5, 1, nil).Release()
+	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
+		h.Release()
+	}
+	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
+
+	for _, key := range []uint64{9, 2, 5, 1} {
+		h := c.Get(0, key, nil)
+		if h == nil {
+			t.Errorf("miss for key '%d'", key)
 		} else {
-			if r.Value().(int) != int(x) {
-				t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
 			}
-			r.Release()
+			h.Release()
 		}
 	}
 	o1.Release()
-	for _, x := range []uint64{1, 2, 5} {
-		r, ok := ns.Get(x, nil)
-		if !ok {
-			t.Errorf("miss for key '%d'", x)
+	for _, key := range []uint64{1, 2, 5} {
+		h := c.Get(0, key, nil)
+		if h == nil {
+			t.Errorf("miss for key '%d'", key)
 		} else {
-			if r.Value().(int) != int(x) {
-				t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
 			}
-			r.Release()
+			h.Release()
 		}
 	}
-	for _, x := range []uint64{3, 4, 9} {
-		r, ok := ns.Get(x, nil)
-		if ok {
-			t.Errorf("hit for key '%d'", x)
-			if r.Value().(int) != int(x) {
-				t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+	for _, key := range []uint64{3, 4, 9} {
+		h := c.Get(0, key, nil)
+		if h != nil {
+			t.Errorf("hit for key '%d'", key)
+			if x := h.Value().(int); x != int(key) {
+				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
 			}
-			r.Release()
+			h.Release()
 		}
 	}
 }
 
-func TestLRUCache_SetGet(t *testing.T) {
-	c := NewLRUCache(13)
-	ns := c.GetNamespace(0)
-	for i := 0; i < 200; i++ {
-		n := uint64(rand.Intn(99999) % 20)
-		set(ns, n, n, 1, nil).Release()
-		if p, ok := ns.Get(n, nil); ok {
-			if p.Value() == nil {
-				t.Errorf("key '%d' contains nil value", n)
+func TestLRUCache_Evict(t *testing.T) {
+	c := NewCache(NewLRU(6))
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 1, nil).Release()
+	set(c, 1, 1, 4, 1, nil).Release()
+	set(c, 1, 2, 5, 1, nil).Release()
+	set(c, 2, 1, 6, 1, nil).Release()
+	set(c, 2, 2, 7, 1, nil).Release()
+
+	for ns := 0; ns < 3; ns++ {
+		for key := 1; key < 3; key++ {
+			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+				h.Release()
 			} else {
-				got := p.Value().(uint64)
-				if got != n {
-					t.Errorf("invalid value for key '%d' want '%d', got '%d'", n, n, got)
-				}
+				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
 			}
-			p.Release()
-		} else {
-			t.Errorf("key '%d' doesn't exist", n)
 		}
 	}
-}
 
-func TestLRUCache_Purge(t *testing.T) {
-	c := NewLRUCache(3)
-	ns1 := c.GetNamespace(0)
-	o1 := set(ns1, 1, 1, 1, nil)
-	o2 := set(ns1, 2, 2, 1, nil)
-	ns1.Purge(nil)
-	set(ns1, 3, 3, 1, nil).Release()
-	for _, x := range []uint64{1, 2, 3} {
-		r, ok := ns1.Get(x, nil)
-		if !ok {
-			t.Errorf("miss for key '%d'", x)
-		} else {
-			if r.Value().(int) != int(x) {
-				t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
+	if ok := c.Evict(0, 1); !ok {
+		t.Error("first Cache.Evict on #0.1 return false")
+	}
+	if ok := c.Evict(0, 1); ok {
+		t.Error("second Cache.Evict on #0.1 return true")
+	}
+	if h := c.Get(0, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
+	}
+
+	c.EvictNS(1)
+	if h := c.Get(1, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
+	}
+	if h := c.Get(1, 2, nil); h != nil {
+		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
+	}
+
+	c.EvictAll()
+	for ns := 0; ns < 3; ns++ {
+		for key := 1; key < 3; key++ {
+			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
+				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
 			}
-			r.Release()
 		}
 	}
-	o1.Release()
-	o2.Release()
-	for _, x := range []uint64{1, 2} {
-		r, ok := ns1.Get(x, nil)
-		if ok {
-			t.Errorf("hit for key '%d'", x)
-			if r.Value().(int) != int(x) {
-				t.Errorf("invalid value for key '%d' want '%d', got '%d'", x, x, r.Value().(int))
-			}
-			r.Release()
+}
+
+func TestLRUCache_Delete(t *testing.T) {
+	delFuncCalled := 0
+	delFunc := func() {
+		delFuncCalled++
+	}
+
+	c := NewCache(NewLRU(2))
+	set(c, 0, 1, 1, 1, nil).Release()
+	set(c, 0, 2, 2, 1, nil).Release()
+
+	if ok := c.Delete(0, 1, delFunc); !ok {
+		t.Error("Cache.Delete on #1 return false")
+	}
+	if h := c.Get(0, 1, nil); h != nil {
+		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
+	}
+	if ok := c.Delete(0, 1, delFunc); ok {
+		t.Error("Cache.Delete on #1 return true")
+	}
+
+	h2 := c.Get(0, 2, nil)
+	if h2 == nil {
+		t.Error("Cache.Get on #2 return nil")
+	}
+	if ok := c.Delete(0, 2, delFunc); !ok {
+		t.Error("(1) Cache.Delete on #2 return false")
+	}
+	if ok := c.Delete(0, 2, delFunc); !ok {
+		t.Error("(2) Cache.Delete on #2 return false")
+	}
+
+	set(c, 0, 3, 3, 1, nil).Release()
+	set(c, 0, 4, 4, 1, nil).Release()
+	c.Get(0, 2, nil).Release()
+
+	for key := 2; key <= 4; key++ {
+		if h := c.Get(0, uint64(key), nil); h != nil {
+			h.Release()
+		} else {
+			t.Errorf("Cache.Get on #%d return nil", key)
 		}
 	}
-}
 
-func BenchmarkLRUCache_SetRelease(b *testing.B) {
-	capacity := b.N / 100
-	if capacity <= 0 {
-		capacity = 10
+	h2.Release()
+	if h := c.Get(0, 2, nil); h != nil {
+		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
 	}
-	c := NewLRUCache(capacity)
-	ns := c.GetNamespace(0)
-	b.ResetTimer()
-	for i := uint64(0); i < uint64(b.N); i++ {
-		set(ns, i, nil, 1, nil).Release()
+
+	if delFuncCalled != 4 {
+		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
 	}
 }
 
-func BenchmarkLRUCache_SetReleaseTwice(b *testing.B) {
-	capacity := b.N / 100
-	if capacity <= 0 {
-		capacity = 10
+func TestLRUCache_Close(t *testing.T) {
+	relFuncCalled := 0
+	relFunc := func() {
+		relFuncCalled++
+	}
+	delFuncCalled := 0
+	delFunc := func() {
+		delFuncCalled++
 	}
-	c := NewLRUCache(capacity)
-	ns := c.GetNamespace(0)
-	b.ResetTimer()
 
-	na := b.N / 2
-	nb := b.N - na
+	c := NewCache(NewLRU(2))
+	set(c, 0, 1, 1, 1, relFunc).Release()
+	set(c, 0, 2, 2, 1, relFunc).Release()
 
-	for i := uint64(0); i < uint64(na); i++ {
-		set(ns, i, nil, 1, nil).Release()
+	h3 := set(c, 0, 3, 3, 1, relFunc)
+	if h3 == nil {
+		t.Error("Cache.Get on #3 return nil")
 	}
+	if ok := c.Delete(0, 3, delFunc); !ok {
+		t.Error("Cache.Delete on #3 return false")
+	}
+
+	c.Close()
 
-	for i := uint64(0); i < uint64(nb); i++ {
-		set(ns, i, nil, 1, nil).Release()
+	if relFuncCalled != 3 {
+		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
+	}
+	if delFuncCalled != 1 {
+		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
deleted file mode 100644
index 1fbf814595ca3ee089e7514f0dc5aa6c726882e5..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/empty_cache.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
-	"sync"
-	"sync/atomic"
-)
-
-type emptyCache struct {
-	sync.Mutex
-	table map[uint64]*emptyNS
-}
-
-// NewEmptyCache creates a new initialized empty cache.
-func NewEmptyCache() Cache {
-	return &emptyCache{
-		table: make(map[uint64]*emptyNS),
-	}
-}
-
-func (c *emptyCache) GetNamespace(id uint64) Namespace {
-	c.Lock()
-	defer c.Unlock()
-
-	if ns, ok := c.table[id]; ok {
-		return ns
-	}
-
-	ns := &emptyNS{
-		cache: c,
-		id:    id,
-		table: make(map[uint64]*emptyNode),
-	}
-	c.table[id] = ns
-	return ns
-}
-
-func (c *emptyCache) Purge(fin PurgeFin) {
-	c.Lock()
-	for _, ns := range c.table {
-		ns.purgeNB(fin)
-	}
-	c.Unlock()
-}
-
-func (c *emptyCache) Zap(closed bool) {
-	c.Lock()
-	for _, ns := range c.table {
-		ns.zapNB(closed)
-	}
-	c.table = make(map[uint64]*emptyNS)
-	c.Unlock()
-}
-
-func (*emptyCache) SetCapacity(capacity int) {}
-
-type emptyNS struct {
-	cache *emptyCache
-	id    uint64
-	table map[uint64]*emptyNode
-	state nsState
-}
-
-func (ns *emptyNS) Get(key uint64, setf SetFunc) (o Object, ok bool) {
-	ns.cache.Lock()
-
-	switch ns.state {
-	case nsZapped:
-		ns.cache.Unlock()
-		if setf == nil {
-			return
-		}
-
-		var value interface{}
-		var fin func()
-		ok, value, _, fin = setf()
-		if ok {
-			o = &fakeObject{
-				value: value,
-				fin:   fin,
-			}
-		}
-		return
-	case nsClosed:
-		ns.cache.Unlock()
-		return
-	}
-
-	n, ok := ns.table[key]
-	if ok {
-		n.ref++
-	} else {
-		if setf == nil {
-			ns.cache.Unlock()
-			return
-		}
-
-		var value interface{}
-		var fin func()
-		ok, value, _, fin = setf()
-		if !ok {
-			ns.cache.Unlock()
-			return
-		}
-
-		n = &emptyNode{
-			ns:     ns,
-			key:    key,
-			value:  value,
-			setfin: fin,
-			ref:    1,
-		}
-		ns.table[key] = n
-	}
-
-	ns.cache.Unlock()
-	o = &emptyObject{node: n}
-	return
-}
-
-func (ns *emptyNS) Delete(key uint64, fin DelFin) bool {
-	ns.cache.Lock()
-
-	if ns.state != nsEffective {
-		ns.cache.Unlock()
-		if fin != nil {
-			fin(false)
-		}
-		return false
-	}
-
-	n, ok := ns.table[key]
-	if !ok {
-		ns.cache.Unlock()
-		if fin != nil {
-			fin(false)
-		}
-		return false
-	}
-	n.delfin = fin
-	ns.cache.Unlock()
-	return true
-}
-
-func (ns *emptyNS) purgeNB(fin PurgeFin) {
-	if ns.state != nsEffective {
-		return
-	}
-	for _, n := range ns.table {
-		n.purgefin = fin
-	}
-}
-
-func (ns *emptyNS) Purge(fin PurgeFin) {
-	ns.cache.Lock()
-	ns.purgeNB(fin)
-	ns.cache.Unlock()
-}
-
-func (ns *emptyNS) zapNB(closed bool) {
-	if ns.state != nsEffective {
-		return
-	}
-	for _, n := range ns.table {
-		n.execFin()
-	}
-	if closed {
-		ns.state = nsClosed
-	} else {
-		ns.state = nsZapped
-	}
-	ns.table = nil
-}
-
-func (ns *emptyNS) Zap(closed bool) {
-	ns.cache.Lock()
-	ns.zapNB(closed)
-	delete(ns.cache.table, ns.id)
-	ns.cache.Unlock()
-}
-
-type emptyNode struct {
-	ns       *emptyNS
-	key      uint64
-	value    interface{}
-	ref      int
-	setfin   SetFin
-	delfin   DelFin
-	purgefin PurgeFin
-}
-
-func (n *emptyNode) execFin() {
-	if n.setfin != nil {
-		n.setfin()
-		n.setfin = nil
-	}
-	if n.purgefin != nil {
-		n.purgefin(n.ns.id, n.key, n.delfin)
-		n.delfin = nil
-		n.purgefin = nil
-	} else if n.delfin != nil {
-		n.delfin(true)
-		n.delfin = nil
-	}
-}
-
-func (n *emptyNode) evict() {
-	n.ns.cache.Lock()
-	n.ref--
-	if n.ref == 0 {
-		if n.ns.state == nsEffective {
-			// Remove elem.
-			delete(n.ns.table, n.key)
-			// Execute finalizer.
-			n.execFin()
-		}
-	} else if n.ref < 0 {
-		panic("leveldb/cache: emptyNode: negative node reference")
-	}
-	n.ns.cache.Unlock()
-}
-
-type emptyObject struct {
-	node *emptyNode
-	once uint32
-}
-
-func (o *emptyObject) Value() interface{} {
-	if atomic.LoadUint32(&o.once) == 0 {
-		return o.node.value
-	}
-	return nil
-}
-
-func (o *emptyObject) Release() {
-	if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
-		return
-	}
-	o.node.evict()
-	o.node = nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9a84cde15e8a0c892dc419ace7679616943d54c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+	"sync"
+	"unsafe"
+)
+
+type lruNode struct {
+	n   *Node
+	h   *Handle
+	ban bool
+
+	next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+	x := at.next
+	at.next = n
+	n.prev = at
+	n.next = x
+	x.prev = n
+}
+
+func (n *lruNode) remove() {
+	if n.prev != nil {
+		n.prev.next = n.next
+		n.next.prev = n.prev
+		n.prev = nil
+		n.next = nil
+	} else {
+		panic("BUG: removing removed node")
+	}
+}
+
+type lru struct {
+	mu       sync.Mutex
+	capacity int
+	used     int
+	recent   lruNode
+}
+
+func (r *lru) reset() {
+	r.recent.next = &r.recent
+	r.recent.prev = &r.recent
+	r.used = 0
+}
+
+func (r *lru) Capacity() int {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	r.capacity = capacity
+	for r.used > r.capacity {
+		rn := r.recent.prev
+		if rn == nil {
+			panic("BUG: invalid LRU used or capacity counter")
+		}
+		rn.remove()
+		rn.n.CacheData = nil
+		r.used -= rn.n.Size()
+		evicted = append(evicted, rn)
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
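+// Promote moves n to the front of the recency list, inserting it (and
+// taking a pinning handle) on first touch; anything pushed past capacity
+// is unlinked and its handle released outside the lock.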
+func (r *lru) Promote(n *Node) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	if n.CacheData == nil {
+		if n.Size() <= r.capacity {
+			rn := &lruNode{n: n, h: n.GetHandle()}
+			rn.insert(&r.recent)
+			n.CacheData = unsafe.Pointer(rn)
+			r.used += n.Size()
+
+			for r.used > r.capacity {
+				rn := r.recent.prev
+				if rn == nil {
+					panic("BUG: invalid LRU used or capacity counter")
+				}
+				rn.remove()
+				rn.n.CacheData = nil
+				r.used -= rn.n.Size()
+				evicted = append(evicted, rn)
+			}
+		}
+	} else {
+		rn := (*lruNode)(n.CacheData)
+		if !rn.ban {
+			rn.remove()
+			rn.insert(&r.recent)
+		}
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
+func (r *lru) Ban(n *Node) {
+	r.mu.Lock()
+	if n.CacheData == nil {
+		n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+	} else {
+		rn := (*lruNode)(n.CacheData)
+		if !rn.ban {
+			rn.remove()
+			rn.ban = true
+			r.used -= rn.n.Size()
+			r.mu.Unlock()
+
+			rn.h.Release()
+			rn.h = nil
+			return
+		}
+	}
+	r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+	r.mu.Lock()
+	rn := (*lruNode)(n.CacheData)
+	if rn == nil || rn.ban {
+		r.mu.Unlock()
+		return
+	}
+	n.CacheData = nil
+	r.mu.Unlock()
+
+	rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+	var evicted []*lruNode
+
+	r.mu.Lock()
+	for e := r.recent.prev; e != &r.recent; {
+		rn := e
+		e = e.prev
+		if rn.n.NS() == ns {
+			rn.remove()
+			rn.n.CacheData = nil
+			r.used -= rn.n.Size()
+			evicted = append(evicted, rn)
+		}
+	}
+	r.mu.Unlock()
+
+	for _, rn := range evicted {
+		rn.h.Release()
+	}
+}
+
+func (r *lru) EvictAll() {
+	r.mu.Lock()
+	back := r.recent.prev
+	for rn := back; rn != &r.recent; rn = rn.prev {
+		rn.n.CacheData = nil
+	}
+	r.reset()
+	r.mu.Unlock()
+
+	for rn := back; rn != &r.recent; rn = rn.prev {
+		rn.h.Release()
+	}
+}
+
+func (r *lru) Close() error {
+	return nil
+}
+
+// NewLRU creates a new LRU cache.
+func NewLRU(capacity int) Cacher {
+	r := &lru{capacity: capacity}
+	r.reset()
+	return r
+}
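And a minimal sketch of the LRU policy composed with the cache map above. Note that capacity is measured in the sizes passed to setFunc, not in node counts; the values used here are arbitrary:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// Capacity 2, with every node charged a size of 1.
	c := cache.NewCache(cache.NewLRU(2))

	for key := uint64(1); key <= 3; key++ {
		c.Get(0, key, func() (int, cache.Value) {
			return 1, key
		}).Release()
	}

	// Inserting key 3 pushed out key 1, the least recently used.
	fmt.Println(c.Get(0, 1, nil) == nil) // true
	fmt.Println(c.Nodes(), c.Size())     // 2 2
}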
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
deleted file mode 100644
index 3c98e076b4456ace793fb9dfd433bc909aee20cb..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/cache/lru_cache.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package cache
-
-import (
-	"sync"
-	"sync/atomic"
-)
-
-// lruCache represent a LRU cache state.
-type lruCache struct {
-	sync.Mutex
-
-	recent   lruNode
-	table    map[uint64]*lruNs
-	capacity int
-	size     int
-}
-
-// NewLRUCache creates a new initialized LRU cache with the given capacity.
-func NewLRUCache(capacity int) Cache {
-	c := &lruCache{
-		table:    make(map[uint64]*lruNs),
-		capacity: capacity,
-	}
-	c.recent.rNext = &c.recent
-	c.recent.rPrev = &c.recent
-	return c
-}
-
-// SetCapacity set cache capacity.
-func (c *lruCache) SetCapacity(capacity int) {
-	c.Lock()
-	c.capacity = capacity
-	c.evict()
-	c.Unlock()
-}
-
-// GetNamespace return namespace object for given id.
-func (c *lruCache) GetNamespace(id uint64) Namespace {
-	c.Lock()
-	defer c.Unlock()
-
-	if p, ok := c.table[id]; ok {
-		return p
-	}
-
-	p := &lruNs{
-		lru:   c,
-		id:    id,
-		table: make(map[uint64]*lruNode),
-	}
-	c.table[id] = p
-	return p
-}
-
-// Purge purge entire cache.
-func (c *lruCache) Purge(fin PurgeFin) {
-	c.Lock()
-	for _, ns := range c.table {
-		ns.purgeNB(fin)
-	}
-	c.Unlock()
-}
-
-func (c *lruCache) Zap(closed bool) {
-	c.Lock()
-	for _, ns := range c.table {
-		ns.zapNB(closed)
-	}
-	c.table = make(map[uint64]*lruNs)
-	c.Unlock()
-}
-
-func (c *lruCache) evict() {
-	top := &c.recent
-	for n := c.recent.rPrev; c.size > c.capacity && n != top; {
-		n.state = nodeEvicted
-		n.rRemove()
-		n.evictNB()
-		c.size -= n.charge
-		n = c.recent.rPrev
-	}
-}
-
-type lruNs struct {
-	lru   *lruCache
-	id    uint64
-	table map[uint64]*lruNode
-	state nsState
-}
-
-func (ns *lruNs) Get(key uint64, setf SetFunc) (o Object, ok bool) {
-	lru := ns.lru
-	lru.Lock()
-
-	switch ns.state {
-	case nsZapped:
-		lru.Unlock()
-		if setf == nil {
-			return
-		}
-
-		var value interface{}
-		var fin func()
-		ok, value, _, fin = setf()
-		if ok {
-			o = &fakeObject{
-				value: value,
-				fin:   fin,
-			}
-		}
-		return
-	case nsClosed:
-		lru.Unlock()
-		return
-	}
-
-	n, ok := ns.table[key]
-	if ok {
-		switch n.state {
-		case nodeEvicted:
-			// Insert to recent list.
-			n.state = nodeEffective
-			n.ref++
-			lru.size += n.charge
-			lru.evict()
-			fallthrough
-		case nodeEffective:
-			// Bump to front
-			n.rRemove()
-			n.rInsert(&lru.recent)
-		}
-		n.ref++
-	} else {
-		if setf == nil {
-			lru.Unlock()
-			return
-		}
-
-		var value interface{}
-		var charge int
-		var fin func()
-		ok, value, charge, fin = setf()
-		if !ok {
-			lru.Unlock()
-			return
-		}
-
-		n = &lruNode{
-			ns:     ns,
-			key:    key,
-			value:  value,
-			charge: charge,
-			setfin: fin,
-			ref:    2,
-		}
-		ns.table[key] = n
-		n.rInsert(&lru.recent)
-
-		lru.size += charge
-		lru.evict()
-	}
-
-	lru.Unlock()
-	o = &lruObject{node: n}
-	return
-}
-
-func (ns *lruNs) Delete(key uint64, fin DelFin) bool {
-	lru := ns.lru
-	lru.Lock()
-
-	if ns.state != nsEffective {
-		lru.Unlock()
-		if fin != nil {
-			fin(false)
-		}
-		return false
-	}
-
-	n, ok := ns.table[key]
-	if !ok {
-		lru.Unlock()
-		if fin != nil {
-			fin(false)
-		}
-		return false
-	}
-
-	n.delfin = fin
-	switch n.state {
-	case nodeRemoved:
-		lru.Unlock()
-		return false
-	case nodeEffective:
-		lru.size -= n.charge
-		n.rRemove()
-		n.evictNB()
-	}
-	n.state = nodeRemoved
-
-	lru.Unlock()
-	return true
-}
-
-func (ns *lruNs) purgeNB(fin PurgeFin) {
-	lru := ns.lru
-	if ns.state != nsEffective {
-		return
-	}
-
-	for _, n := range ns.table {
-		n.purgefin = fin
-		if n.state == nodeEffective {
-			lru.size -= n.charge
-			n.rRemove()
-			n.evictNB()
-		}
-		n.state = nodeRemoved
-	}
-}
-
-func (ns *lruNs) Purge(fin PurgeFin) {
-	ns.lru.Lock()
-	ns.purgeNB(fin)
-	ns.lru.Unlock()
-}
-
-func (ns *lruNs) zapNB(closed bool) {
-	lru := ns.lru
-	if ns.state != nsEffective {
-		return
-	}
-
-	if closed {
-		ns.state = nsClosed
-	} else {
-		ns.state = nsZapped
-	}
-	for _, n := range ns.table {
-		if n.state == nodeEffective {
-			lru.size -= n.charge
-			n.rRemove()
-		}
-		n.state = nodeRemoved
-		n.execFin()
-	}
-	ns.table = nil
-}
-
-func (ns *lruNs) Zap(closed bool) {
-	ns.lru.Lock()
-	ns.zapNB(closed)
-	delete(ns.lru.table, ns.id)
-	ns.lru.Unlock()
-}
-
-type lruNode struct {
-	ns *lruNs
-
-	rNext, rPrev *lruNode
-
-	key      uint64
-	value    interface{}
-	charge   int
-	ref      int
-	state    nodeState
-	setfin   SetFin
-	delfin   DelFin
-	purgefin PurgeFin
-}
-
-func (n *lruNode) rInsert(at *lruNode) {
-	x := at.rNext
-	at.rNext = n
-	n.rPrev = at
-	n.rNext = x
-	x.rPrev = n
-}
-
-func (n *lruNode) rRemove() bool {
-	// only remove if not already removed
-	if n.rPrev == nil {
-		return false
-	}
-
-	n.rPrev.rNext = n.rNext
-	n.rNext.rPrev = n.rPrev
-	n.rPrev = nil
-	n.rNext = nil
-
-	return true
-}
-
-func (n *lruNode) execFin() {
-	if n.setfin != nil {
-		n.setfin()
-		n.setfin = nil
-	}
-	if n.purgefin != nil {
-		n.purgefin(n.ns.id, n.key, n.delfin)
-		n.delfin = nil
-		n.purgefin = nil
-	} else if n.delfin != nil {
-		n.delfin(true)
-		n.delfin = nil
-	}
-}
-
-func (n *lruNode) evictNB() {
-	n.ref--
-	if n.ref == 0 {
-		if n.ns.state == nsEffective {
-			// remove elem
-			delete(n.ns.table, n.key)
-			// execute finalizer
-			n.execFin()
-		}
-	} else if n.ref < 0 {
-		panic("leveldb/cache: lruCache: negative node reference")
-	}
-}
-
-func (n *lruNode) evict() {
-	n.ns.lru.Lock()
-	n.evictNB()
-	n.ns.lru.Unlock()
-}
-
-type lruObject struct {
-	node *lruNode
-	once uint32
-}
-
-func (o *lruObject) Value() interface{} {
-	if atomic.LoadUint32(&o.once) == 0 {
-		return o.node.value
-	}
-	return nil
-}
-
-func (o *lruObject) Release() {
-	if !atomic.CompareAndSwapUint32(&o.once, 0, 1) {
-		return
-	}
-
-	o.node.evict()
-	o.node = nil
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
deleted file mode 100644
index 51105889772f02e796033c51cdbf654e7dd7c44b..0000000000000000000000000000000000000000
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/config.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
-// All rights reserved.
-//
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package leveldb
-
-const (
-	kNumLevels = 7
-
-	// Level-0 compaction is started when we hit this many files.
-	kL0_CompactionTrigger float64 = 4
-
-	// Soft limit on number of level-0 files.  We slow down writes at this point.
-	kL0_SlowdownWritesTrigger = 8
-
-	// Maximum number of level-0 files.  We stop writes at this point.
-	kL0_StopWritesTrigger = 12
-
-	// Maximum level to which a new compacted memdb is pushed if it
-	// does not create overlap.  We try to push to level 2 to avoid the
-	// relatively expensive level 0=>1 compactions and to avoid some
-	// expensive manifest file operations.  We do not push all the way to
-	// the largest level since that can generate a lot of wasted disk
-	// space if the same key space is being repeatedly overwritten.
-	kMaxMemCompactLevel = 2
-
-	// Maximum size of a table.
-	kMaxTableSize = 2 * 1048576
-
-	// Maximum bytes of overlaps in grandparent (i.e., level+2) before we
-	// stop building a single file in a level->level+1 compaction.
-	kMaxGrandParentOverlapBytes = 10 * kMaxTableSize
-
-	// Maximum number of bytes in all compacted files.  We avoid expanding
-	// the lower level file set of a compaction if it would make the
-	// total compaction cover more than this many bytes.
-	kExpCompactionMaxBytes = 25 * kMaxTableSize
-)
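These hard-coded tuning constants are superseded by per-DB options; the updated tests in this diff configure the same knobs through opt.Options (BlockCacheCapacity, WriteBuffer, CompactionTableSize). A sketch of the equivalent configuration, assuming the option names used in the tests above (the sizes are arbitrary examples):

	package main

	import (
		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/opt"
	)

	func main() {
		o := &opt.Options{
			BlockCacheCapacity:  8 * opt.MiB, // replaces the fixed-size LRU block cache
			WriteBuffer:         4 * opt.MiB, // memdb size before a flush is triggered
			CompactionTableSize: 2 * opt.MiB, // replaces the old kMaxTableSize constant
			Strict:              opt.StrictJournalChecksum,
		}
		db, err := leveldb.OpenFile("/tmp/example.db", o)
		if err != nil {
			panic(err)
		}
		defer db.Close()
	}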
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
index a036e08935a85e5e34d60b96520e0603ea17806a..a351874ed4351f6d0abf67719bf7698d4228216a 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
@@ -9,13 +9,12 @@ package leveldb
 import (
 	"bytes"
 	"fmt"
+	"github.com/syndtr/goleveldb/leveldb/filter"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
 	"io"
 	"math/rand"
 	"testing"
-
-	"github.com/syndtr/goleveldb/leveldb/cache"
-	"github.com/syndtr/goleveldb/leveldb/opt"
-	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
 const ctValSize = 1000
@@ -32,8 +31,8 @@ func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
 
 func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
 	return newDbCorruptHarnessWopt(t, &opt.Options{
-		BlockCache: cache.NewLRUCache(100),
-		Strict:     opt.StrictJournalChecksum,
+		BlockCacheCapacity: 100,
+		Strict:             opt.StrictJournalChecksum,
 	})
 }
 
@@ -96,21 +95,22 @@ func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) {
 	}
 }
 
-func (h *dbCorruptHarness) corrupt(ft storage.FileType, offset, n int) {
+func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
 	p := &h.dbHarness
 	t := p.t
 
-	var file storage.File
 	ff, _ := p.stor.GetFiles(ft)
-	for _, f := range ff {
-		if file == nil || f.Num() > file.Num() {
-			file = f
-		}
+	sff := files(ff)
+	sff.sort()
+	if fi < 0 {
+		fi = len(sff) - 1
 	}
-	if file == nil {
-		t.Fatalf("no such file with type %q", ft)
+	if fi >= len(sff) {
+		t.Fatalf("no such file with type %q with index %d", ft, fi)
 	}
 
+	file := sff[fi]
+
 	r, err := file.Open()
 	if err != nil {
 		t.Fatal("cannot open file: ", err)
@@ -225,8 +225,8 @@ func TestCorruptDB_Journal(t *testing.T) {
 	h.build(100)
 	h.check(100, 100)
 	h.closeDB()
-	h.corrupt(storage.TypeJournal, 19, 1)
-	h.corrupt(storage.TypeJournal, 32*1024+1000, 1)
+	h.corrupt(storage.TypeJournal, -1, 19, 1)
+	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)
 
 	h.openDB()
 	h.check(36, 36)
@@ -242,7 +242,7 @@ func TestCorruptDB_Table(t *testing.T) {
 	h.compactRangeAt(0, "", "")
 	h.compactRangeAt(1, "", "")
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.check(99, 99)
@@ -256,7 +256,7 @@ func TestCorruptDB_TableIndex(t *testing.T) {
 	h.build(10000)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, -2000, 500)
+	h.corrupt(storage.TypeTable, -1, -2000, 500)
 
 	h.openDB()
 	h.check(5000, 9999)
@@ -267,9 +267,9 @@ func TestCorruptDB_TableIndex(t *testing.T) {
 func TestCorruptDB_MissingManifest(t *testing.T) {
 	rnd := rand.New(rand.NewSource(0x0badda7a))
 	h := newDbCorruptHarnessWopt(t, &opt.Options{
-		BlockCache:  cache.NewLRUCache(100),
-		Strict:      opt.StrictJournalChecksum,
-		WriteBuffer: 1000 * 60,
+		BlockCacheCapacity: 100,
+		Strict:             opt.StrictJournalChecksum,
+		WriteBuffer:        1000 * 60,
 	})
 
 	h.build(1000)
@@ -355,7 +355,7 @@ func TestCorruptDB_CorruptedManifest(t *testing.T) {
 	h.compactMem()
 	h.compactRange("", "")
 	h.closeDB()
-	h.corrupt(storage.TypeManifest, 0, 1000)
+	h.corrupt(storage.TypeManifest, -1, 0, 1000)
 	h.openAssert(false)
 
 	h.recover()
@@ -370,7 +370,7 @@ func TestCorruptDB_CompactionInputError(t *testing.T) {
 	h.build(10)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.check(9, 9)
@@ -387,7 +387,7 @@ func TestCorruptDB_UnrelatedKeys(t *testing.T) {
 	h.build(10)
 	h.compactMem()
 	h.closeDB()
-	h.corrupt(storage.TypeTable, 100, 1)
+	h.corrupt(storage.TypeTable, -1, 100, 1)
 
 	h.openDB()
 	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
@@ -470,3 +470,31 @@ func TestCorruptDB_MissingTableFiles(t *testing.T) {
 
 	h.close()
 }
+
+func TestCorruptDB_RecoverTable(t *testing.T) {
+	h := newDbCorruptHarnessWopt(t, &opt.Options{
+		WriteBuffer:         112 * opt.KiB,
+		CompactionTableSize: 90 * opt.KiB,
+		Filter:              filter.NewBloomFilter(10),
+	})
+
+	h.build(1000)
+	h.compactMem()
+	h.compactRangeAt(0, "", "")
+	h.compactRangeAt(1, "", "")
+	seq := h.db.seq
+	h.closeDB()
+	h.corrupt(storage.TypeTable, 0, 1000, 1)
+	h.corrupt(storage.TypeTable, 3, 10000, 1)
+	// Corrupted filter shouldn't affect recovery.
+	h.corrupt(storage.TypeTable, 3, 113888, 10)
+	h.corrupt(storage.TypeTable, -1, 20000, 1)
+
+	h.recover()
+	if h.db.seq != seq {
+		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
+	}
+	h.check(985, 985)
+
+	h.close()
+}
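Note the changed harness signature: corrupt now takes a file index before the offset, with -1 selecting the newest file of the given type (matching the old behavior). Paraphrasing the calls above:

	h.corrupt(storage.TypeTable, 0, 1000, 1) // corrupt the oldest table file
	h.corrupt(storage.TypeTable, -1, 100, 1) // corrupt the newest table file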
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
index 8e975dc63cb2145ecbba02a2b75539a707192242..323353b2ae84ae50d4c1283ac6e5750751d4b54c 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
@@ -7,15 +7,17 @@
 package leveldb
 
 import (
-	"errors"
+	"container/list"
 	"fmt"
 	"io"
 	"os"
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/journal"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
@@ -30,41 +32,46 @@ type DB struct {
 	// Need 64-bit alignment.
 	seq uint64
 
+	// Session.
 	s *session
 
-	// MemDB
+	// MemDB.
 	memMu             sync.RWMutex
-	mem               *memdb.DB
-	frozenMem         *memdb.DB
+	memPool           chan *memdb.DB
+	mem, frozenMem    *memDB
 	journal           *journal.Writer
 	journalWriter     storage.Writer
 	journalFile       storage.File
 	frozenJournalFile storage.File
 	frozenSeq         uint64
 
-	// Snapshot
+	// Snapshot.
 	snapsMu   sync.Mutex
-	snapsRoot snapshotElement
+	snapsList *list.List
 
-	// Write
+	// Stats.
+	aliveSnaps, aliveIters int32
+
+	// Write.
 	writeC       chan *Batch
 	writeMergedC chan bool
 	writeLockC   chan struct{}
 	writeAckC    chan error
+	writeDelay   time.Duration
+	writeDelayN  int
 	journalC     chan *Batch
 	journalAckC  chan error
 
-	// Compaction
-	tcompCmdC     chan cCmd
-	tcompPauseC   chan chan<- struct{}
-	tcompTriggerC chan struct{}
-	mcompCmdC     chan cCmd
-	mcompTriggerC chan struct{}
-	compErrC      chan error
-	compErrSetC   chan error
-	compStats     [kNumLevels]cStats
-
-	// Close
+	// Compaction.
+	tcompCmdC   chan cCmd
+	tcompPauseC chan chan<- struct{}
+	mcompCmdC   chan cCmd
+	compErrC    chan error
+	compPerErrC chan error
+	compErrSetC chan error
+	compStats   []cStats
+
+	// Close.
 	closeW sync.WaitGroup
 	closeC chan struct{}
 	closed uint32
@@ -77,7 +84,11 @@ func openDB(s *session) (*DB, error) {
 	db := &DB{
 		s: s,
 		// Initial sequence
-		seq: s.stSeq,
+		seq: s.stSeqNum,
+		// MemDB
+		memPool: make(chan *memdb.DB, 1),
+		// Snapshot
+		snapsList: list.New(),
 		// Write
 		writeC:       make(chan *Batch),
 		writeMergedC: make(chan bool),
@@ -86,17 +97,16 @@ func openDB(s *session) (*DB, error) {
 		journalC:     make(chan *Batch),
 		journalAckC:  make(chan error),
 		// Compaction
-		tcompCmdC:     make(chan cCmd),
-		tcompPauseC:   make(chan chan<- struct{}),
-		tcompTriggerC: make(chan struct{}, 1),
-		mcompCmdC:     make(chan cCmd),
-		mcompTriggerC: make(chan struct{}, 1),
-		compErrC:      make(chan error),
-		compErrSetC:   make(chan error),
+		tcompCmdC:   make(chan cCmd),
+		tcompPauseC: make(chan chan<- struct{}),
+		mcompCmdC:   make(chan cCmd),
+		compErrC:    make(chan error),
+		compPerErrC: make(chan error),
+		compErrSetC: make(chan error),
+		compStats:   make([]cStats, s.o.GetNumLevel()),
 		// Close
 		closeC: make(chan struct{}),
 	}
-	db.initSnapshot()
 
 	if err := db.recoverJournal(); err != nil {
 		return nil, err
@@ -112,8 +122,9 @@ func openDB(s *session) (*DB, error) {
 		return nil, err
 	}
 
-	// Don't include compaction error goroutine into wait group.
+	// Doesn't need to be included in the wait group.
 	go db.compactionError()
+	go db.mpoolDrain()
 
 	db.closeW.Add(3)
 	go db.tCompaction()
@@ -135,9 +146,10 @@ func openDB(s *session) (*DB, error) {
 // detected in the DB. Corrupted DB can be recovered with Recover
 // function.
 //
+// The returned DB instance is goroutine-safe.
 // The DB must be closed after use, by calling Close method.
-func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
-	s, err := newSession(p, o)
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
 	if err != nil {
 		return
 	}
@@ -177,6 +189,7 @@ func Open(p storage.Storage, o *opt.Options) (db *DB, err error) {
 // detected in the DB. Corrupted DB can be recovered with Recover
 // function.
 //
+// The returned DB instance is goroutine-safe.
 // The DB must be closed after use, by calling Close method.
 func OpenFile(path string, o *opt.Options) (db *DB, err error) {
 	stor, err := storage.OpenFile(path)
@@ -197,9 +210,10 @@ func OpenFile(path string, o *opt.Options) (db *DB, err error) {
 // The DB must already exist or it will returns an error.
 // Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
 //
+// The returned DB instance is goroutine-safe.
 // The DB must be closed after use, by calling Close method.
-func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
-	s, err := newSession(p, o)
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+	s, err := newSession(stor, o)
 	if err != nil {
 		return
 	}
@@ -225,6 +239,7 @@ func Recover(p storage.Storage, o *opt.Options) (db *DB, err error) {
 // RecoverFile uses standard file-system backed storage implementation as desribed
 // in the leveldb/storage package.
 //
+// The returned DB instance is goroutine-safe.
 // The DB must be closed after use, by calling Close method.
 func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
 	stor, err := storage.OpenFile(path)
@@ -241,16 +256,28 @@ func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
 }
 
 func recoverTable(s *session, o *opt.Options) error {
-	ff0, err := s.getFiles(storage.TypeTable)
+	o = dupOptions(o)
+	// Mask StrictReader, let StrictRecovery do its job.
+	o.Strict &= ^opt.StrictReader
+
+	// Get all table files and sort them by file number.
+	tableFiles_, err := s.getFiles(storage.TypeTable)
 	if err != nil {
 		return err
 	}
-	ff1 := files(ff0)
-	ff1.sort()
+	tableFiles := files(tableFiles_)
+	tableFiles.sort()
 
-	var mSeq uint64
-	var good, corrupted int
-	rec := new(sessionRecord)
+	var (
+		maxSeq                                                            uint64
+		recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+		// We will drop corrupted table.
+		strict = o.GetStrict(opt.StrictRecovery)
+
+		rec   = &sessionRecord{numLevel: o.GetNumLevel()}
+		bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+	)
 	buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
 		tmp = s.newTemp()
 		writer, err := tmp.Create()
@@ -264,8 +291,9 @@ func recoverTable(s *session, o *opt.Options) error {
 				tmp = nil
 			}
 		}()
+
+		// Copy entries.
 		tw := table.NewWriter(writer, o)
-		// Copy records.
 		for iter.Next() {
 			key := iter.Key()
 			if validIkey(key) {
@@ -296,45 +324,73 @@ func recoverTable(s *session, o *opt.Options) error {
 		if err != nil {
 			return err
 		}
-		defer reader.Close()
+		var closed bool
+		defer func() {
+			if !closed {
+				reader.Close()
+			}
+		}()
+
 		// Get file size.
 		size, err := reader.Seek(0, 2)
 		if err != nil {
 			return err
 		}
-		var tSeq uint64
-		var tgood, tcorrupted, blockerr int
-		var min, max []byte
-		tr := table.NewReader(reader, size, nil, o)
+
+		var (
+			tSeq                                     uint64
+			tgoodKey, tcorruptedKey, tcorruptedBlock int
+			imin, imax                               []byte
+		)
+		tr, err := table.NewReader(reader, size, storage.NewFileInfo(file), nil, bpool, o)
+		if err != nil {
+			return err
+		}
 		iter := tr.NewIterator(nil, nil)
-		iter.(iterator.ErrorCallbackSetter).SetErrorCallback(func(err error) {
-			s.logf("table@recovery found error @%d %q", file.Num(), err)
-			blockerr++
-		})
+		if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+			itererr.SetErrorCallback(func(err error) {
+				if errors.IsCorrupted(err) {
+					s.logf("table@recovery block corruption @%d %q", file.Num(), err)
+					tcorruptedBlock++
+				}
+			})
+		}
+
 		// Scan the table.
 		for iter.Next() {
 			key := iter.Key()
-			_, seq, _, ok := parseIkey(key)
-			if !ok {
-				tcorrupted++
+			_, seq, _, kerr := parseIkey(key)
+			if kerr != nil {
+				tcorruptedKey++
 				continue
 			}
-			tgood++
+			tgoodKey++
 			if seq > tSeq {
 				tSeq = seq
 			}
-			if min == nil {
-				min = append([]byte{}, key...)
+			if imin == nil {
+				imin = append([]byte{}, key...)
 			}
-			max = append(max[:0], key...)
+			imax = append(imax[:0], key...)
 		}
 		if err := iter.Error(); err != nil {
 			iter.Release()
 			return err
 		}
 		iter.Release()
-		if tgood > 0 {
-			if tcorrupted > 0 || blockerr > 0 {
+
+		goodKey += tgoodKey
+		corruptedKey += tcorruptedKey
+		corruptedBlock += tcorruptedBlock
+
+		if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+			droppedTable++
+			s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+			return nil
+		}
+
+		if tgoodKey > 0 {
+			if tcorruptedKey > 0 || tcorruptedBlock > 0 {
 				// Rebuild the table.
 				s.logf("table@recovery rebuilding @%d", file.Num())
 				iter := tr.NewIterator(nil, nil)
@@ -343,62 +399,77 @@ func recoverTable(s *session, o *opt.Options) error {
 				if err != nil {
 					return err
 				}
+				closed = true
 				reader.Close()
 				if err := file.Replace(tmp); err != nil {
 					return err
 				}
 				size = newSize
 			}
-			if tSeq > mSeq {
-				mSeq = tSeq
+			if tSeq > maxSeq {
+				maxSeq = tSeq
 			}
+			recoveredKey += tgoodKey
 			// Add table to level 0.
-			rec.addTable(0, file.Num(), uint64(size), min, max)
-			s.logf("table@recovery recovered @%d N·%d C·%d B·%d S·%d Q·%d", file.Num(), tgood, tcorrupted, blockerr, size, tSeq)
+			rec.addTable(0, file.Num(), uint64(size), imin, imax)
+			s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", file.Num(), tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
 		} else {
-			s.logf("table@recovery unrecoverable @%d C·%d B·%d S·%d", file.Num(), tcorrupted, blockerr, size)
+			droppedTable++
+			s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", file.Num(), tcorruptedKey, tcorruptedBlock, size)
 		}
 
-		good += tgood
-		corrupted += tcorrupted
-
 		return nil
 	}
+
 	// Recover all tables.
-	if len(ff1) > 0 {
-		s.logf("table@recovery F·%d", len(ff1))
-		s.markFileNum(ff1[len(ff1)-1].Num())
-		for _, file := range ff1 {
+	if len(tableFiles) > 0 {
+		s.logf("table@recovery F·%d", len(tableFiles))
+
+		// Mark file number as used.
+		s.markFileNum(tableFiles[len(tableFiles)-1].Num())
+
+		for _, file := range tableFiles {
 			if err := recoverTable(file); err != nil {
 				return err
 			}
 		}
-		s.logf("table@recovery recovered F·%d N·%d C·%d Q·%d", len(ff1), good, corrupted, mSeq)
+
+		s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(tableFiles), recoveredKey, goodKey, corruptedKey, maxSeq)
 	}
+
 	// Set sequence number.
-	rec.setSeq(mSeq + 1)
+	rec.setSeqNum(maxSeq)
+
 	// Create new manifest.
 	if err := s.create(); err != nil {
 		return err
 	}
+
 	// Commit.
 	return s.commit(rec)
 }
 
-func (d *DB) recoverJournal() error {
-	s := d.s
-
-	ff0, err := s.getFiles(storage.TypeJournal)
+func (db *DB) recoverJournal() error {
+	// Get all journal files and sort them by file number.
+	journalFiles_, err := db.s.getFiles(storage.TypeJournal)
 	if err != nil {
 		return err
 	}
-	ff1 := files(ff0)
-	ff1.sort()
-	ff2 := make([]storage.File, 0, len(ff1))
-	for _, file := range ff1 {
-		if file.Num() >= s.stJournalNum || file.Num() == s.stPrevJournalNum {
-			s.markFileNum(file.Num())
-			ff2 = append(ff2, file)
+	journalFiles := files(journalFiles_)
+	journalFiles.sort()
+
+	// Discard older journal files.
+	prev := -1
+	for i, file := range journalFiles {
+		if file.Num() >= db.s.stJournalNum {
+			if prev >= 0 {
+				i--
+				journalFiles[i] = journalFiles[prev]
+			}
+			journalFiles = journalFiles[i:]
+			break
+		} else if file.Num() == db.s.stPrevJournalNum {
+			prev = i
 		}
 	}
 
@@ -406,38 +477,43 @@ func (d *DB) recoverJournal() error {
 	var of storage.File
 	var mem *memdb.DB
 	batch := new(Batch)
-	cm := newCMem(s)
+	cm := newCMem(db.s)
 	buf := new(util.Buffer)
 	// Options.
-	strict := s.o.GetStrict(opt.StrictJournal)
-	checksum := s.o.GetStrict(opt.StrictJournalChecksum)
-	writeBuffer := s.o.GetWriteBuffer()
+	strict := db.s.o.GetStrict(opt.StrictJournal)
+	checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
+	writeBuffer := db.s.o.GetWriteBuffer()
 	recoverJournal := func(file storage.File) error {
-		s.logf("journal@recovery recovering @%d", file.Num())
+		db.logf("journal@recovery recovering @%d", file.Num())
 		reader, err := file.Open()
 		if err != nil {
 			return err
 		}
 		defer reader.Close()
+
+		// Create/reset journal reader instance.
 		if jr == nil {
-			jr = journal.NewReader(reader, dropper{s, file}, strict, checksum)
+			jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
 		} else {
-			jr.Reset(reader, dropper{s, file}, strict, checksum)
+			jr.Reset(reader, dropper{db.s, file}, strict, checksum)
 		}
+
+		// Flush memdb and remove obsolete journal file.
 		if of != nil {
 			if mem.Len() > 0 {
 				if err := cm.flush(mem, 0); err != nil {
 					return err
 				}
 			}
-			if err := cm.commit(file.Num(), d.seq); err != nil {
+			if err := cm.commit(file.Num(), db.seq); err != nil {
 				return err
 			}
 			cm.reset()
 			of.Remove()
 			of = nil
 		}
-		// Reset memdb.
+
+		// Replay journal to memdb.
 		mem.Reset()
 		for {
 			r, err := jr.Next()
@@ -445,43 +521,58 @@ func (d *DB) recoverJournal() error {
 				if err == io.EOF {
 					break
 				}
-				return err
+				return errors.SetFile(err, file)
 			}
+
 			buf.Reset()
 			if _, err := buf.ReadFrom(r); err != nil {
-				if strict {
-					return err
+				if err == io.ErrUnexpectedEOF {
+					// This is the error returned due to corruption when strict == false.
+					continue
+				} else {
+					return errors.SetFile(err, file)
 				}
-				continue
 			}
-			if err := batch.decode(buf.Bytes()); err != nil {
-				return err
-			}
-			if err := batch.memReplay(mem); err != nil {
-				return err
+			if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
+				if strict || !errors.IsCorrupted(err) {
+					return errors.SetFile(err, file)
+				} else {
+					db.s.logf("journal error: %v (skipped)", err)
+					// We won't apply the sequence number as it might be corrupted.
+					continue
+				}
 			}
-			d.seq = batch.seq + uint64(batch.len())
+
+			// Save sequence number.
+			db.seq = batch.seq + uint64(batch.Len())
+
+			// Flush it if large enough.
 			if mem.Size() >= writeBuffer {
-				// Large enough, flush it.
 				if err := cm.flush(mem, 0); err != nil {
 					return err
 				}
-				// Reset memdb.
 				mem.Reset()
 			}
 		}
+
 		of = file
 		return nil
 	}
+
 	// Recover all journals.
-	if len(ff2) > 0 {
-		s.logf("journal@recovery F·%d", len(ff2))
-		mem = memdb.New(s.icmp, writeBuffer)
-		for _, file := range ff2 {
+	if len(journalFiles) > 0 {
+		db.logf("journal@recovery F·%d", len(journalFiles))
+
+		// Mark file number as used.
+		db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
+
+		mem = memdb.New(db.s.icmp, writeBuffer)
+		for _, file := range journalFiles {
 			if err := recoverJournal(file); err != nil {
 				return err
 			}
 		}
+
 		// Flush the last journal.
 		if mem.Len() > 0 {
 			if err := cm.flush(mem, 0); err != nil {
@@ -489,72 +580,140 @@ func (d *DB) recoverJournal() error {
 			}
 		}
 	}
+
 	// Create a new journal.
-	if _, err := d.newMem(0); err != nil {
+	if _, err := db.newMem(0); err != nil {
 		return err
 	}
+
 	// Commit.
-	if err := cm.commit(d.journalFile.Num(), d.seq); err != nil {
+	if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
 		// Close journal.
-		if d.journal != nil {
-			d.journal.Close()
-			d.journalWriter.Close()
+		if db.journal != nil {
+			db.journal.Close()
+			db.journalWriter.Close()
 		}
 		return err
 	}
-	// Remove the last journal.
+
+	// Remove the last obsolete journal file.
 	if of != nil {
 		of.Remove()
 	}
+
 	return nil
 }
 
-func (d *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
-	s := d.s
+func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
+	ikey := newIkey(key, seq, ktSeek)
 
-	ikey := newIKey(key, seq, tSeek)
-
-	em, fm := d.getMems()
-	for _, m := range [...]*memdb.DB{em, fm} {
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
 		if m == nil {
 			continue
 		}
-		mk, mv, me := m.Find(ikey)
+		defer m.decref()
+
+		mk, mv, me := m.mdb.Find(ikey)
 		if me == nil {
-			ukey, _, t, ok := parseIkey(mk)
-			if ok && s.icmp.uCompare(ukey, key) == 0 {
-				if t == tDel {
+			ukey, _, kt, kerr := parseIkey(mk)
+			if kerr != nil {
+				// Shouldn't have happened.
+				panic(kerr)
+			}
+			if db.s.icmp.uCompare(ukey, key) == 0 {
+				if kt == ktDel {
 					return nil, ErrNotFound
 				}
-				return mv, nil
+				return append([]byte{}, mv...), nil
 			}
 		} else if me != ErrNotFound {
 			return nil, me
 		}
 	}
 
-	v := s.version()
-	value, cSched, err := v.get(ikey, ro)
+	v := db.s.version()
+	value, cSched, err := v.get(ikey, ro, false)
 	v.release()
 	if cSched {
 		// Trigger table compaction.
-		d.compTrigger(d.tcompTriggerC)
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	return
+}
+
+func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+	ikey := newIkey(key, seq, ktSeek)
+
+	em, fm := db.getMems()
+	for _, m := range [...]*memDB{em, fm} {
+		if m == nil {
+			continue
+		}
+		defer m.decref()
+
+		mk, _, me := m.mdb.Find(ikey)
+		if me == nil {
+			ukey, _, kt, kerr := parseIkey(mk)
+			if kerr != nil {
+				// Shouldn't have happened.
+				panic(kerr)
+			}
+			if db.s.icmp.uCompare(ukey, key) == 0 {
+				if kt == ktDel {
+					return false, nil
+				}
+				return true, nil
+			}
+		} else if me != ErrNotFound {
+			return false, me
+		}
+	}
+
+	v := db.s.version()
+	_, cSched, err := v.get(ikey, ro, true)
+	v.release()
+	if cSched {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	if err == nil {
+		ret = true
+	} else if err == ErrNotFound {
+		err = nil
 	}
 	return
 }
 
 // Get gets the value for the given key. It returns ErrNotFound if the
-// DB does not contain the key.
+// DB does not contain the key.
 //
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
-func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	err = d.ok()
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = db.ok()
 	if err != nil {
 		return
 	}
 
-	return d.get(key, d.getSeq(), ro)
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.get(key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = db.ok()
+	if err != nil {
+		return
+	}
+
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	return db.has(key, se.seq, ro)
 }
 
 // NewIterator returns an iterator for the latest snapshot of the
@@ -573,14 +732,16 @@ func (d *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
 // The iterator must be released after use, by calling Release method.
 //
 // Also read Iterator documentation of the leveldb/iterator package.
-func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	if err := d.ok(); err != nil {
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := db.ok(); err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
 
-	p := d.newSnapshot()
-	defer p.Release()
-	return p.NewIterator(slice, ro)
+	se := db.acquireSnapshot()
+	defer db.releaseSnapshot(se)
+	// The iterator holds a 'version' lock; 'version' is immutable, so the
+	// snapshot can be released once the iterator is created.
+	return db.newIterator(se.seq, slice, ro)
 }
 
 // GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
@@ -588,25 +749,35 @@ func (d *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterat
 // content of snapshot are guaranteed to be consistent.
 //
 // The snapshot must be released after use, by calling Release method.
-func (d *DB) GetSnapshot() (*Snapshot, error) {
-	if err := d.ok(); err != nil {
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+	if err := db.ok(); err != nil {
 		return nil, err
 	}
 
-	return d.newSnapshot(), nil
+	return db.newSnapshot(), nil
 }
 
 // GetProperty returns value of the given property name.
 //
 // Property names:
 //	leveldb.num-files-at-level{n}
-//		Returns the number of filer at level 'n'.
+//		Returns the number of files at level 'n'.
 //	leveldb.stats
 //		Returns statistics of the underlying DB.
 //	leveldb.sstables
 //		Returns sstables list for each level.
-func (d *DB) GetProperty(name string) (value string, err error) {
-	err = d.ok()
+//	leveldb.blockpool
+//		Returns block pool stats.
+//	leveldb.cachedblock
+//		Returns size of cached block.
+//	leveldb.openedtables
+//		Returns number of opened tables.
+//	leveldb.alivesnaps
+//		Returns number of alive snapshots.
+//	leveldb.aliveiters
+//		Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+	err = db.ok()
 	if err != nil {
 		return
 	}
@@ -615,19 +786,18 @@ func (d *DB) GetProperty(name string) (value string, err error) {
 	if !strings.HasPrefix(name, prefix) {
 		return "", errors.New("leveldb: GetProperty: unknown property: " + name)
 	}
-
 	p := name[len(prefix):]
 
-	s := d.s
-	v := s.version()
+	v := db.s.version()
 	defer v.release()
 
+	numFilesPrefix := "num-files-at-level"
 	switch {
-	case strings.HasPrefix(p, "num-files-at-level"):
+	case strings.HasPrefix(p, numFilesPrefix):
 		var level uint
 		var rest string
-		n, _ := fmt.Scanf("%d%s", &level, &rest)
-		if n != 1 || level >= kNumLevels {
+		n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+		if n != 1 || int(level) >= db.s.o.GetNumLevel() {
 			err = errors.New("leveldb: GetProperty: invalid property: " + name)
 		} else {
 			value = fmt.Sprint(v.tLen(int(level)))
@@ -636,22 +806,36 @@ func (d *DB) GetProperty(name string) (value string, err error) {
 		value = "Compactions\n" +
 			" Level |   Tables   |    Size(MB)   |    Time(sec)  |    Read(MB)   |   Write(MB)\n" +
 			"-------+------------+---------------+---------------+---------------+---------------\n"
-		for level, tt := range v.tables {
-			duration, read, write := d.compStats[level].get()
-			if len(tt) == 0 && duration == 0 {
+		for level, tables := range v.tables {
+			duration, read, write := db.compStats[level].get()
+			if len(tables) == 0 && duration == 0 {
 				continue
 			}
 			value += fmt.Sprintf(" %3d   | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
-				level, len(tt), float64(tt.size())/1048576.0, duration.Seconds(),
+				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
 				float64(read)/1048576.0, float64(write)/1048576.0)
 		}
 	case p == "sstables":
-		for level, tt := range v.tables {
+		for level, tables := range v.tables {
 			value += fmt.Sprintf("--- level %d ---\n", level)
-			for _, t := range tt {
-				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.min, t.max)
+			for _, t := range tables {
+				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
 			}
 		}
+	case p == "blockpool":
+		value = fmt.Sprintf("%v", db.s.tops.bpool)
+	case p == "cachedblock":
+		if db.s.tops.bcache != nil {
+			value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+		} else {
+			value = "<nil>"
+		}
+	case p == "openedtables":
+		value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+	case p == "alivesnaps":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+	case p == "aliveiters":
+		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
 	default:
 		err = errors.New("leveldb: GetProperty: unknown property: " + name)
 	}
@@ -665,23 +849,23 @@ func (d *DB) GetProperty(name string) (value string, err error) {
 // data compresses by a factor of ten, the returned sizes will be one-tenth
 // the size of the corresponding user data size.
 // The results may not include the sizes of recently written data.
-func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
-	if err := d.ok(); err != nil {
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+	if err := db.ok(); err != nil {
 		return nil, err
 	}
 
-	v := d.s.version()
+	v := db.s.version()
 	defer v.release()
 
 	sizes := make(Sizes, 0, len(ranges))
 	for _, r := range ranges {
-		min := newIKey(r.Start, kMaxSeq, tSeek)
-		max := newIKey(r.Limit, kMaxSeq, tSeek)
-		start, err := v.offsetOf(min)
+		imin := newIkey(r.Start, kMaxSeq, ktSeek)
+		imax := newIkey(r.Limit, kMaxSeq, ktSeek)
+		start, err := v.offsetOf(imin)
 		if err != nil {
 			return nil, err
 		}
-		limit, err := v.offsetOf(max)
+		limit, err := v.offsetOf(imax)
 		if err != nil {
 			return nil, err
 		}
@@ -695,61 +879,67 @@ func (d *DB) SizeOf(ranges []util.Range) (Sizes, error) {
 	return sizes, nil
 }
 
-// Close closes the DB. This will also releases any outstanding snapshot.
+// Close closes the DB. This will also release any outstanding snapshot and
+// abort any in-flight compaction.
 //
 // It is not safe to close a DB until all outstanding iterators are released.
 // It is valid to call Close multiple times. Other methods should not be
 // called after the DB has been closed.
-func (d *DB) Close() error {
-	if !d.setClosed() {
+func (db *DB) Close() error {
+	if !db.setClosed() {
 		return ErrClosed
 	}
 
-	s := d.s
 	start := time.Now()
-	s.log("db@close closing")
+	db.log("db@close closing")
 
 	// Clear the finalizer.
-	runtime.SetFinalizer(d, nil)
+	runtime.SetFinalizer(db, nil)
 
 	// Get compaction error.
 	var err error
 	select {
-	case err = <-d.compErrC:
+	case err = <-db.compErrC:
 	default:
 	}
 
-	close(d.closeC)
+	// Signal all goroutines.
+	close(db.closeC)
 
-	// Wait for the close WaitGroup.
-	d.closeW.Wait()
+	// Wait for all goroutines to exit.
+	db.closeW.Wait()
+
+	// Lock the writer and close the journal.
+	db.writeLockC <- struct{}{}
+	if db.journal != nil {
+		db.journal.Close()
+		db.journalWriter.Close()
+	}
 
-	// Close journal.
-	if d.journal != nil {
-		d.journal.Close()
-		d.journalWriter.Close()
+	if db.writeDelayN > 0 {
+		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
 	}
 
 	// Close session.
-	s.close()
-	s.logf("db@close done T·%v", time.Since(start))
-	s.release()
+	db.s.close()
+	db.logf("db@close done T·%v", time.Since(start))
+	db.s.release()
 
-	if d.closer != nil {
-		if err1 := d.closer.Close(); err == nil {
+	if db.closer != nil {
+		if err1 := db.closer.Close(); err == nil {
 			err = err1
 		}
 	}
 
-	d.s = nil
-	d.mem = nil
-	d.frozenMem = nil
-	d.journal = nil
-	d.journalWriter = nil
-	d.journalFile = nil
-	d.frozenJournalFile = nil
-	d.snapsRoot = snapshotElement{}
-	d.closer = nil
+	// Nil the pointers.
+	db.s = nil
+	db.mem = nil
+	db.frozenMem = nil
+	db.journal = nil
+	db.journalWriter = nil
+	db.journalFile = nil
+	db.frozenJournalFile = nil
+	db.closer = nil
 
 	return err
 }
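With Get now returning its own copy and the new Has method, a typical read path against this revision looks like the sketch below (the path and keys are illustrative; the property name is one of those documented above):

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb"
	)

	func main() {
		db, err := leveldb.OpenFile("/tmp/example.db", nil)
		if err != nil {
			panic(err)
		}
		defer db.Close()

		if err := db.Put([]byte("key"), []byte("value"), nil); err != nil {
			panic(err)
		}

		ok, err := db.Has([]byte("key"), nil) // new in this revision
		fmt.Println(ok, err)

		v, err := db.Get([]byte("key"), nil) // v is the caller's own copy
		fmt.Println(string(v), err)

		iters, _ := db.GetProperty("leveldb.aliveiters")
		fmt.Println("alive iterators:", iters)
	}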
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
index c82bd9f28ac23a143a5311742a6ef42d92eb01d0..447407abae6806ce427566bd18f05c7c8be05f8b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -7,11 +7,12 @@
 package leveldb
 
 import (
-	"errors"
 	"sync"
 	"time"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
 )
 
 var (
@@ -68,13 +69,13 @@ type cMem struct {
 }
 
 func newCMem(s *session) *cMem {
-	return &cMem{s: s, rec: new(sessionRecord)}
+	return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
 }
 
 func (c *cMem) flush(mem *memdb.DB, level int) error {
 	s := c.s
 
-	// Write memdb to table
+	// Write memdb to table.
 	iter := mem.NewIterator(nil)
 	defer iter.Release()
 	t, n, err := s.tops.createFrom(iter)
@@ -82,51 +83,85 @@ func (c *cMem) flush(mem *memdb.DB, level int) error {
 		return err
 	}
 
+	// Pick level.
 	if level < 0 {
-		level = s.version_NB().pickLevel(t.min.ukey(), t.max.ukey())
+		v := s.version()
+		level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
+		v.release()
 	}
 	c.rec.addTableFile(level, t)
 
-	s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.min, t.max)
+	s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
 
 	c.level = level
 	return nil
 }
 
 func (c *cMem) reset() {
-	c.rec = new(sessionRecord)
+	c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
 }
 
 func (c *cMem) commit(journal, seq uint64) error {
 	c.rec.setJournalNum(journal)
-	c.rec.setSeq(seq)
-	// Commit changes
+	c.rec.setSeqNum(seq)
+
+	// Commit changes.
 	return c.s.commit(c.rec)
 }
 
-func (d *DB) compactionError() {
-	var err error
+func (db *DB) compactionError() {
+	var (
+		err     error
+		wlocked bool
+	)
 noerr:
+	// No error.
 	for {
 		select {
-		case _, _ = <-d.closeC:
-			return
-		case err = <-d.compErrSetC:
-			if err != nil {
+		case err = <-db.compErrSetC:
+			switch {
+			case err == nil:
+			case errors.IsCorrupted(err):
+				goto hasperr
+			default:
 				goto haserr
 			}
+		case _, _ = <-db.closeC:
+			return
 		}
 	}
 haserr:
+	// Transient error.
 	for {
 		select {
-		case _, _ = <-d.closeC:
-			return
-		case err = <-d.compErrSetC:
-			if err == nil {
+		case db.compErrC <- err:
+		case err = <-db.compErrSetC:
+			switch {
+			case err == nil:
 				goto noerr
+			case errors.IsCorrupted(err):
+				goto hasperr
+			default:
 			}
-		case d.compErrC <- err:
+		case _, _ = <-db.closeC:
+			return
+		}
+	}
+hasperr:
+	// Persistent error.
+	for {
+		select {
+		case db.compErrC <- err:
+		case db.compPerErrC <- err:
+		case db.writeLockC <- struct{}{}:
+			// Hold the write lock, so that writes won't pass through.
+			wlocked = true
+		case _, _ = <-db.closeC:
+			if wlocked {
+				// We should release the lock or Close will hang.
+				<-db.writeLockC
+			}
+			return
 		}
 	}
 }
@@ -137,114 +172,159 @@ func (cnt *compactionTransactCounter) incr() {
 	*cnt++
 }
 
-func (d *DB) compactionTransact(name string, exec func(cnt *compactionTransactCounter) error, rollback func() error) {
-	s := d.s
+type compactionTransactInterface interface {
+	run(cnt *compactionTransactCounter) error
+	revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
 	defer func() {
 		if x := recover(); x != nil {
-			if x == errCompactionTransactExiting && rollback != nil {
-				if err := rollback(); err != nil {
-					s.logf("%s rollback error %q", name, err)
+			if x == errCompactionTransactExiting {
+				if err := t.revert(); err != nil {
+					db.logf("%s revert error %q", name, err)
 				}
 			}
 			panic(x)
 		}
 	}()
+
 	const (
 		backoffMin = 1 * time.Second
 		backoffMax = 8 * time.Second
 		backoffMul = 2 * time.Second
 	)
-	backoff := backoffMin
-	backoffT := time.NewTimer(backoff)
-	lastCnt := compactionTransactCounter(0)
+	var (
+		backoff  = backoffMin
+		backoffT = time.NewTimer(backoff)
+		lastCnt  = compactionTransactCounter(0)
+
+		disableBackoff = db.s.o.GetDisableCompactionBackoff()
+	)
 	for n := 0; ; n++ {
 		// Check whether the DB is closed.
-		if d.isClosed() {
-			s.logf("%s exiting", name)
-			d.compactionExitTransact()
+		if db.isClosed() {
+			db.logf("%s exiting", name)
+			db.compactionExitTransact()
 		} else if n > 0 {
-			s.logf("%s retrying N·%d", name, n)
+			db.logf("%s retrying N·%d", name, n)
 		}
 
 		// Execute.
 		cnt := compactionTransactCounter(0)
-		err := exec(&cnt)
+		err := t.run(&cnt)
+		if err != nil {
+			db.logf("%s error I·%d %q", name, cnt, err)
+		}
 
 		// Set compaction error status.
 		select {
-		case d.compErrSetC <- err:
-		case _, _ = <-d.closeC:
-			s.logf("%s exiting", name)
-			d.compactionExitTransact()
+		case db.compErrSetC <- err:
+		case perr := <-db.compPerErrC:
+			if err != nil {
+				db.logf("%s exiting (persistent error %q)", name, perr)
+				db.compactionExitTransact()
+			}
+		case _, _ = <-db.closeC:
+			db.logf("%s exiting", name)
+			db.compactionExitTransact()
 		}
 		if err == nil {
 			return
 		}
-		s.logf("%s error I·%d %q", name, cnt, err)
-
-		// Reset backoff duration if counter is advancing.
-		if cnt > lastCnt {
-			backoff = backoffMin
-			lastCnt = cnt
+		if errors.IsCorrupted(err) {
+			db.logf("%s exiting (corruption detected)", name)
+			db.compactionExitTransact()
 		}
 
-		// Backoff.
-		backoffT.Reset(backoff)
-		if backoff < backoffMax {
-			backoff *= backoffMul
-			if backoff > backoffMax {
-				backoff = backoffMax
+		if !disableBackoff {
+			// Reset backoff duration if counter is advancing.
+			if cnt > lastCnt {
+				backoff = backoffMin
+				lastCnt = cnt
+			}
+
+			// Backoff.
+			backoffT.Reset(backoff)
+			if backoff < backoffMax {
+				backoff *= backoffMul
+				if backoff > backoffMax {
+					backoff = backoffMax
+				}
+			}
+			select {
+			case <-backoffT.C:
+			case _, _ = <-db.closeC:
+				db.logf("%s exiting", name)
+				db.compactionExitTransact()
 			}
 		}
-		select {
-		case <-backoffT.C:
-		case _, _ = <-d.closeC:
-			s.logf("%s exiting", name)
-			d.compactionExitTransact()
-		}
 	}
 }
 
-func (d *DB) compactionExitTransact() {
+type compactionTransactFunc struct {
+	runFunc    func(cnt *compactionTransactCounter) error
+	revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+	return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+	if t.revertFunc != nil {
+		return t.revertFunc()
+	}
+	return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+	db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
+
+func (db *DB) compactionExitTransact() {
 	panic(errCompactionTransactExiting)
 }
 
-func (d *DB) memCompaction() {
-	mem := d.getFrozenMem()
+func (db *DB) memCompaction() {
+	mem := db.getFrozenMem()
 	if mem == nil {
 		return
 	}
+	defer mem.decref()
 
-	s := d.s
-	c := newCMem(s)
+	c := newCMem(db.s)
 	stats := new(cStatsStaging)
 
-	s.logf("mem@flush N·%d S·%s", mem.Len(), shortenb(mem.Size()))
+	db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
 
 	// Don't compact empty memdb.
-	if mem.Len() == 0 {
-		s.logf("mem@flush skipping")
+	if mem.mdb.Len() == 0 {
+		db.logf("mem@flush skipping")
 		// drop frozen mem
-		d.dropFrozenMem()
+		db.dropFrozenMem()
 		return
 	}
 
 	// Pause table compaction.
-	ch := make(chan struct{})
+	resumeC := make(chan struct{})
 	select {
-	case d.tcompPauseC <- (chan<- struct{})(ch):
-	case _, _ = <-d.closeC:
+	case db.tcompPauseC <- (chan<- struct{})(resumeC):
+	case <-db.compPerErrC:
+		close(resumeC)
+		resumeC = nil
+	case _, _ = <-db.closeC:
 		return
 	}
 
-	d.compactionTransact("mem@flush", func(cnt *compactionTransactCounter) (err error) {
+	db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
 		stats.startTimer()
 		defer stats.stopTimer()
-		return c.flush(mem, -1)
+		return c.flush(mem.mdb, -1)
 	}, func() error {
 		for _, r := range c.rec.addedTables {
-			s.logf("mem@flush rollback @%d", r.num)
-			f := s.getTableFile(r.num)
+			db.logf("mem@flush revert @%d", r.num)
+			f := db.s.getTableFile(r.num)
 			if err := f.Remove(); err != nil {
 				return err
 			}
@@ -252,279 +332,327 @@ func (d *DB) memCompaction() {
 		return nil
 	})
 
-	d.compactionTransact("mem@commit", func(cnt *compactionTransactCounter) (err error) {
+	db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
 		stats.startTimer()
 		defer stats.stopTimer()
-		return c.commit(d.journalFile.Num(), d.frozenSeq)
+		return c.commit(db.journalFile.Num(), db.frozenSeq)
 	}, nil)
 
-	s.logf("mem@flush commited F·%d T·%v", len(c.rec.addedTables), stats.duration)
+	db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
 
 	for _, r := range c.rec.addedTables {
 		stats.write += r.size
 	}
-	d.compStats[c.level].add(stats)
+	db.compStats[c.level].add(stats)
 
 	// Drop frozen mem.
-	d.dropFrozenMem()
+	db.dropFrozenMem()
 
 	// Resume table compaction.
-	select {
-	case <-ch:
-	case _, _ = <-d.closeC:
-		return
+	if resumeC != nil {
+		select {
+		case <-resumeC:
+			close(resumeC)
+		case _, _ = <-db.closeC:
+			return
+		}
 	}
 
 	// Trigger table compaction.
-	d.compTrigger(d.mcompTriggerC)
+	db.compSendTrigger(db.tcompCmdC)
 }
 
-func (d *DB) tableCompaction(c *compaction, noTrivial bool) {
-	s := d.s
+type tableCompactionBuilder struct {
+	db           *DB
+	s            *session
+	c            *compaction
+	rec          *sessionRecord
+	stat0, stat1 *cStatsStaging
 
-	rec := new(sessionRecord)
-	rec.addCompactionPointer(c.level, c.max)
+	snapHasLastUkey bool
+	snapLastUkey    []byte
+	snapLastSeq     uint64
+	snapIter        int
+	snapKerrCnt     int
+	snapDropCnt     int
 
-	if !noTrivial && c.trivial() {
-		t := c.tables[0][0]
-		s.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
-		rec.deleteTable(c.level, t.file.Num())
-		rec.addTableFile(c.level+1, t)
-		d.compactionTransact("table@move", func(cnt *compactionTransactCounter) (err error) {
-			return s.commit(rec)
-		}, nil)
-		return
-	}
+	kerrCnt int
+	dropCnt int
 
-	var stats [2]cStatsStaging
-	for i, tt := range c.tables {
-		for _, t := range tt {
-			stats[i].read += t.size
-			// Insert deleted tables into record
-			rec.deleteTable(c.level+i, t.file.Num())
-		}
-	}
-	sourceSize := int(stats[0].read + stats[1].read)
-	minSeq := d.minSeq()
-	s.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
-
-	var snapUkey []byte
-	var snapHasUkey bool
-	var snapSeq uint64
-	var snapIter int
-	var snapDropCnt int
-	var dropCnt int
-	d.compactionTransact("table@build", func(cnt *compactionTransactCounter) (err error) {
-		ukey := append([]byte{}, snapUkey...)
-		hasUkey := snapHasUkey
-		lseq := snapSeq
-		dropCnt = snapDropCnt
-		snapSched := snapIter == 0
-
-		var tw *tWriter
-		finish := func() error {
-			t, err := tw.finish()
-			if err != nil {
-				return err
+	minSeq    uint64
+	strict    bool
+	tableSize int
+
+	tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+	// Create new table if not already.
+	if b.tw == nil {
+		// Check for pause event.
+		if b.db != nil {
+			select {
+			case ch := <-b.db.tcompPauseC:
+				b.db.pauseCompaction(ch)
+			case _, _ = <-b.db.closeC:
+				b.db.compactionExitTransact()
+			default:
 			}
-			rec.addTableFile(c.level+1, t)
-			stats[1].write += t.size
-			s.logf("table@build created L%d@%d N·%d S·%s %q:%q", c.level+1, t.file.Num(), tw.tw.EntriesLen(), shortenb(int(t.size)), t.min, t.max)
-			return nil
 		}
 
-		defer func() {
-			stats[1].stopTimer()
-			if tw != nil {
-				tw.drop()
-				tw = nil
-			}
-		}()
+		// Create new table.
+		var err error
+		b.tw, err = b.s.tops.create()
+		if err != nil {
+			return err
+		}
+	}
 
-		stats[1].startTimer()
-		iter := c.newIterator()
-		defer iter.Release()
-		for i := 0; iter.Next(); i++ {
-			// Incr transact counter.
-			cnt.incr()
-
-			// Skip until last state.
-			if i < snapIter {
-				continue
-			}
+	// Write key/value into table.
+	return b.tw.append(key, value)
+}
 
-			key := iKey(iter.Key())
+func (b *tableCompactionBuilder) needFlush() bool {
+	return b.tw.tw.BytesLen() >= b.tableSize
+}
 
-			if c.shouldStopBefore(key) && tw != nil {
-				err = finish()
-				if err != nil {
-					return
-				}
-				snapSched = true
-				tw = nil
-			}
+func (b *tableCompactionBuilder) flush() error {
+	t, err := b.tw.finish()
+	if err != nil {
+		return err
+	}
+	b.rec.addTableFile(b.c.level+1, t)
+	b.stat1.write += t.size
+	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.level+1, t.file.Num(), b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+	b.tw = nil
+	return nil
+}
 
-			// Scheduled for snapshot, snapshot will used to retry compaction
-			// if error occured.
-			if snapSched {
-				snapUkey = append(snapUkey[:0], ukey...)
-				snapHasUkey = hasUkey
-				snapSeq = lseq
-				snapIter = i
-				snapDropCnt = dropCnt
-				snapSched = false
-			}
+func (b *tableCompactionBuilder) cleanup() {
+	if b.tw != nil {
+		b.tw.drop()
+		b.tw = nil
+	}
+}
 
-			if seq, t, ok := key.parseNum(); !ok {
-				// Don't drop error keys
-				ukey = ukey[:0]
-				hasUkey = false
-				lseq = kMaxSeq
-			} else {
-				if !hasUkey || s.icmp.uCompare(key.ukey(), ukey) != 0 {
-					// First occurrence of this user key
-					ukey = append(ukey[:0], key.ukey()...)
-					hasUkey = true
-					lseq = kMaxSeq
-				}
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+	snapResumed := b.snapIter > 0
+	hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+	lastUkey := append([]byte{}, b.snapLastUkey...)
+	lastSeq := b.snapLastSeq
+	b.kerrCnt = b.snapKerrCnt
+	b.dropCnt = b.snapDropCnt
+	// Restore compaction state.
+	b.c.restore()
 
-				drop := false
-				if lseq <= minSeq {
-					// Dropped because newer entry for same user key exist
-					drop = true // (A)
-				} else if t == tDel && seq <= minSeq && c.isBaseLevelForKey(ukey) {
-					// For this user key:
-					// (1) there is no data in higher levels
-					// (2) data in lower levels will have larger seq numbers
-					// (3) data in layers that are being compacted here and have
-					//     smaller seq numbers will be dropped in the next
-					//     few iterations of this loop (by rule (A) above).
-					// Therefore this deletion marker is obsolete and can be dropped.
-					drop = true
-				}
+	defer b.cleanup()
 
-				lseq = seq
-				if drop {
-					dropCnt++
-					continue
-				}
-			}
+	b.stat1.startTimer()
+	defer b.stat1.stopTimer()
 
-			// Create new table if not already
-			if tw == nil {
-				// Check for pause event.
-				select {
-				case ch := <-d.tcompPauseC:
-					d.pauseCompaction(ch)
-				case _, _ = <-d.closeC:
-					d.compactionExitTransact()
-				default:
-				}
+	iter := b.c.newIterator()
+	defer iter.Release()
+	for i := 0; iter.Next(); i++ {
+		// Incr transact counter.
+		cnt.incr()
+
+		// Skip until last state.
+		if i < b.snapIter {
+			continue
+		}
 
-				// Create new table.
-				tw, err = s.tops.create()
-				if err != nil {
-					return
+		resumed := false
+		if snapResumed {
+			resumed = true
+			snapResumed = false
+		}
+
+		ikey := iter.Key()
+		ukey, seq, kt, kerr := parseIkey(ikey)
+
+		if kerr == nil {
+			shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+				// First occurrence of this user key.
+
+				// Only rotate tables if ukey doesn't hop across.
+				if b.tw != nil && (shouldStop || b.needFlush()) {
+					if err := b.flush(); err != nil {
+						return err
+					}
+
+					// Create a snapshot of the state.
+					b.c.save()
+					b.snapHasLastUkey = hasLastUkey
+					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+					b.snapLastSeq = lastSeq
+					b.snapIter = i
+					b.snapKerrCnt = b.kerrCnt
+					b.snapDropCnt = b.dropCnt
 				}
-			}
 
-			// Write key/value into table
-			err = tw.add(key, iter.Value())
-			if err != nil {
-				return
+				hasLastUkey = true
+				lastUkey = append(lastUkey[:0], ukey...)
+				lastSeq = kMaxSeq
 			}
 
-			// Finish table if it is big enough
-			if tw.tw.BytesLen() >= kMaxTableSize {
-				err = finish()
-				if err != nil {
-					return
-				}
-				snapSched = true
-				tw = nil
+			switch {
+			case lastSeq <= b.minSeq:
+				// Dropped because a newer entry for the same user key exists
+				fallthrough // (A)
+			case kt == ktDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+				// For this user key:
+				// (1) there is no data in higher levels
+				// (2) data in lower levels will have larger seq numbers
+				// (3) data in layers that are being compacted here and have
+				//     smaller seq numbers will be dropped in the next
+				//     few iterations of this loop (by rule (A) above).
+				// Therefore this deletion marker is obsolete and can be dropped.
+				lastSeq = seq
+				b.dropCnt++
+				continue
+			default:
+				lastSeq = seq
+			}
+		} else {
+			if b.strict {
+				return kerr
 			}
+
+			// Don't drop corrupted keys.
+			hasLastUkey = false
+			lastUkey = lastUkey[:0]
+			lastSeq = kMaxSeq
+			b.kerrCnt++
 		}
 
-		err = iter.Error()
-		if err != nil {
-			return
+		if err := b.appendKV(ikey, iter.Value()); err != nil {
+			return err
 		}
+	}
 
-		// Finish last table
-		if tw != nil && !tw.empty() {
-			err = finish()
-			if err != nil {
-				return
-			}
-			tw = nil
+	if err := iter.Error(); err != nil {
+		return err
+	}
+
+	// Finish last table.
+	if b.tw != nil && !b.tw.empty() {
+		return b.flush()
+	}
+	return nil
+}
+
+func (b *tableCompactionBuilder) revert() error {
+	for _, at := range b.rec.addedTables {
+		b.s.logf("table@build revert @%d", at.num)
+		f := b.s.getTableFile(at.num)
+		if err := f.Remove(); err != nil {
+			return err
 		}
+	}
+	return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+	defer c.release()
+
+	rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
+	rec.addCompPtr(c.level, c.imax)
+
+	if !noTrivial && c.trivial() {
+		t := c.tables[0][0]
+		db.logf("table@move L%d@%d -> L%d", c.level, t.file.Num(), c.level+1)
+		rec.delTable(c.level, t.file.Num())
+		rec.addTableFile(c.level+1, t)
+		db.compactionTransactFunc("table@move", func(cnt *compactionTransactCounter) (err error) {
+			return db.s.commit(rec)
+		}, nil)
 		return
-	}, func() error {
-		for _, r := range rec.addedTables {
-			s.logf("table@build rollback @%d", r.num)
-			f := s.getTableFile(r.num)
-			if err := f.Remove(); err != nil {
-				return err
-			}
+	}
+
+	var stats [2]cStatsStaging
+	for i, tables := range c.tables {
+		for _, t := range tables {
+			stats[i].read += t.size
+			// Insert deleted tables into record
+			rec.delTable(c.level+i, t.file.Num())
 		}
-		return nil
-	})
+	}
+	sourceSize := int(stats[0].read + stats[1].read)
+	minSeq := db.minSeq()
+	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.level, len(c.tables[0]), c.level+1, len(c.tables[1]), shortenb(sourceSize), minSeq)
+
+	b := &tableCompactionBuilder{
+		db:        db,
+		s:         db.s,
+		c:         c,
+		rec:       rec,
+		stat1:     &stats[1],
+		minSeq:    minSeq,
+		strict:    db.s.o.GetStrict(opt.StrictCompaction),
+		tableSize: db.s.o.GetCompactionTableSize(c.level + 1),
+	}
+	db.compactionTransact("table@build", b)
 
 	// Commit changes
-	d.compactionTransact("table@commit", func(cnt *compactionTransactCounter) (err error) {
+	db.compactionTransactFunc("table@commit", func(cnt *compactionTransactCounter) (err error) {
 		stats[1].startTimer()
 		defer stats[1].stopTimer()
-		return s.commit(rec)
+		return db.s.commit(rec)
 	}, nil)
 
-	resultSize := int(int(stats[1].write))
-	s.logf("table@compaction commited F%s S%s D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), dropCnt, stats[1].duration)
+	resultSize := int(stats[1].write)
+	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
 
 	// Save compaction stats
 	for i := range stats {
-		d.compStats[c.level+1].add(&stats[i])
+		db.compStats[c.level+1].add(&stats[i])
 	}
 }
 
-func (d *DB) tableRangeCompaction(level int, min, max []byte) {
-	s := d.s
-	s.logf("table@compaction range L%d %q:%q", level, min, max)
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) {
+	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
 
 	if level >= 0 {
-		if c := s.getCompactionRange(level, min, max); c != nil {
-			d.tableCompaction(c, true)
+		if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+			db.tableCompaction(c, true)
 		}
 	} else {
-		v := s.version_NB()
+		v := db.s.version()
 		m := 1
 		for i, t := range v.tables[1:] {
-			if t.isOverlaps(min, max, true, s.icmp) {
+			if t.overlaps(db.s.icmp, umin, umax, false) {
 				m = i + 1
 			}
 		}
+		v.release()
+
 		for level := 0; level < m; level++ {
-			if c := s.getCompactionRange(level, min, max); c != nil {
-				d.tableCompaction(c, true)
+			if c := db.s.getCompactionRange(level, umin, umax); c != nil {
+				db.tableCompaction(c, true)
 			}
 		}
 	}
 }
 
-func (d *DB) tableAutoCompaction() {
-	if c := d.s.pickCompaction(); c != nil {
-		d.tableCompaction(c, false)
+func (db *DB) tableAutoCompaction() {
+	if c := db.s.pickCompaction(); c != nil {
+		db.tableCompaction(c, false)
 	}
 }
 
-func (d *DB) tableNeedCompaction() bool {
-	return d.s.version_NB().needCompaction()
+func (db *DB) tableNeedCompaction() bool {
+	v := db.s.version()
+	defer v.release()
+	return v.needCompaction()
 }
 
-func (d *DB) pauseCompaction(ch chan<- struct{}) {
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
 	select {
 	case ch <- struct{}{}:
-	case _, _ = <-d.closeC:
-		d.compactionExitTransact()
+	case _, _ = <-db.closeC:
+		db.compactionExitTransact()
 	}
 }
 
@@ -537,7 +665,12 @@ type cIdle struct {
 }
 
 func (r cIdle) ack(err error) {
-	r.ackC <- err
+	if r.ackC != nil {
+		defer func() {
+			recover()
+		}()
+		r.ackC <- err
+	}
 }
 
 type cRange struct {
@@ -547,56 +680,67 @@ type cRange struct {
 }
 
 func (r cRange) ack(err error) {
-	defer func() {
-		recover()
-	}()
 	if r.ackC != nil {
+		defer func() {
+			recover()
+		}()
 		r.ackC <- err
 	}
 }
 
-func (d *DB) compSendIdle(compC chan<- cCmd) error {
+// This will trigger auto compaction and/or wait for all compactions to be done.
+func (db *DB) compSendIdle(compC chan<- cCmd) (err error) {
 	ch := make(chan error)
 	defer close(ch)
 	// Send cmd.
 	select {
 	case compC <- cIdle{ch}:
-	case err := <-d.compErrC:
-		return err
-	case _, _ = <-d.closeC:
+	case err = <-db.compErrC:
+		return
+	case _, _ = <-db.closeC:
 		return ErrClosed
 	}
 	// Wait cmd.
-	return <-ch
+	select {
+	case err = <-ch:
+	case err = <-db.compErrC:
+	case _, _ = <-db.closeC:
+		return ErrClosed
+	}
+	return err
 }
 
-func (d *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compSendTrigger(compC chan<- cCmd) {
+	select {
+	case compC <- cIdle{}:
+	default:
+	}
+}
+
+// Send range compaction request.
+func (db *DB) compSendRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
 	ch := make(chan error)
 	defer close(ch)
 	// Send cmd.
 	select {
 	case compC <- cRange{level, min, max, ch}:
-	case err := <-d.compErrC:
+	case err := <-db.compErrC:
 		return err
-	case _, _ = <-d.closeC:
+	case _, _ = <-db.closeC:
 		return ErrClosed
 	}
 	// Wait cmd.
 	select {
-	case err = <-d.compErrC:
 	case err = <-ch:
+	case err = <-db.compErrC:
+	case _, _ = <-db.closeC:
+		return ErrClosed
 	}
 	return err
 }
 
-func (d *DB) compTrigger(compTriggerC chan struct{}) {
-	select {
-	case compTriggerC <- struct{}{}:
-	default:
-	}
-}
-
-func (d *DB) mCompaction() {
+func (db *DB) mCompaction() {
 	var x cCmd
 
 	defer func() {
@@ -608,24 +752,27 @@ func (d *DB) mCompaction() {
 		if x != nil {
 			x.ack(ErrClosed)
 		}
-		d.closeW.Done()
+		db.closeW.Done()
 	}()
 
 	for {
 		select {
-		case _, _ = <-d.closeC:
+		case x = <-db.mcompCmdC:
+			switch x.(type) {
+			case cIdle:
+				db.memCompaction()
+				x.ack(nil)
+				x = nil
+			default:
+				panic("leveldb: unknown command")
+			}
+		case _, _ = <-db.closeC:
 			return
-		case x = <-d.mcompCmdC:
-			d.memCompaction()
-			x.ack(nil)
-			x = nil
-		case <-d.mcompTriggerC:
-			d.memCompaction()
 		}
 	}
 }
 
-func (d *DB) tCompaction() {
+func (db *DB) tCompaction() {
 	var x cCmd
 	var ackQ []cCmd
 
@@ -642,19 +789,18 @@ func (d *DB) tCompaction() {
 		if x != nil {
 			x.ack(ErrClosed)
 		}
-		d.closeW.Done()
+		db.closeW.Done()
 	}()
 
 	for {
-		if d.tableNeedCompaction() {
+		if db.tableNeedCompaction() {
 			select {
-			case x = <-d.tcompCmdC:
-			case <-d.tcompTriggerC:
-			case _, _ = <-d.closeC:
-				return
-			case ch := <-d.tcompPauseC:
-				d.pauseCompaction(ch)
+			case x = <-db.tcompCmdC:
+			case ch := <-db.tcompPauseC:
+				db.pauseCompaction(ch)
 				continue
+			case _, _ = <-db.closeC:
+				return
 			default:
 			}
 		} else {
@@ -664,12 +810,11 @@ func (d *DB) tCompaction() {
 			}
 			ackQ = ackQ[:0]
 			select {
-			case x = <-d.tcompCmdC:
-			case <-d.tcompTriggerC:
-			case ch := <-d.tcompPauseC:
-				d.pauseCompaction(ch)
+			case x = <-db.tcompCmdC:
+			case ch := <-db.tcompPauseC:
+				db.pauseCompaction(ch)
 				continue
-			case _, _ = <-d.closeC:
+			case _, _ = <-db.closeC:
 				return
 			}
 		}
@@ -678,11 +823,13 @@ func (d *DB) tCompaction() {
 			case cIdle:
 				ackQ = append(ackQ, x)
 			case cRange:
-				d.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
+				db.tableRangeCompaction(cmd.level, cmd.min, cmd.max)
 				x.ack(nil)
+			default:
+				panic("leveldb: unknown command")
 			}
 			x = nil
 		}
-		d.tableAutoCompaction()
+		db.tableAutoCompaction()
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
index 9973a8fef555b0933fb4d14c8c1d0d76bdef1102..011a94a3513c930a1303033bc64ec9ea4073810e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -8,7 +8,10 @@ package leveldb
 
 import (
 	"errors"
+	"math/rand"
 	"runtime"
+	"sync"
+	"sync/atomic"
 
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -19,50 +22,69 @@ var (
 	errInvalidIkey = errors.New("leveldb: Iterator: invalid internal key")
 )
 
-func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	s := db.s
+type memdbReleaser struct {
+	once sync.Once
+	m    *memDB
+}
+
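+// Release decrements the wrapped memDB's reference count; sync.Once ensures the decref happens at most once even if Release is called repeatedly.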
+func (mr *memdbReleaser) Release() {
+	mr.once.Do(func() {
+		mr.m.decref()
+	})
+}
 
+func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
 	em, fm := db.getMems()
-	v := s.version()
+	v := db.s.version()
 
 	ti := v.getIterators(slice, ro)
 	n := len(ti) + 2
 	i := make([]iterator.Iterator, 0, n)
-	i = append(i, em.NewIterator(slice))
+	emi := em.mdb.NewIterator(slice)
+	emi.SetReleaser(&memdbReleaser{m: em})
+	i = append(i, emi)
 	if fm != nil {
-		i = append(i, fm.NewIterator(slice))
+		fmi := fm.mdb.NewIterator(slice)
+		fmi.SetReleaser(&memdbReleaser{m: fm})
+		i = append(i, fmi)
 	}
 	i = append(i, ti...)
-	strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
-	mi := iterator.NewMergedIterator(i, s.icmp, strict)
+	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+	mi := iterator.NewMergedIterator(i, db.s.icmp, strict)
 	mi.SetReleaser(&versionReleaser{v: v})
 	return mi
 }
 
 func (db *DB) newIterator(seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
-	var slice_ *util.Range
+	var islice *util.Range
 	if slice != nil {
-		slice_ = &util.Range{}
+		islice = &util.Range{}
 		if slice.Start != nil {
-			slice_.Start = newIKey(slice.Start, kMaxSeq, tSeek)
+			islice.Start = newIkey(slice.Start, kMaxSeq, ktSeek)
 		}
 		if slice.Limit != nil {
-			slice_.Limit = newIKey(slice.Limit, kMaxSeq, tSeek)
+			islice.Limit = newIkey(slice.Limit, kMaxSeq, ktSeek)
 		}
 	}
-	rawIter := db.newRawIterator(slice_, ro)
+	rawIter := db.newRawIterator(islice, ro)
 	iter := &dbIter{
+		db:     db,
 		icmp:   db.s.icmp,
 		iter:   rawIter,
 		seq:    seq,
-		strict: db.s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator),
+		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
 		key:    make([]byte, 0),
 		value:  make([]byte, 0),
 	}
+	atomic.AddInt32(&db.aliveIters, 1)
 	runtime.SetFinalizer(iter, (*dbIter).Release)
 	return iter
 }
 
+func (db *DB) iterSamplingRate() int {
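+	// rand.Intn(2*rate) yields a byte budget that averages the configured IteratorSamplingRate.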
+	return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
 type dir int
 
 const (
@@ -75,16 +97,27 @@ const (
 
 // dbIter represents an iterator over a database session.
 type dbIter struct {
+	db     *DB
 	icmp   *iComparer
 	iter   iterator.Iterator
 	seq    uint64
 	strict bool
 
-	dir      dir
-	key      []byte
-	value    []byte
-	err      error
-	releaser util.Releaser
+	samplingGap int
+	dir         dir
+	key         []byte
+	value       []byte
+	err         error
+	releaser    util.Releaser
+}
+
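+// sampleSeek charges the size of the current entry against a randomized byte budget; once the budget is spent, the key is reported to the DB, which may schedule a seek-triggered compaction.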
+func (i *dbIter) sampleSeek() {
+	ikey := i.iter.Key()
+	i.samplingGap -= len(ikey) + len(i.iter.Value())
+	for i.samplingGap < 0 {
+		i.samplingGap += i.db.iterSamplingRate()
+		i.db.sampleSeek(ikey)
+	}
 }
 
 func (i *dbIter) setErr(err error) {
@@ -144,7 +177,7 @@ func (i *dbIter) Seek(key []byte) bool {
 		return false
 	}
 
-	ikey := newIKey(key, i.seq, tSeek)
+	ikey := newIkey(key, i.seq, ktSeek)
 	if i.iter.Seek(ikey) {
 		i.dir = dirSOI
 		return i.next()
@@ -156,15 +189,15 @@ func (i *dbIter) Seek(key []byte) bool {
 
 func (i *dbIter) next() bool {
 	for {
-		ukey, seq, t, ok := parseIkey(i.iter.Key())
-		if ok {
+		if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+			i.sampleSeek()
 			if seq <= i.seq {
-				switch t {
-				case tDel:
+				switch kt {
+				case ktDel:
 					// Skip deleted key.
 					i.key = append(i.key[:0], ukey...)
 					i.dir = dirForward
-				case tVal:
+				case ktVal:
 					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
@@ -174,7 +207,7 @@ func (i *dbIter) next() bool {
 				}
 			}
 		} else if i.strict {
-			i.setErr(errInvalidIkey)
+			i.setErr(kerr)
 			break
 		}
 		if !i.iter.Next() {
@@ -207,20 +240,20 @@ func (i *dbIter) prev() bool {
 	del := true
 	if i.iter.Valid() {
 		for {
-			ukey, seq, t, ok := parseIkey(i.iter.Key())
-			if ok {
+			if ukey, seq, kt, kerr := parseIkey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
 				if seq <= i.seq {
 					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
 						return true
 					}
-					del = (t == tDel)
+					del = (kt == ktDel)
 					if !del {
 						i.key = append(i.key[:0], ukey...)
 						i.value = append(i.value[:0], i.iter.Value()...)
 					}
 				}
 			} else if i.strict {
-				i.setErr(errInvalidIkey)
+				i.setErr(kerr)
 				return false
 			}
 			if !i.iter.Prev() {
@@ -249,13 +282,13 @@ func (i *dbIter) Prev() bool {
 		return i.Last()
 	case dirForward:
 		for i.iter.Prev() {
-			ukey, _, _, ok := parseIkey(i.iter.Key())
-			if ok {
+			if ukey, _, _, kerr := parseIkey(i.iter.Key()); kerr == nil {
+				i.sampleSeek()
 				if i.icmp.uCompare(ukey, i.key) < 0 {
 					goto cont
 				}
 			} else if i.strict {
-				i.setErr(errInvalidIkey)
+				i.setErr(kerr)
 				return false
 			}
 		}
@@ -289,6 +322,7 @@ func (i *dbIter) Release() {
 
 		if i.releaser != nil {
 			i.releaser.Release()
+			i.releaser = nil
 		}
 
 		i.dir = dirReleased
@@ -296,13 +330,19 @@ func (i *dbIter) Release() {
 		i.value = nil
 		i.iter.Release()
 		i.iter = nil
+		atomic.AddInt32(&i.db.aliveIters, -1)
+		i.db = nil
 	}
 }
 
 func (i *dbIter) SetReleaser(releaser util.Releaser) {
-	if i.dir != dirReleased {
-		i.releaser = releaser
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
+	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
 	}
+	i.releaser = releaser
 }
 
 func (i *dbIter) Error() error {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
index 225b7cd5e2a9698c15ce3dd15ccd5421e40fd4ec..0372848ff1e4f62ececce67d9abe04d8e08121bd 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -7,8 +7,11 @@
 package leveldb
 
 import (
+	"container/list"
+	"fmt"
 	"runtime"
 	"sync"
+	"sync/atomic"
 
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -18,51 +21,41 @@ import (
 type snapshotElement struct {
 	seq uint64
 	ref int
-	// Next and previous pointers in the doubly-linked list of elements.
-	next, prev *snapshotElement
-}
-
-// Initialize the snapshot.
-func (db *DB) initSnapshot() {
-	db.snapsRoot.next = &db.snapsRoot
-	db.snapsRoot.prev = &db.snapsRoot
+	e   *list.Element
 }
 
 // Acquires a snapshot, based on latest sequence.
 func (db *DB) acquireSnapshot() *snapshotElement {
 	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
 	seq := db.getSeq()
-	elem := db.snapsRoot.prev
-	if elem == &db.snapsRoot || elem.seq != seq {
-		at := db.snapsRoot.prev
-		next := at.next
-		elem = &snapshotElement{
-			seq:  seq,
-			prev: at,
-			next: next,
+
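+	// snapsList is kept ordered by ascending seq, so the newest element sits at the back and is reused when the seq hasn't advanced.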
+	if e := db.snapsList.Back(); e != nil {
+		se := e.Value.(*snapshotElement)
+		if se.seq == seq {
+			se.ref++
+			return se
+		} else if seq < se.seq {
+			panic("leveldb: sequence number is not increasing")
 		}
-		at.next = elem
-		next.prev = elem
 	}
-	elem.ref++
-	db.snapsMu.Unlock()
-	return elem
+	se := &snapshotElement{seq: seq, ref: 1}
+	se.e = db.snapsList.PushBack(se)
+	return se
 }
 
 // Releases given snapshot element.
-func (db *DB) releaseSnapshot(elem *snapshotElement) {
-	if !db.isClosed() {
-		db.snapsMu.Lock()
-		elem.ref--
-		if elem.ref == 0 {
-			elem.prev.next = elem.next
-			elem.next.prev = elem.prev
-			elem.next = nil
-			elem.prev = nil
-		} else if elem.ref < 0 {
-			panic("leveldb: Snapshot: negative element reference")
-		}
-		db.snapsMu.Unlock()
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+	db.snapsMu.Lock()
+	defer db.snapsMu.Unlock()
+
+	se.ref--
+	if se.ref == 0 {
+		db.snapsList.Remove(se.e)
+		se.e = nil
+	} else if se.ref < 0 {
+		panic("leveldb: Snapshot: negative element reference")
 	}
 }
 
@@ -70,10 +63,11 @@ func (db *DB) releaseSnapshot(elem *snapshotElement) {
 func (db *DB) minSeq() uint64 {
 	db.snapsMu.Lock()
 	defer db.snapsMu.Unlock()
-	elem := db.snapsRoot.prev
-	if elem != &db.snapsRoot {
-		return elem.seq
+
+	if e := db.snapsList.Front(); e != nil {
+		return e.Value.(*snapshotElement).seq
 	}
+
 	return db.getSeq()
 }
 
@@ -81,38 +75,59 @@ func (db *DB) minSeq() uint64 {
 type Snapshot struct {
 	db       *DB
 	elem     *snapshotElement
-	mu       sync.Mutex
+	mu       sync.RWMutex
 	released bool
 }
 
 // Creates new snapshot object.
 func (db *DB) newSnapshot() *Snapshot {
-	p := &Snapshot{
+	snap := &Snapshot{
 		db:   db,
 		elem: db.acquireSnapshot(),
 	}
-	runtime.SetFinalizer(p, (*Snapshot).Release)
-	return p
+	atomic.AddInt32(&db.aliveSnaps, 1)
+	runtime.SetFinalizer(snap, (*Snapshot).Release)
+	return snap
+}
+
+func (snap *Snapshot) String() string {
+	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
 }
 
 // Get gets the value for the given key. It returns ErrNotFound if
 // the DB does not contain the key.
 //
 // The caller should not modify the contents of the returned slice, but
 // it is safe to modify the contents of the argument after Get returns.
-func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
-	db := p.db
-	err = db.ok()
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	err = snap.db.ok()
 	if err != nil {
 		return
 	}
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	if p.released {
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
 		err = ErrSnapshotReleased
 		return
 	}
-	return db.get(key, p.elem.seq, ro)
+	return snap.db.get(key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+	err = snap.db.ok()
+	if err != nil {
+		return
+	}
+	snap.mu.RLock()
+	defer snap.mu.RUnlock()
+	if snap.released {
+		err = ErrSnapshotReleased
+		return
+	}
+	return snap.db.has(key, snap.elem.seq, ro)
 }
 
 // NewIterator returns an iterator for the snapshot of the underlying DB.
@@ -132,17 +147,18 @@ func (p *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error
 // iterator would be still valid until released.
 //
 // Also read Iterator documentation of the leveldb/iterator package.
-func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	db := p.db
-	if err := db.ok(); err != nil {
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	if err := snap.db.ok(); err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	if p.released {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+	if snap.released {
 		return iterator.NewEmptyIterator(ErrSnapshotReleased)
 	}
-	return db.newIterator(p.elem.seq, slice, ro)
+	// Since the iterator already holds a version ref, it doesn't need
+	// to hold a snapshot ref as well.
+	return snap.db.newIterator(snap.elem.seq, slice, ro)
 }
 
 // Release releases the snapshot. This will not release any returned
@@ -150,16 +166,18 @@ func (p *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.
 // underlying DB is closed.
 //
 // Other methods should not be called after the snapshot has been released.
-func (p *Snapshot) Release() {
-	p.mu.Lock()
-	if !p.released {
+func (snap *Snapshot) Release() {
+	snap.mu.Lock()
+	defer snap.mu.Unlock()
+
+	if !snap.released {
 		// Clear the finalizer.
-		runtime.SetFinalizer(p, nil)
+		runtime.SetFinalizer(snap, nil)
 
-		p.released = true
-		p.db.releaseSnapshot(p.elem)
-		p.db = nil
-		p.elem = nil
+		snap.released = true
+		snap.db.releaseSnapshot(snap.elem)
+		atomic.AddInt32(&snap.db.aliveSnaps, -1)
+		snap.db = nil
+		snap.elem = nil
 	}
-	p.mu.Unlock()
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
index a137061422bec83c0b5e179c9bc6260dfe7b9495..d4db9d6dd36b02fd7bb3c1dcc6ddf74812b0a661 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -8,106 +8,203 @@ package leveldb
 
 import (
 	"sync/atomic"
+	"time"
 
 	"github.com/syndtr/goleveldb/leveldb/journal"
 	"github.com/syndtr/goleveldb/leveldb/memdb"
 )
 
+type memDB struct {
+	db  *DB
+	mdb *memdb.DB
+	ref int32
+}
+
+func (m *memDB) incref() {
+	atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+		// Only put back memdb with std capacity.
+		if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
+			m.mdb.Reset()
+			m.db.mpoolPut(m.mdb)
+		}
+		m.db = nil
+		m.mdb = nil
+	} else if ref < 0 {
+		panic("negative memdb ref")
+	}
+}
+
 // Get latest sequence number.
-func (d *DB) getSeq() uint64 {
-	return atomic.LoadUint64(&d.seq)
+func (db *DB) getSeq() uint64 {
+	return atomic.LoadUint64(&db.seq)
 }
 
 // Atomically adds delta to seq.
-func (d *DB) addSeq(delta uint64) {
-	atomic.AddUint64(&d.seq, delta)
+func (db *DB) addSeq(delta uint64) {
+	atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) sampleSeek(ikey iKey) {
+	v := db.s.version()
+	if v.sampleSeek(ikey) {
+		// Trigger table compaction.
+		db.compSendTrigger(db.tcompCmdC)
+	}
+	v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
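+	// mpoolDrain closes memPool on shutdown; recover from the panic a send on the closed channel would raise.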
+	defer func() {
+		recover()
+	}()
+	select {
+	case db.memPool <- mem:
+	default:
+	}
+}
+
+func (db *DB) mpoolGet() *memdb.DB {
+	select {
+	case mem := <-db.memPool:
+		return mem
+	default:
+		return nil
+	}
+}
+
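+// mpoolDrain discards one pooled memdb every 30 seconds so idle write buffers are eventually freed, and closes the pool when the DB closes.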
+func (db *DB) mpoolDrain() {
+	ticker := time.NewTicker(30 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			select {
+			case <-db.memPool:
+			default:
+			}
+		case _, _ = <-db.closeC:
+			close(db.memPool)
+			return
+		}
+	}
 }
 
 // Create new memdb and freeze the old one; need external synchronization.
 // newMem only called synchronously by the writer.
-func (d *DB) newMem(n int) (mem *memdb.DB, err error) {
-	s := d.s
-
-	num := s.allocFileNum()
-	file := s.getJournalFile(num)
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+	num := db.s.allocFileNum()
+	file := db.s.getJournalFile(num)
 	w, err := file.Create()
 	if err != nil {
-		s.reuseFileNum(num)
+		db.s.reuseFileNum(num)
 		return
 	}
-	d.memMu.Lock()
-	if d.journal == nil {
-		d.journal = journal.NewWriter(w)
+
+	db.memMu.Lock()
+	defer db.memMu.Unlock()
+
+	if db.frozenMem != nil {
+		panic("still has frozen mem")
+	}
+
+	if db.journal == nil {
+		db.journal = journal.NewWriter(w)
 	} else {
-		d.journal.Reset(w)
-		d.journalWriter.Close()
-		d.frozenJournalFile = d.journalFile
-	}
-	d.journalWriter = w
-	d.journalFile = file
-	d.frozenMem = d.mem
-	d.mem = memdb.New(s.icmp, maxInt(d.s.o.GetWriteBuffer(), n))
-	mem = d.mem
-	// The seq only incremented by the writer.
-	d.frozenSeq = d.seq
-	d.memMu.Unlock()
+		db.journal.Reset(w)
+		db.journalWriter.Close()
+		db.frozenJournalFile = db.journalFile
+	}
+	db.journalWriter = w
+	db.journalFile = file
+	db.frozenMem = db.mem
+	mdb := db.mpoolGet()
+	if mdb == nil || mdb.Capacity() < n {
+		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+	}
+	mem = &memDB{
+		db:  db,
+		mdb: mdb,
+		ref: 2,
+	}
+	db.mem = mem
+	// The seq is only incremented by the writer, and whoever calls newMem
+	// should hold the write lock, so no additional synchronization is needed here.
+	db.frozenSeq = db.seq
 	return
 }
 
 // Get all memdbs.
-func (d *DB) getMems() (e *memdb.DB, f *memdb.DB) {
-	d.memMu.RLock()
-	defer d.memMu.RUnlock()
-	return d.mem, d.frozenMem
+func (db *DB) getMems() (e, f *memDB) {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
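+	// Take references on behalf of the caller, who must decref them when done (e.g. via memdbReleaser).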
+	db.mem.incref()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.mem, db.frozenMem
 }
 
 // Get effective memdb.
-func (d *DB) getEffectiveMem() *memdb.DB {
-	d.memMu.RLock()
-	defer d.memMu.RUnlock()
-	return d.mem
+func (db *DB) getEffectiveMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.mem == nil {
+		panic("nil effective mem")
+	}
+	db.mem.incref()
+	return db.mem
 }
 
 // Check whether we have a frozen memdb.
-func (d *DB) hasFrozenMem() bool {
-	d.memMu.RLock()
-	defer d.memMu.RUnlock()
-	return d.frozenMem != nil
+func (db *DB) hasFrozenMem() bool {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	return db.frozenMem != nil
 }
 
 // Get frozen memdb.
-func (d *DB) getFrozenMem() *memdb.DB {
-	d.memMu.RLock()
-	defer d.memMu.RUnlock()
-	return d.frozenMem
+func (db *DB) getFrozenMem() *memDB {
+	db.memMu.RLock()
+	defer db.memMu.RUnlock()
+	if db.frozenMem != nil {
+		db.frozenMem.incref()
+	}
+	return db.frozenMem
 }
 
 // Drop frozen memdb; assume that frozen memdb isn't nil.
-func (d *DB) dropFrozenMem() {
-	d.memMu.Lock()
-	if err := d.frozenJournalFile.Remove(); err != nil {
-		d.s.logf("journal@remove removing @%d %q", d.frozenJournalFile.Num(), err)
+func (db *DB) dropFrozenMem() {
+	db.memMu.Lock()
+	if err := db.frozenJournalFile.Remove(); err != nil {
+		db.logf("journal@remove removing @%d %q", db.frozenJournalFile.Num(), err)
 	} else {
-		d.s.logf("journal@remove removed @%d", d.frozenJournalFile.Num())
+		db.logf("journal@remove removed @%d", db.frozenJournalFile.Num())
 	}
-	d.frozenJournalFile = nil
-	d.frozenMem = nil
-	d.memMu.Unlock()
+	db.frozenJournalFile = nil
+	db.frozenMem.decref()
+	db.frozenMem = nil
+	db.memMu.Unlock()
 }
 
 // Set closed flag; return true if not already closed.
-func (d *DB) setClosed() bool {
-	return atomic.CompareAndSwapUint32(&d.closed, 0, 1)
+func (db *DB) setClosed() bool {
+	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
 }
 
 // Check whether DB was closed.
-func (d *DB) isClosed() bool {
-	return atomic.LoadUint32(&d.closed) != 0
+func (db *DB) isClosed() bool {
+	return atomic.LoadUint32(&db.closed) != 0
 }
 
 // Check read ok status.
-func (d *DB) ok() error {
-	if d.isClosed() {
+func (db *DB) ok() error {
+	if db.isClosed() {
 		return ErrClosed
 	}
 	return nil
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
index 5de7d9723aecec4ee1fc8ee4d2b82f42a5f91945..38bfbf1ea9cc7b7570a426d04aad6aaf3b76f6e3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
@@ -7,6 +7,10 @@
 package leveldb
 
 import (
+	"bytes"
+	"container/list"
+	crand "crypto/rand"
+	"encoding/binary"
 	"fmt"
 	"math/rand"
 	"os"
@@ -20,6 +24,7 @@ import (
 	"unsafe"
 
 	"github.com/syndtr/goleveldb/leveldb/comparer"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -148,25 +153,29 @@ func (h *dbHarness) maxNextLevelOverlappingBytes(want uint64) {
 	t := h.t
 	db := h.db
 
-	var res uint64
+	var (
+		maxOverlaps uint64
+		maxLevel    int
+	)
 	v := db.s.version()
 	for i, tt := range v.tables[1 : len(v.tables)-1] {
 		level := i + 1
 		next := v.tables[level+1]
 		for _, t := range tt {
-			var r tFiles
-			min, max := t.min.ukey(), t.max.ukey()
-			next.getOverlaps(min, max, &r, true, db.s.icmp.ucmp)
+			r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false)
 			sum := r.size()
-			if sum > res {
-				res = sum
+			if sum > maxOverlaps {
+				maxOverlaps = sum
+				maxLevel = level
 			}
 		}
 	}
 	v.release()
 
-	if res > want {
-		t.Errorf("next level overlapping bytes is more than %d, got=%d", want, res)
+	if maxOverlaps > want {
+		t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel)
+	} else {
+		t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want)
 	}
 }
 
@@ -239,7 +248,7 @@ func (h *dbHarness) allEntriesFor(key, want string) {
 	db := h.db
 	s := db.s
 
-	ikey := newIKey([]byte(key), kMaxSeq, tVal)
+	ikey := newIkey([]byte(key), kMaxSeq, ktVal)
 	iter := db.newRawIterator(nil, nil)
 	if !iter.Seek(ikey) && iter.Error() != nil {
 		t.Error("AllEntries: error during seek, err: ", iter.Error())
@@ -248,19 +257,18 @@ func (h *dbHarness) allEntriesFor(key, want string) {
 	res := "[ "
 	first := true
 	for iter.Valid() {
-		rkey := iKey(iter.Key())
-		if _, t, ok := rkey.parseNum(); ok {
-			if s.icmp.uCompare(ikey.ukey(), rkey.ukey()) != 0 {
+		if ukey, _, kt, kerr := parseIkey(iter.Key()); kerr == nil {
+			if s.icmp.uCompare(ikey.ukey(), ukey) != 0 {
 				break
 			}
 			if !first {
 				res += ", "
 			}
 			first = false
-			switch t {
-			case tVal:
+			switch kt {
+			case ktVal:
 				res += string(iter.Value())
-			case tDel:
+			case ktDel:
 				res += "DEL"
 			}
 		} else {
@@ -325,6 +333,8 @@ func (h *dbHarness) compactMem() {
 	t := h.t
 	db := h.db
 
+	t.Log("starting memdb compaction")
+
 	db.writeLockC <- struct{}{}
 	defer func() {
 		<-db.writeLockC
@@ -340,6 +350,8 @@ func (h *dbHarness) compactMem() {
 	if h.totalTables() == 0 {
 		t.Error("zero tables after mem compaction")
 	}
+
+	t.Log("memdb compaction done")
 }
 
 func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) {
@@ -354,6 +366,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
 		_max = []byte(max)
 	}
 
+	t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max)
+
 	if err := db.compSendRange(db.tcompCmdC, level, _min, _max); err != nil {
 		if wanterr {
 			t.Log("CompactRangeAt: got error (expected): ", err)
@@ -363,6 +377,8 @@ func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool)
 	} else if wanterr {
 		t.Error("CompactRangeAt: expect error")
 	}
+
+	t.Log("table range compaction done")
 }
 
 func (h *dbHarness) compactRangeAt(level int, min, max string) {
@@ -373,6 +389,8 @@ func (h *dbHarness) compactRange(min, max string) {
 	t := h.t
 	db := h.db
 
+	t.Logf("starting DB range compaction: min=%q, max=%q", min, max)
+
 	var r util.Range
 	if min != "" {
 		r.Start = []byte(min)
@@ -383,21 +401,25 @@ func (h *dbHarness) compactRange(min, max string) {
 	if err := db.CompactRange(r); err != nil {
 		t.Error("CompactRange: got error: ", err)
 	}
-}
 
-func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
-	t := h.t
-	db := h.db
+	t.Log("DB range compaction done")
+}
 
-	s, err := db.SizeOf([]util.Range{
+func (h *dbHarness) sizeOf(start, limit string) uint64 {
+	sz, err := h.db.SizeOf([]util.Range{
 		{[]byte(start), []byte(limit)},
 	})
 	if err != nil {
-		t.Error("SizeOf: got error: ", err)
+		h.t.Error("SizeOf: got error: ", err)
 	}
-	if s.Sum() < low || s.Sum() > hi {
-		t.Errorf("sizeof %q to %q not in range, want %d - %d, got %d",
-			shorten(start), shorten(limit), low, hi, s.Sum())
+	return sz.Sum()
+}
+
+func (h *dbHarness) sizeAssert(start, limit string, low, hi uint64) {
+	sz := h.sizeOf(start, limit)
+	if sz < low || sz > hi {
+		h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d",
+			shorten(start), shorten(limit), low, hi, sz)
 	}
 }
 
@@ -504,13 +526,13 @@ func Test_FieldsAligned(t *testing.T) {
 	p1 := new(DB)
 	testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq))
 	p2 := new(session)
-	testAligned(t, "session.stFileNum", unsafe.Offsetof(p2.stFileNum))
+	testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum))
 	testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum))
 	testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum))
-	testAligned(t, "session.stSeq", unsafe.Offsetof(p2.stSeq))
+	testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum))
 }
 
-func TestDb_Locking(t *testing.T) {
+func TestDB_Locking(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.stor.Close()
 	h.openAssert(false)
@@ -518,7 +540,7 @@ func TestDb_Locking(t *testing.T) {
 	h.openAssert(true)
 }
 
-func TestDb_Empty(t *testing.T) {
+func TestDB_Empty(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.get("foo", false)
 
@@ -527,7 +549,7 @@ func TestDb_Empty(t *testing.T) {
 	})
 }
 
-func TestDb_ReadWrite(t *testing.T) {
+func TestDB_ReadWrite(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.getVal("foo", "v1")
@@ -542,7 +564,7 @@ func TestDb_ReadWrite(t *testing.T) {
 	})
 }
 
-func TestDb_PutDeleteGet(t *testing.T) {
+func TestDB_PutDeleteGet(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.getVal("foo", "v1")
@@ -556,7 +578,7 @@ func TestDb_PutDeleteGet(t *testing.T) {
 	})
 }
 
-func TestDb_EmptyBatch(t *testing.T) {
+func TestDB_EmptyBatch(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -568,7 +590,7 @@ func TestDb_EmptyBatch(t *testing.T) {
 	h.get("foo", false)
 }
 
-func TestDb_GetFromFrozen(t *testing.T) {
+func TestDB_GetFromFrozen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100100})
 	defer h.close()
 
@@ -594,7 +616,7 @@ func TestDb_GetFromFrozen(t *testing.T) {
 	h.get("k2", true)
 }
 
-func TestDb_GetFromTable(t *testing.T) {
+func TestDB_GetFromTable(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.compactMem()
@@ -602,7 +624,7 @@ func TestDb_GetFromTable(t *testing.T) {
 	})
 }
 
-func TestDb_GetSnapshot(t *testing.T) {
+func TestDB_GetSnapshot(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		bar := strings.Repeat("b", 200)
 		h.put("foo", "v1")
@@ -636,7 +658,7 @@ func TestDb_GetSnapshot(t *testing.T) {
 	})
 }
 
-func TestDb_GetLevel0Ordering(t *testing.T) {
+func TestDB_GetLevel0Ordering(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		for i := 0; i < 4; i++ {
 			h.put("bar", fmt.Sprintf("b%d", i))
@@ -659,7 +681,7 @@ func TestDb_GetLevel0Ordering(t *testing.T) {
 	})
 }
 
-func TestDb_GetOrderedByLevels(t *testing.T) {
+func TestDB_GetOrderedByLevels(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.compactMem()
@@ -671,7 +693,7 @@ func TestDb_GetOrderedByLevels(t *testing.T) {
 	})
 }
 
-func TestDb_GetPicksCorrectFile(t *testing.T) {
+func TestDB_GetPicksCorrectFile(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		// Arrange to have multiple files in a non-level-0 level.
 		h.put("a", "va")
@@ -695,7 +717,7 @@ func TestDb_GetPicksCorrectFile(t *testing.T) {
 	})
 }
 
-func TestDb_GetEncountersEmptyLevel(t *testing.T) {
+func TestDB_GetEncountersEmptyLevel(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		// Arrange for the following to happen:
 		//   * sstable A in level 0
@@ -750,7 +772,7 @@ func TestDb_GetEncountersEmptyLevel(t *testing.T) {
 	})
 }
 
-func TestDb_IterMultiWithDelete(t *testing.T) {
+func TestDB_IterMultiWithDelete(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("a", "va")
 		h.put("b", "vb")
@@ -776,7 +798,7 @@ func TestDb_IterMultiWithDelete(t *testing.T) {
 	})
 }
 
-func TestDb_IteratorPinsRef(t *testing.T) {
+func TestDB_IteratorPinsRef(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -800,7 +822,7 @@ func TestDb_IteratorPinsRef(t *testing.T) {
 	iter.Release()
 }
 
-func TestDb_Recover(t *testing.T) {
+func TestDB_Recover(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.put("baz", "v5")
@@ -822,7 +844,7 @@ func TestDb_Recover(t *testing.T) {
 	})
 }
 
-func TestDb_RecoverWithEmptyJournal(t *testing.T) {
+func TestDB_RecoverWithEmptyJournal(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		h.put("foo", "v2")
@@ -836,7 +858,7 @@ func TestDb_RecoverWithEmptyJournal(t *testing.T) {
 	})
 }
 
-func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
+func TestDB_RecoverDuringMemtableCompaction(t *testing.T) {
 	truno(t, &opt.Options{WriteBuffer: 1000000}, func(h *dbHarness) {
 
 		h.stor.DelaySync(storage.TypeTable)
@@ -852,7 +874,7 @@ func TestDb_RecoverDuringMemtableCompaction(t *testing.T) {
 	})
 }
 
-func TestDb_MinorCompactionsHappen(t *testing.T) {
+func TestDB_MinorCompactionsHappen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 10000})
 	defer h.close()
 
@@ -876,7 +898,7 @@ func TestDb_MinorCompactionsHappen(t *testing.T) {
 	}
 }
 
-func TestDb_RecoverWithLargeJournal(t *testing.T) {
+func TestDB_RecoverWithLargeJournal(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -901,7 +923,7 @@ func TestDb_RecoverWithLargeJournal(t *testing.T) {
 	v.release()
 }
 
-func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
+func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		WriteBuffer: 10000000,
 		Compression: opt.NoCompression,
@@ -939,11 +961,11 @@ func TestDb_CompactionsGenerateMultipleFiles(t *testing.T) {
 	}
 }
 
-func TestDb_RepeatedWritesToSameKey(t *testing.T) {
+func TestDB_RepeatedWritesToSameKey(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
 	defer h.close()
 
-	maxTables := kNumLevels + kL0_StopWritesTrigger
+	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
 
 	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
 	for i := 0; i < 5*maxTables; i++ {
@@ -955,13 +977,13 @@ func TestDb_RepeatedWritesToSameKey(t *testing.T) {
 	}
 }
 
-func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
+func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{WriteBuffer: 100000})
 	defer h.close()
 
 	h.reopenDB()
 
-	maxTables := kNumLevels + kL0_StopWritesTrigger
+	maxTables := h.o.GetNumLevel() + h.o.GetWriteL0PauseTrigger()
 
 	value := strings.Repeat("v", 2*h.o.GetWriteBuffer())
 	for i := 0; i < 5*maxTables; i++ {
@@ -973,11 +995,11 @@ func TestDb_RepeatedWritesToSameKeyAfterReopen(t *testing.T) {
 	}
 }
 
-func TestDb_SparseMerge(t *testing.T) {
+func TestDB_SparseMerge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()
 
-	h.putMulti(kNumLevels, "A", "Z")
+	h.putMulti(h.o.GetNumLevel(), "A", "Z")
 
 	// Suppose there is:
 	//    small amount of data with prefix A
@@ -1001,6 +1023,7 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.put("C", "vc2")
 	h.compactMem()
 
+	h.waitCompaction()
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 	h.compactRangeAt(0, "", "")
 	h.waitCompaction()
@@ -1010,7 +1033,7 @@ func TestDb_SparseMerge(t *testing.T) {
 	h.maxNextLevelOverlappingBytes(20 * 1048576)
 }
 
-func TestDb_SizeOf(t *testing.T) {
+func TestDB_SizeOf(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Compression: opt.NoCompression,
 		WriteBuffer: 10000000,
@@ -1060,7 +1083,7 @@ func TestDb_SizeOf(t *testing.T) {
 	}
 }
 
-func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
+func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{Compression: opt.NoCompression})
 	defer h.close()
 
@@ -1098,7 +1121,7 @@ func TestDb_SizeOf_MixOfSmallAndLarge(t *testing.T) {
 	}
 }
 
-func TestDb_Snapshot(t *testing.T) {
+func TestDB_Snapshot(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		h.put("foo", "v1")
 		s1 := h.getSnapshot()
@@ -1127,13 +1150,51 @@ func TestDb_Snapshot(t *testing.T) {
 	})
 }
 
-func TestDb_HiddenValuesAreRemoved(t *testing.T) {
+func TestDB_SnapshotList(t *testing.T) {
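+	// Exercise the snapshot bookkeeping directly: minSeq must always report the seq of the oldest live snapshot.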
+	db := &DB{snapsList: list.New()}
+	e0a := db.acquireSnapshot()
+	e0b := db.acquireSnapshot()
+	db.seq = 1
+	e1 := db.acquireSnapshot()
+	db.seq = 2
+	e2 := db.acquireSnapshot()
+
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e0a)
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e2)
+	if db.minSeq() != 0 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e0b)
+	if db.minSeq() != 1 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	e2 = db.acquireSnapshot()
+	if db.minSeq() != 1 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e1)
+	if db.minSeq() != 2 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+	db.releaseSnapshot(e2)
+	if db.minSeq() != 2 {
+		t.Fatalf("invalid sequence number, got=%d", db.minSeq())
+	}
+}
+
+func TestDB_HiddenValuesAreRemoved(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		s := h.db.s
 
 		h.put("foo", "v1")
 		h.compactMem()
-		m := kMaxMemCompactLevel
+		m := h.o.GetMaxMemCompationLevel()
 		v := s.version()
 		num := v.tLen(m)
 		v.release()
@@ -1170,14 +1231,14 @@ func TestDb_HiddenValuesAreRemoved(t *testing.T) {
 	})
 }
 
-func TestDb_DeletionMarkers2(t *testing.T) {
+func TestDB_DeletionMarkers2(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 	s := h.db.s
 
 	h.put("foo", "v1")
 	h.compactMem()
-	m := kMaxMemCompactLevel
+	m := h.o.GetMaxMemCompationLevel()
 	v := s.version()
 	num := v.tLen(m)
 	v.release()
@@ -1211,8 +1272,8 @@ func TestDb_DeletionMarkers2(t *testing.T) {
 	h.allEntriesFor("foo", "[ ]")
 }
 
-func TestDb_CompactionTableOpenError(t *testing.T) {
-	h := newDbHarnessWopt(t, &opt.Options{MaxOpenFiles: 0})
+func TestDB_CompactionTableOpenError(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{OpenFilesCacheCapacity: -1})
 	defer h.close()
 
 	im := 10
@@ -1230,14 +1291,14 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
 		t.Errorf("total tables is %d, want %d", n, im)
 	}
 
-	h.stor.SetOpenErr(storage.TypeTable)
+	h.stor.SetEmuErr(storage.TypeTable, tsOpOpen)
 	go h.db.CompactRange(util.Range{})
 	if err := h.db.compSendIdle(h.db.tcompCmdC); err != nil {
 		t.Log("compaction error: ", err)
 	}
 	h.closeDB0()
 	h.openDB()
-	h.stor.SetOpenErr(0)
+	h.stor.SetEmuErr(0, tsOpOpen)
 
 	for i := 0; i < im; i++ {
 		for j := 0; j < jm; j++ {
@@ -1246,9 +1307,9 @@ func TestDb_CompactionTableOpenError(t *testing.T) {
 	}
 }
 
-func TestDb_OverlapInLevel0(t *testing.T) {
+func TestDB_OverlapInLevel0(t *testing.T) {
 	trun(t, func(h *dbHarness) {
-		if kMaxMemCompactLevel != 2 {
+		if h.o.GetMaxMemCompationLevel() != 2 {
 			t.Fatal("fix test to reflect the config")
 		}
 
@@ -1289,7 +1350,7 @@ func TestDb_OverlapInLevel0(t *testing.T) {
 	})
 }
 
-func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1309,7 +1370,7 @@ func TestDb_L0_CompactionBug_Issue44_a(t *testing.T) {
 	h.getKeyVal("(a->v)")
 }
 
-func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
+func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1338,7 +1399,7 @@ func TestDb_L0_CompactionBug_Issue44_b(t *testing.T) {
 	h.getKeyVal("(->)(c->cv)")
 }
 
-func TestDb_SingleEntryMemCompaction(t *testing.T) {
+func TestDB_SingleEntryMemCompaction(t *testing.T) {
 	trun(t, func(h *dbHarness) {
 		for i := 0; i < 10; i++ {
 			h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer))
@@ -1355,7 +1416,7 @@ func TestDb_SingleEntryMemCompaction(t *testing.T) {
 	})
 }
 
-func TestDb_ManifestWriteError(t *testing.T) {
+func TestDB_ManifestWriteError(t *testing.T) {
 	for i := 0; i < 2; i++ {
 		func() {
 			h := newDbHarness(t)
@@ -1368,23 +1429,23 @@ func TestDb_ManifestWriteError(t *testing.T) {
 			h.compactMem()
 			h.getVal("foo", "bar")
 			v := h.db.s.version()
-			if n := v.tLen(kMaxMemCompactLevel); n != 1 {
+			if n := v.tLen(h.o.GetMaxMemCompationLevel()); n != 1 {
 				t.Errorf("invalid total tables, want=1 got=%d", n)
 			}
 			v.release()
 
 			if i == 0 {
-				h.stor.SetWriteErr(storage.TypeManifest)
+				h.stor.SetEmuErr(storage.TypeManifest, tsOpWrite)
 			} else {
-				h.stor.SetSyncErr(storage.TypeManifest)
+				h.stor.SetEmuErr(storage.TypeManifest, tsOpSync)
 			}
 
 			// Merging compaction (will fail)
-			h.compactRangeAtErr(kMaxMemCompactLevel, "", "", true)
+			h.compactRangeAtErr(h.o.GetMaxMemCompationLevel(), "", "", true)
 
 			h.db.Close()
-			h.stor.SetWriteErr(0)
-			h.stor.SetSyncErr(0)
+			h.stor.SetEmuErr(0, tsOpWrite)
+			h.stor.SetEmuErr(0, tsOpSync)
 
 			// Should not lose data
 			h.openDB()
@@ -1405,7 +1466,7 @@ func assertErr(t *testing.T, err error, wanterr bool) {
 	}
 }
 
-func TestDb_ClosedIsClosed(t *testing.T) {
+func TestDB_ClosedIsClosed(t *testing.T) {
 	h := newDbHarness(t)
 	db := h.db
 
@@ -1500,7 +1561,7 @@ func (p numberComparer) Compare(a, b []byte) int {
 func (numberComparer) Separator(dst, a, b []byte) []byte { return nil }
 func (numberComparer) Successor(dst, b []byte) []byte    { return nil }
 
-func TestDb_CustomComparer(t *testing.T) {
+func TestDB_CustomComparer(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
 		Comparer:    numberComparer{},
 		WriteBuffer: 1000,
@@ -1530,11 +1591,11 @@ func TestDb_CustomComparer(t *testing.T) {
 	}
 }
 
-func TestDb_ManualCompaction(t *testing.T) {
+func TestDB_ManualCompaction(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
-	if kMaxMemCompactLevel != 2 {
+	if h.o.GetMaxMemCompationLevel() != 2 {
 		t.Fatal("fix test to reflect the config")
 	}
 
@@ -1568,10 +1629,10 @@ func TestDb_ManualCompaction(t *testing.T) {
 	h.tablesPerLevel("0,0,1")
 }
 
-func TestDb_BloomFilter(t *testing.T) {
+func TestDB_BloomFilter(t *testing.T) {
 	h := newDbHarnessWopt(t, &opt.Options{
-		BlockCache: opt.NoCache,
-		Filter:     filter.NewBloomFilter(10),
+		DisableBlockCache: true,
+		Filter:            filter.NewBloomFilter(10),
 	})
 	defer h.close()
 
@@ -1579,7 +1640,7 @@ func TestDb_BloomFilter(t *testing.T) {
 		return fmt.Sprintf("key%06d", i)
 	}
 
-	n := 10000
+	const n = 10000
 
 	// Populate multiple layers
 	for i := 0; i < n; i++ {
@@ -1621,7 +1682,7 @@ func TestDb_BloomFilter(t *testing.T) {
 	h.stor.ReleaseSync(storage.TypeTable)
 }
 
-func TestDb_Concurrent(t *testing.T) {
+func TestDB_Concurrent(t *testing.T) {
 	const n, secs, maxkey = 4, 2, 1000
 
 	runtime.GOMAXPROCS(n)
@@ -1686,7 +1747,7 @@ func TestDb_Concurrent(t *testing.T) {
 	runtime.GOMAXPROCS(1)
 }
 
-func TestDb_Concurrent2(t *testing.T) {
+func TestDB_Concurrent2(t *testing.T) {
 	const n, n2 = 4, 4000
 
 	runtime.GOMAXPROCS(n*2 + 2)
@@ -1757,7 +1818,7 @@ func TestDb_Concurrent2(t *testing.T) {
 	runtime.GOMAXPROCS(1)
 }
 
-func TestDb_CreateReopenDbOnFile(t *testing.T) {
+func TestDB_CreateReopenDbOnFile(t *testing.T) {
 	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid()))
 	if err := os.RemoveAll(dbpath); err != nil {
 		t.Fatal("cannot remove old db: ", err)
@@ -1785,7 +1846,7 @@ func TestDb_CreateReopenDbOnFile(t *testing.T) {
 	}
 }
 
-func TestDb_CreateReopenDbOnFile2(t *testing.T) {
+func TestDB_CreateReopenDbOnFile2(t *testing.T) {
 	dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid()))
 	if err := os.RemoveAll(dbpath); err != nil {
 		t.Fatal("cannot remove old db: ", err)
@@ -1806,7 +1867,7 @@ func TestDb_CreateReopenDbOnFile2(t *testing.T) {
 	}
 }
 
-func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
+func TestDB_DeletionMarkersOnMemdb(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1817,8 +1878,8 @@ func TestDb_DeletionMarkersOnMemdb(t *testing.T) {
 	h.getKeyVal("")
 }
 
-func TestDb_LeveldbIssue178(t *testing.T) {
-	nKeys := (kMaxTableSize / 30) * 5
+func TestDB_LeveldbIssue178(t *testing.T) {
+	nKeys := (opt.DefaultCompactionTableSize / 30) * 5
 	key1 := func(i int) string {
 		return fmt.Sprintf("my_key_%d", i)
 	}
@@ -1860,7 +1921,7 @@ func TestDb_LeveldbIssue178(t *testing.T) {
 	h.assertNumKeys(nKeys)
 }
 
-func TestDb_LeveldbIssue200(t *testing.T) {
+func TestDB_LeveldbIssue200(t *testing.T) {
 	h := newDbHarness(t)
 	defer h.close()
 
@@ -1886,3 +1947,719 @@ func TestDb_LeveldbIssue200(t *testing.T) {
 	iter.Next()
 	assertBytes(t, []byte("5"), iter.Key())
 }
+
+func TestDB_GoleveldbIssue74(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer: 1 * opt.MiB,
+	})
+	defer h.close()
+
+	const n, dur = 10000, 5 * time.Second
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	until := time.Now().Add(dur)
+	wg := new(sync.WaitGroup)
+	wg.Add(2)
+	var done uint32
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("WRITER DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+
+		b := new(Batch)
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			iv := fmt.Sprintf("VAL%010d", i)
+			for k := 0; k < n; k++ {
+				key := fmt.Sprintf("KEY%06d", k)
+				b.Put([]byte(key), []byte(key+iv))
+				b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key))
+			}
+			h.write(b)
+
+			b.Reset()
+			snap := h.getSnapshot()
+			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+			var k int
+			for ; iter.Next(); k++ {
+				ptrKey := iter.Key()
+				key := iter.Value()
+
+				if _, err := snap.Get(ptrKey, nil); err != nil {
+					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err)
+				}
+				if value, err := snap.Get(key, nil); err != nil {
+					t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err)
+				} else if string(value) != string(key)+iv {
+					t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value)
+				}
+
+				b.Delete(key)
+				b.Delete(ptrKey)
+			}
+			h.write(b)
+			iter.Release()
+			snap.Release()
+			if k != n {
+				t.Fatalf("#%d %d != %d", i, k, n)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			snap := h.getSnapshot()
+			iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil)
+			var prevValue string
+			var k int
+			for ; iter.Next(); k++ {
+				ptrKey := iter.Key()
+				key := iter.Value()
+
+				if _, err := snap.Get(ptrKey, nil); err != nil {
+					t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err)
+				}
+
+				if value, err := snap.Get(key, nil); err != nil {
+					t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err)
+				} else if prevValue != "" && string(value) != string(key)+prevValue {
+					t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value)
+				} else {
+					prevValue = string(value[len(key):])
+				}
+			}
+			iter.Release()
+			snap.Release()
+			if k > 0 && k != n {
+				t.Fatalf("#%d %d != %d", i, k, n)
+			}
+		}
+	}()
+	wg.Wait()
+}
+
+func TestDB_GetProperties(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	_, err := h.db.GetProperty("leveldb.num-files-at-level")
+	if err == nil {
+		t.Error("GetProperty() failed to detect missing level")
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0")
+	if err != nil {
+		t.Error("got unexpected error", err)
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
+	if err == nil {
+		t.Error("GetProperty() failed to detect invalid level")
+	}
+}
+
+func TestDB_GoleveldbIssue72and83(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:            1 * opt.MiB,
+		OpenFilesCacheCapacity: 3,
+	})
+	defer h.close()
+
+	const n, wn, dur = 10000, 100, 30 * time.Second
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	randomData := func(prefix byte, i int) []byte {
+		data := make([]byte, 1+4+32+64+32)
+		_, err := crand.Reader.Read(data[1 : len(data)-8])
+		if err != nil {
+			panic(err)
+		}
+		data[0] = prefix
+		binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
+		binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
+		return data
+	}
+
+	keys := make([][]byte, n)
+	for i := range keys {
+		keys[i] = randomData(1, 0)
+	}
+
+	until := time.Now().Add(dur)
+	wg := new(sync.WaitGroup)
+	wg.Add(3)
+	var done uint32
+	go func() {
+		i := 0
+		defer func() {
+			t.Logf("WRITER DONE #%d", i)
+			wg.Done()
+		}()
+
+		b := new(Batch)
+		for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
+			b.Reset()
+			for _, k1 := range keys {
+				k2 := randomData(2, i)
+				b.Put(k2, randomData(42, i))
+				b.Put(k1, k2)
+			}
+			if err := h.db.Write(b, h.wo); err != nil {
+				atomic.StoreUint32(&done, 1)
+				t.Fatalf("WRITER #%d db.Write: %v", i, err)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER0 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			snap := h.getSnapshot()
+			seq := snap.elem.seq
+			if seq == 0 {
+				snap.Release()
+				continue
+			}
+			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
+			writei := int(seq/(n*2) - 1)
+			var k int
+			for ; iter.Next(); k++ {
+				k1 := iter.Key()
+				k2 := iter.Value()
+				k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
+				k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
+				if k1checksum0 != k1checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, k1checksum0, k1checksum0)
+				}
+				k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
+				k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
+				if k2checksum0 != k2checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, k2checksum0, k2checksum1)
+				}
+				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
+				if writei != kwritei {
+					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
+				}
+				if _, err := snap.Get(k2, nil); err != nil {
+					t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
+				}
+			}
+			if err := iter.Error(); err != nil {
+				t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
+			}
+			iter.Release()
+			snap.Release()
+			if k > 0 && k != n {
+				t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER1 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			iter := h.db.NewIterator(nil, nil)
+			seq := iter.(*dbIter).seq
+			if seq == 0 {
+				iter.Release()
+				continue
+			}
+			writei := int(seq/(n*2) - 1)
+			var k int
+			for ok := iter.Last(); ok; ok = iter.Prev() {
+				k++
+			}
+			if err := iter.Error(); err != nil {
+				t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err)
+			}
+			iter.Release()
+			if m := (writei+1)*n + n; k != m {
+				t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m)
+			}
+		}
+	}()
+
+	wg.Wait()
+}
+
+func TestDB_TransientError(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:              128 * opt.KiB,
+		OpenFilesCacheCapacity:   3,
+		DisableCompactionBackoff: true,
+	})
+	defer h.close()
+
+	const (
+		nSnap = 20
+		nKey  = 10000
+	)
+
+	var (
+		snaps [nSnap]*Snapshot
+		b     = &Batch{}
+	)
+	for i := range snaps {
+		vtail := fmt.Sprintf("VAL%030d", i)
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%8d", k)
+			b.Put([]byte(key), []byte(key+vtail))
+		}
+		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+		if err := h.db.Write(b, nil); err != nil {
+			t.Logf("WRITE #%d error: %v", i, err)
+			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt, tsOpWrite)
+			for {
+				if err := h.db.Write(b, nil); err == nil {
+					break
+				} else if errors.IsCorrupted(err) {
+					t.Fatalf("WRITE #%d corrupted: %v", i, err)
+				}
+			}
+		}
+
+		snaps[i] = h.db.newSnapshot()
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%8d", k)
+			b.Delete([]byte(key))
+		}
+		h.stor.SetEmuRandErr(storage.TypeTable, tsOpOpen, tsOpRead, tsOpReadAt)
+		if err := h.db.Write(b, nil); err != nil {
+			t.Logf("WRITE #%d  error: %v", i, err)
+			h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+			for {
+				if err := h.db.Write(b, nil); err == nil {
+					break
+				} else if errors.IsCorrupted(err) {
+					t.Fatalf("WRITE #%d corrupted: %v", i, err)
+				}
+			}
+		}
+	}
+	h.stor.SetEmuRandErr(0, tsOpOpen, tsOpRead, tsOpReadAt)
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	rnd := rand.New(rand.NewSource(0xecafdaed))
+	wg := &sync.WaitGroup{}
+	for i, snap := range snaps {
+		wg.Add(2)
+
+		go func(i int, snap *Snapshot, sk []int) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			for _, k := range sk {
+				key := fmt.Sprintf("KEY%8d", k)
+				xvalue, err := snap.Get([]byte(key), nil)
+				if err != nil {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+				}
+				value := key + vtail
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+				}
+			}
+		}(i, snap, rnd.Perm(nKey))
+
+		go func(i int, snap *Snapshot) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			iter := snap.NewIterator(nil, nil)
+			defer iter.Release()
+			for k := 0; k < nKey; k++ {
+				if !iter.Next() {
+					if err := iter.Error(); err != nil {
+						t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err)
+					} else {
+						t.Fatalf("READER_ITER #%d K%d eoi", i, k)
+					}
+				}
+				key := fmt.Sprintf("KEY%8d", k)
+				xkey := iter.Key()
+				if !bytes.Equal([]byte(key), xkey) {
+					t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey)
+				}
+				value := key + vtail
+				xvalue := iter.Value()
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_ITER #%d K%d invalid value: want %q, got %q", i, k, value, xvalue)
+				}
+			}
+		}(i, snap)
+	}
+
+	wg.Wait()
+}
+
+func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) {
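+	// Many small tables plus long-lived snapshots: every snapshot must still see its own values after the range compactions below.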
+	h := newDbHarnessWopt(t, &opt.Options{
+		WriteBuffer:                 112 * opt.KiB,
+		CompactionTableSize:         90 * opt.KiB,
+		CompactionExpandLimitFactor: 1,
+	})
+	defer h.close()
+
+	const (
+		nSnap = 190
+		nKey  = 140
+	)
+
+	var (
+		snaps [nSnap]*Snapshot
+		b     = &Batch{}
+	)
+	for i := range snaps {
+		vtail := fmt.Sprintf("VAL%030d", i)
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%08d", k)
+			b.Put([]byte(key), []byte(key+vtail))
+		}
+		if err := h.db.Write(b, nil); err != nil {
+			t.Fatalf("WRITE #%d error: %v", i, err)
+		}
+
+		snaps[i] = h.db.newSnapshot()
+		b.Reset()
+		for k := 0; k < nKey; k++ {
+			key := fmt.Sprintf("KEY%08d", k)
+			b.Delete([]byte(key))
+		}
+		if err := h.db.Write(b, nil); err != nil {
+			t.Fatalf("WRITE #%d  error: %v", i, err)
+		}
+	}
+
+	h.compactMem()
+
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+
+	h.compactRangeAt(0, "", "")
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+	h.compactRangeAt(1, "", "")
+	h.waitCompaction()
+	for level, tables := range h.db.s.stVersion.tables {
+		for _, table := range tables {
+			t.Logf("L%d@%d %q:%q", level, table.file.Num(), table.imin, table.imax)
+		}
+	}
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	wg := &sync.WaitGroup{}
+	for i, snap := range snaps {
+		wg.Add(1)
+
+		go func(i int, snap *Snapshot) {
+			defer wg.Done()
+
+			vtail := fmt.Sprintf("VAL%030d", i)
+			for k := 0; k < nKey; k++ {
+				key := fmt.Sprintf("KEY%08d", k)
+				xvalue, err := snap.Get([]byte(key), nil)
+				if err != nil {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err)
+				}
+				value := key + vtail
+				if !bytes.Equal([]byte(value), xvalue) {
+					t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue)
+				}
+			}
+		}(i, snap)
+	}
+
+	wg.Wait()
+}
+
+func TestDB_TableCompactionBuilder(t *testing.T) {
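+	// Drive tableCompactionBuilder directly over overlapping tables and verify that a user key never spans adjacent output tables, then repeat the compaction under injected transient errors.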
+	stor := newTestStorage(t)
+	defer stor.Close()
+
+	const nSeq = 99
+
+	o := &opt.Options{
+		WriteBuffer:                 112 * opt.KiB,
+		CompactionTableSize:         43 * opt.KiB,
+		CompactionExpandLimitFactor: 1,
+		CompactionGPOverlapsFactor:  1,
+		DisableBlockCache:           true,
+	}
+	s, err := newSession(stor, o)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := s.create(); err != nil {
+		t.Fatal(err)
+	}
+	defer s.close()
+	var (
+		seq        uint64
+		targetSize = 5 * o.CompactionTableSize
+		value      = bytes.Repeat([]byte{'0'}, 100)
+	)
+	for i := 0; i < 2; i++ {
+		tw, err := s.tops.create()
+		if err != nil {
+			t.Fatal(err)
+		}
+		for k := 0; tw.tw.BytesLen() < targetSize; k++ {
+			key := []byte(fmt.Sprintf("%09d", k))
+			seq += nSeq - 1
+			for x := uint64(0); x < nSeq; x++ {
+				if err := tw.append(newIkey(key, seq-x, ktVal), value); err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+		tf, err := tw.finish()
+		if err != nil {
+			t.Fatal(err)
+		}
+		rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+		rec.addTableFile(i, tf)
+		if err := s.commit(rec); err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// Build grandparent.
+	v := s.version()
+	c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
+	rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
+	b := &tableCompactionBuilder{
+		s:         s,
+		c:         c,
+		rec:       rec,
+		stat1:     new(cStatsStaging),
+		minSeq:    0,
+		strict:    true,
+		tableSize: o.CompactionTableSize/3 + 961,
+	}
+	if err := b.run(new(compactionTransactCounter)); err != nil {
+		t.Fatal(err)
+	}
+	for _, t := range c.tables[0] {
+		rec.delTable(c.level, t.file.Num())
+	}
+	if err := s.commit(rec); err != nil {
+		t.Fatal(err)
+	}
+	c.release()
+
+	// Build level-1.
+	v = s.version()
+	c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
+	rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+	b = &tableCompactionBuilder{
+		s:         s,
+		c:         c,
+		rec:       rec,
+		stat1:     new(cStatsStaging),
+		minSeq:    0,
+		strict:    true,
+		tableSize: o.CompactionTableSize,
+	}
+	if err := b.run(new(compactionTransactCounter)); err != nil {
+		t.Fatal(err)
+	}
+	for _, t := range c.tables[0] {
+		rec.delTable(c.level, t.file.Num())
+	}
+	// Move grandparent to level-3
+	for _, t := range v.tables[2] {
+		rec.delTable(2, t.file.Num())
+		rec.addTableFile(3, t)
+	}
+	if err := s.commit(rec); err != nil {
+		t.Fatal(err)
+	}
+	c.release()
+
+	v = s.version()
+	for level, want := range []bool{false, true, false, true, false} {
+		got := len(v.tables[level]) > 0
+		if want != got {
+			t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got)
+		}
+	}
+	for i, f := range v.tables[1][:len(v.tables[1])-1] {
+		nf := v.tables[1][i+1]
+		if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) {
+			t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.file.Num(), nf.file.Num())
+		}
+	}
+	v.release()
+
+	// Compaction with transient error.
+	v = s.version()
+	c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
+	rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
+	b = &tableCompactionBuilder{
+		s:         s,
+		c:         c,
+		rec:       rec,
+		stat1:     new(cStatsStaging),
+		minSeq:    0,
+		strict:    true,
+		tableSize: o.CompactionTableSize,
+	}
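+	// Inject emulated storage failures: a one-shot sync error plus random
+	// read/write errors, so that b.run is expected to fail and be retried
+	// until a pass completes cleanly.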
+	stor.SetEmuErrOnce(storage.TypeTable, tsOpSync)
+	stor.SetEmuRandErr(storage.TypeTable, tsOpRead, tsOpReadAt, tsOpWrite)
+	stor.SetEmuRandErrProb(0xf0)
+	for {
+		if err := b.run(new(compactionTransactCounter)); err != nil {
+			t.Logf("(expected) b.run: %v", err)
+		} else {
+			break
+		}
+	}
+	if err := s.commit(rec); err != nil {
+		t.Fatal(err)
+	}
+	c.release()
+
+	stor.SetEmuErrOnce(0, tsOpSync)
+	stor.SetEmuRandErr(0, tsOpRead, tsOpReadAt, tsOpWrite)
+
+	v = s.version()
+	if len(v.tables[1]) != len(v.tables[2]) {
+		t.Fatalf("invalid tables length, want %d, got %d", len(v.tables[1]), len(v.tables[2]))
+	}
+	for i, f0 := range v.tables[1] {
+		f1 := v.tables[2][i]
+		iter0 := s.tops.newIterator(f0, nil, nil)
+		iter1 := s.tops.newIterator(f1, nil, nil)
+		for j := 0; true; j++ {
+			next0 := iter0.Next()
+			next1 := iter1.Next()
+			if next0 != next1 {
+				t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1)
+			}
+			key0 := iter0.Key()
+			key1 := iter1.Key()
+			if !bytes.Equal(key0, key1) {
+				t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1)
+			}
+			if !next0 {
+				break
+			}
+		}
+		iter0.Release()
+		iter1.Release()
+	}
+	v.release()
+}
+
+func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) {
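+	// Fill a key range, delete every key, then iterate over the (now
+	// deletion-only) range repeatedly; the iteration should trigger
+	// compaction until the range shrinks to a fraction of its initial size.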
+	const (
+		vSize = 200 * opt.KiB
+		tSize = 100 * opt.MiB
+		mIter = 100
+		n     = tSize / vSize
+	)
+
+	h := newDbHarnessWopt(t, &opt.Options{
+		Compression:       opt.NoCompression,
+		DisableBlockCache: true,
+	})
+	defer h.close()
+
+	key := func(x int) string {
+		return fmt.Sprintf("v%06d", x)
+	}
+
+	// Fill.
+	value := strings.Repeat("x", vSize)
+	for i := 0; i < n; i++ {
+		h.put(key(i), value)
+	}
+	h.compactMem()
+
+	// Delete all.
+	for i := 0; i < n; i++ {
+		h.delete(key(i))
+	}
+	h.compactMem()
+
+	var (
+		limit = n / limitDiv
+
+		startKey = key(0)
+		limitKey = key(limit)
+		maxKey   = key(n)
+		slice    = &util.Range{Limit: []byte(limitKey)}
+
+		initialSize0 = h.sizeOf(startKey, limitKey)
+		initialSize1 = h.sizeOf(limitKey, maxKey)
+	)
+
+	t.Logf("inital size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
+
+	for r := 0; true; r++ {
+		if r >= mIter {
+			t.Fatal("taking too long to compact")
+		}
+
+		// Iterates.
+		iter := h.db.NewIterator(slice, h.ro)
+		for iter.Next() {
+		}
+		if err := iter.Error(); err != nil {
+			t.Fatalf("Iter err: %v", err)
+		}
+		iter.Release()
+
+		// Wait compaction.
+		h.waitCompaction()
+
+		// Check size.
+		size0 := h.sizeOf(startKey, limitKey)
+		size1 := h.sizeOf(limitKey, maxKey)
+		t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
+		if size0 < initialSize0/10 {
+			break
+		}
+	}
+
+	if initialSize1 > 0 {
+		h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
+	}
+}
+
+func TestDB_IterTriggeredCompaction(t *testing.T) {
+	testDB_IterTriggeredCompaction(t, 1)
+}
+
+func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
+	testDB_IterTriggeredCompaction(t, 2)
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
index 2db007e278759e442808e95cdb2eb746cefac805..a8a2bdf72e173d592b4eba894a2996595cb1af12 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -7,8 +7,7 @@
 package leveldb
 
 import (
-	"errors"
-
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/storage"
@@ -32,40 +31,44 @@ func (p Sizes) Sum() (n uint64) {
 	return n
 }
 
+// Logging.
+func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
 // Check and clean files.
-func (d *DB) checkAndCleanFiles() error {
-	s := d.s
+func (db *DB) checkAndCleanFiles() error {
+	v := db.s.version()
+	defer v.release()
 
-	v := s.version_NB()
-	tables := make(map[uint64]bool)
-	for _, tt := range v.tables {
-		for _, t := range tt {
-			tables[t.file.Num()] = false
+	tablesMap := make(map[uint64]bool)
+	for _, tables := range v.tables {
+		for _, t := range tables {
+			tablesMap[t.file.Num()] = false
 		}
 	}
 
-	ff, err := s.getFiles(storage.TypeAll)
+	files, err := db.s.getFiles(storage.TypeAll)
 	if err != nil {
 		return err
 	}
 
 	var nTables int
 	var rem []storage.File
-	for _, f := range ff {
+	for _, f := range files {
 		keep := true
 		switch f.Type() {
 		case storage.TypeManifest:
-			keep = f.Num() >= s.manifestFile.Num()
+			keep = f.Num() >= db.s.manifestFile.Num()
 		case storage.TypeJournal:
-			if d.frozenJournalFile != nil {
-				keep = f.Num() >= d.frozenJournalFile.Num()
+			if db.frozenJournalFile != nil {
+				keep = f.Num() >= db.frozenJournalFile.Num()
 			} else {
-				keep = f.Num() >= d.journalFile.Num()
+				keep = f.Num() >= db.journalFile.Num()
 			}
 		case storage.TypeTable:
-			_, keep = tables[f.Num()]
+			_, keep = tablesMap[f.Num()]
 			if keep {
-				tables[f.Num()] = true
+				tablesMap[f.Num()] = true
 				nTables++
 			}
 		}
@@ -75,18 +78,20 @@ func (d *DB) checkAndCleanFiles() error {
 		}
 	}
 
-	if nTables != len(tables) {
-		for num, present := range tables {
+	if nTables != len(tablesMap) {
+		var missing []*storage.FileInfo
+		for num, present := range tablesMap {
 			if !present {
-				s.logf("db@janitor table missing @%d", num)
+				missing = append(missing, &storage.FileInfo{Type: storage.TypeTable, Num: num})
+				db.logf("db@janitor table missing @%d", num)
 			}
 		}
-		return ErrCorrupted{Type: MissingFiles, Err: errors.New("leveldb: table files missing")}
+		return errors.NewErrCorrupted(nil, &errors.ErrMissingFiles{Files: missing})
 	}
 
-	s.logf("db@janitor F·%d G·%d", len(ff), len(rem))
+	db.logf("db@janitor F·%d G·%d", len(files), len(rem))
 	for _, f := range rem {
-		s.logf("db@janitor removing %s-%d", f.Type(), f.Num())
+		db.logf("db@janitor removing %s-%d", f.Type(), f.Num())
 		if err := f.Remove(); err != nil {
 			return err
 		}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
index 4660e840ca9a3d424916fc850add5ab0fe1ab974..e1cf30c53615e801398a8d82e351817a5bfffe2b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -14,84 +14,93 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-func (d *DB) writeJournal(b *Batch) error {
-	w, err := d.journal.Next()
+func (db *DB) writeJournal(b *Batch) error {
+	w, err := db.journal.Next()
 	if err != nil {
 		return err
 	}
 	if _, err := w.Write(b.encode()); err != nil {
 		return err
 	}
-	if err := d.journal.Flush(); err != nil {
+	if err := db.journal.Flush(); err != nil {
 		return err
 	}
 	if b.sync {
-		return d.journalWriter.Sync()
+		return db.journalWriter.Sync()
 	}
 	return nil
 }
 
-func (d *DB) jWriter() {
-	defer d.closeW.Done()
+func (db *DB) jWriter() {
+	defer db.closeW.Done()
 	for {
 		select {
-		case b := <-d.journalC:
+		case b := <-db.journalC:
 			if b != nil {
-				d.journalAckC <- d.writeJournal(b)
+				db.journalAckC <- db.writeJournal(b)
 			}
-		case _, _ = <-d.closeC:
+		case _, _ = <-db.closeC:
 			return
 		}
 	}
 }
 
-func (d *DB) rotateMem(n int) (mem *memdb.DB, err error) {
+func (db *DB) rotateMem(n int) (mem *memDB, err error) {
 	// Wait for pending memdb compaction.
-	err = d.compSendIdle(d.mcompCmdC)
+	err = db.compSendIdle(db.mcompCmdC)
 	if err != nil {
 		return
 	}
 
 	// Create new memdb and journal.
-	mem, err = d.newMem(n)
+	mem, err = db.newMem(n)
 	if err != nil {
 		return
 	}
 
 	// Schedule memdb compaction.
-	d.compTrigger(d.mcompTriggerC)
+	db.compSendTrigger(db.mcompCmdC)
 	return
 }
 
-func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
-	s := d.s
-
+func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
 	delayed := false
-	flush := func() bool {
-		v := s.version()
+	flush := func() (retry bool) {
+		v := db.s.version()
 		defer v.release()
-		mem = d.getEffectiveMem()
-		nn = mem.Free()
+		mem = db.getEffectiveMem()
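+		// On retry the memdb reference acquired above is released by the
+		// deferred function below; only the final pass hands the reference
+		// to the caller.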
+		defer func() {
+			if retry {
+				mem.decref()
+				mem = nil
+			}
+		}()
+		nn = mem.mdb.Free()
 		switch {
-		case v.tLen(0) >= kL0_SlowdownWritesTrigger && !delayed:
+		case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
 			delayed = true
 			time.Sleep(time.Millisecond)
 		case nn >= n:
 			return false
-		case v.tLen(0) >= kL0_StopWritesTrigger:
+		case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
 			delayed = true
-			err = d.compSendIdle(d.tcompCmdC)
+			err = db.compSendIdle(db.tcompCmdC)
 			if err != nil {
 				return false
 			}
 		default:
 			// Allow memdb to grow if it has no entry.
-			if mem.Len() == 0 {
+			if mem.mdb.Len() == 0 {
 				nn = n
-				return false
+			} else {
+				mem.decref()
+				mem, err = db.rotateMem(n)
+				if err == nil {
+					nn = mem.mdb.Free()
+				} else {
+					nn = 0
+				}
 			}
-			mem, err = d.rotateMem(n)
-			nn = mem.Free()
 			return false
 		}
 		return true
@@ -100,7 +109,12 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
 	for flush() {
 	}
 	if delayed {
-		s.logf("db@write delayed T·%v", time.Since(start))
+		db.writeDelay += time.Since(start)
+		db.writeDelayN++
+	} else if db.writeDelayN > 0 {
+		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+		db.writeDelay = 0
+		db.writeDelayN = 0
 	}
 	return
 }
@@ -109,39 +123,45 @@ func (d *DB) flush(n int) (mem *memdb.DB, nn int, err error) {
 // sequentially.
 //
 // It is safe to modify the contents of the arguments after Write returns.
-func (d *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
-	err = d.ok()
-	if err != nil || b == nil || b.len() == 0 {
+func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
+	err = db.ok()
+	if err != nil || b == nil || b.Len() == 0 {
 		return
 	}
 
 	b.init(wo.GetSync())
 
 	// The write happen synchronously.
-retry:
 	select {
-	case d.writeC <- b:
-		if <-d.writeMergedC {
-			return <-d.writeAckC
+	case db.writeC <- b:
+		if <-db.writeMergedC {
+			return <-db.writeAckC
 		}
-		goto retry
-	case d.writeLockC <- struct{}{}:
-	case _, _ = <-d.closeC:
+	case db.writeLockC <- struct{}{}:
+	case err = <-db.compPerErrC:
+		return
+	case _, _ = <-db.closeC:
 		return ErrClosed
 	}
 
 	merged := 0
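+	// danglingMerge is set below when a batch received from writeC cannot
+	// be merged; the deferred block must then answer writeMergedC with
+	// false, transferring write-lock ownership to that writer, instead of
+	// releasing writeLockC.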
+	danglingMerge := false
 	defer func() {
-		<-d.writeLockC
+		if danglingMerge {
+			db.writeMergedC <- false
+		} else {
+			<-db.writeLockC
+		}
 		for i := 0; i < merged; i++ {
-			d.writeAckC <- err
+			db.writeAckC <- err
 		}
 	}()
 
-	mem, memFree, err := d.flush(b.size())
+	mem, memFree, err := db.flush(b.size())
 	if err != nil {
 		return
 	}
+	defer mem.decref()
 
 	// Calculate maximum size of the batch.
 	m := 1 << 20
@@ -154,13 +174,13 @@ retry:
 drain:
 	for b.size() < m && !b.sync {
 		select {
-		case nb := <-d.writeC:
+		case nb := <-db.writeC:
 			if b.size()+nb.size() <= m {
 				b.append(nb)
-				d.writeMergedC <- true
+				db.writeMergedC <- true
 				merged++
 			} else {
-				d.writeMergedC <- false
+				danglingMerge = true
 				break drain
 			}
 		default:
@@ -169,44 +189,52 @@ drain:
 	}
 
 	// Set batch first seq number relative from last seq.
-	b.seq = d.seq + 1
+	b.seq = db.seq + 1
 
 	// Write journal concurrently if it is large enough.
 	if b.size() >= (128 << 10) {
 		// Push the write batch to the journal writer
 		select {
-		case _, _ = <-d.closeC:
+		case db.journalC <- b:
+			// Write into memdb
+			if berr := b.memReplay(mem.mdb); berr != nil {
+				panic(berr)
+			}
+		case err = <-db.compPerErrC:
+			return
+		case _, _ = <-db.closeC:
 			err = ErrClosed
 			return
-		case d.journalC <- b:
-			// Write into memdb
-			b.memReplay(mem)
 		}
 		// Wait for journal writer
 		select {
-		case _, _ = <-d.closeC:
-			err = ErrClosed
-			return
-		case err = <-d.journalAckC:
+		case err = <-db.journalAckC:
 			if err != nil {
 				// Revert memdb if error detected
-				b.revertMemReplay(mem)
+				if berr := b.revertMemReplay(mem.mdb); berr != nil {
+					panic(berr)
+				}
 				return
 			}
+		case _, _ = <-db.closeC:
+			err = ErrClosed
+			return
 		}
 	} else {
-		err = d.writeJournal(b)
+		err = db.writeJournal(b)
 		if err != nil {
 			return
 		}
-		b.memReplay(mem)
+		if berr := b.memReplay(mem.mdb); berr != nil {
+			panic(berr)
+		}
 	}
 
 	// Set last seq number.
-	d.addSeq(uint64(b.len()))
+	db.addSeq(uint64(b.Len()))
 
 	if b.size() >= memFree {
-		d.rotateMem(0)
+		db.rotateMem(0)
 	}
 	return
 }
@@ -215,20 +243,20 @@ drain:
 // for that key; a DB is not a multi-map.
 //
 // It is safe to modify the contents of the arguments after Put returns.
-func (d *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
 	b := new(Batch)
 	b.Put(key, value)
-	return d.Write(b, wo)
+	return db.Write(b, wo)
 }
 
 // Delete deletes the value for the given key. It returns ErrNotFound if
 // the DB does not contain the key.
 //
 // It is safe to modify the contents of the arguments after Delete returns.
-func (d *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
 	b := new(Batch)
 	b.Delete(key)
-	return d.Write(b, wo)
+	return db.Write(b, wo)
 }
 
 func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
@@ -247,33 +275,37 @@ func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
 // A nil Range.Start is treated as a key before all keys in the DB.
 // And a nil Range.Limit is treated as a key after all keys in the DB.
 // Therefore if both is nil then it will compact entire DB.
-func (d *DB) CompactRange(r util.Range) error {
-	if err := d.ok(); err != nil {
+func (db *DB) CompactRange(r util.Range) error {
+	if err := db.ok(); err != nil {
 		return err
 	}
 
+	// Lock writer.
 	select {
-	case d.writeLockC <- struct{}{}:
-	case _, _ = <-d.closeC:
+	case db.writeLockC <- struct{}{}:
+	case err := <-db.compPerErrC:
+		return err
+	case _, _ = <-db.closeC:
 		return ErrClosed
 	}
 
 	// Check for overlaps in memdb.
-	mem := d.getEffectiveMem()
-	if isMemOverlaps(d.s.icmp, mem, r.Start, r.Limit) {
+	mem := db.getEffectiveMem()
+	defer mem.decref()
+	if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
 		// Memdb compaction.
-		if _, err := d.rotateMem(0); err != nil {
-			<-d.writeLockC
+		if _, err := db.rotateMem(0); err != nil {
+			<-db.writeLockC
 			return err
 		}
-		<-d.writeLockC
-		if err := d.compSendIdle(d.mcompCmdC); err != nil {
+		<-db.writeLockC
+		if err := db.compSendIdle(db.mcompCmdC); err != nil {
 			return err
 		}
 	} else {
-		<-d.writeLockC
+		<-db.writeLockC
 	}
 
 	// Table compaction.
-	return d.compSendRange(d.tcompCmdC, -1, r.Start, r.Limit)
+	return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
index ac9ea3d0c4f874fb13907d1ecc857fb695d5bedd..53f13bb24ccfe6f600cfbf381b4a60f7cd173973 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -37,6 +37,16 @@
 //	err = iter.Error()
 //	...
 //
+// Iterate over subset of database content with a particular prefix:
+//	iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+//	for iter.Next() {
+//		// Use key/value.
+//		...
+//	}
+//	iter.Release()
+//	err = iter.Error()
+//	...
+//
 // Seek-then-Iterate:
 //
 // 	iter := db.NewIterator(nil, nil)
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
similarity index 50%
rename from Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go
rename to Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
index 8066bd9a4bfeab18a30cf7603ef64442fc0f7fb0..29d0d2f27993801da7567947f53a47d182d171f8 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/error.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -7,32 +7,12 @@
 package leveldb
 
 import (
-	"errors"
-
-	"github.com/syndtr/goleveldb/leveldb/util"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 )
 
 var (
-	ErrNotFound         = util.ErrNotFound
+	ErrNotFound         = errors.ErrNotFound
 	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
 	ErrIterReleased     = errors.New("leveldb: iterator released")
 	ErrClosed           = errors.New("leveldb: closed")
 )
-
-type CorruptionType int
-
-const (
-	CorruptedManifest CorruptionType = iota
-	MissingFiles
-)
-
-// ErrCorrupted is the type that wraps errors that indicate corruption in
-// the database.
-type ErrCorrupted struct {
-	Type CorruptionType
-	Err  error
-}
-
-func (e ErrCorrupted) Error() string {
-	return e.Err.Error()
-}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..84b5d6b7b2149744c0098d7ca31b0669554be8ed
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/syndtr/goleveldb/leveldb/storage"
+	"github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+	ErrNotFound    = New("leveldb: not found")
+	ErrReleased    = util.ErrReleased
+	ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+	return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+	File *storage.FileInfo
+	Err  error
+}
+
+func (e *ErrCorrupted) Error() string {
+	if e.File != nil {
+		return fmt.Sprintf("%v [file=%v]", e.Err, e.File)
+	} else {
+		return e.Err.Error()
+	}
+}
+
+// NewErrCorrupted creates a new ErrCorrupted error.
+func NewErrCorrupted(f storage.File, err error) error {
+	return &ErrCorrupted{storage.NewFileInfo(f), err}
+}
+
+// IsCorrupted reports whether the error indicates a corruption in the
+// database.
+func IsCorrupted(err error) bool {
+	switch err.(type) {
+	case *ErrCorrupted:
+		return true
+	}
+	return false
+}
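+
+// A minimal usage sketch (openAndVerify is a hypothetical caller-side
+// helper, not part of this package):
+//
+//	if err := openAndVerify(f); err != nil {
+//		if errors.IsCorrupted(err) {
+//			// Database corruption; recovery may be attempted.
+//		} else {
+//			// Ordinary I/O or usage error.
+//		}
+//	}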
+
+// ErrMissingFiles is the type that indicates a corruption due to missing
+// files.
+type ErrMissingFiles struct {
+	Files []*storage.FileInfo
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFile sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; other error types are returned unchanged.
+func SetFile(err error, f storage.File) error {
+	switch x := err.(type) {
+	case *ErrCorrupted:
+		x.File = storage.NewFileInfo(f)
+		return x
+	}
+	return err
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
index d7dff04b68a7a76c28568c421fd860bb731486de..b328ece4e2caffe0b1004bed2047d3a3cb56eb4d 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/external_test.go
@@ -17,13 +17,14 @@ import (
 var _ = testutil.Defer(func() {
 	Describe("Leveldb external", func() {
 		o := &opt.Options{
-			BlockCache:           opt.NoCache,
-			BlockRestartInterval: 5,
-			BlockSize:            50,
-			Compression:          opt.NoCompression,
-			MaxOpenFiles:         0,
-			Strict:               opt.StrictAll,
-			WriteBuffer:          1000,
+			DisableBlockCache:      true,
+			BlockRestartInterval:   5,
+			BlockSize:              80,
+			Compression:            opt.NoCompression,
+			OpenFilesCacheCapacity: -1,
+			Strict:                 opt.StrictAll,
+			WriteBuffer:            1000,
+			CompactionTableSize:    2000,
 		}
 
 		Describe("write test", func() {
@@ -36,22 +37,21 @@ var _ = testutil.Defer(func() {
 				testutil.DoDBTesting(&t)
 				db.TestClose()
 				done <- true
-			}, 9.0)
+			}, 20.0)
 		})
 
 		Describe("read test", func() {
-			testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB {
+			testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB {
 				// Building the DB.
 				db := newTestingDB(o, nil, nil)
 				kv.IterateShuffled(nil, func(i int, key, value []byte) {
 					err := db.TestPut(key, value)
 					Expect(err).NotTo(HaveOccurred())
 				})
-				testutil.Defer("teardown", func() {
-					db.TestClose()
-				})
 
 				return db
+			}, func(db testutil.DB) {
+				db.(*testingDB).TestClose()
 			})
 		})
 	})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
index 9b4b72741b5038a17ddc57640b8ba242e61ce7b3..a23ab05f70fe8e45c0c0a8e0e4617884219a0cbf 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -40,13 +40,19 @@ type basicArrayIterator struct {
 	util.BasicReleaser
 	array BasicArray
 	pos   int
+	err   error
 }
 
 func (i *basicArrayIterator) Valid() bool {
-	return i.pos >= 0 && i.pos < i.array.Len()
+	return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
 }
 
 func (i *basicArrayIterator) First() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	if i.array.Len() == 0 {
 		i.pos = -1
 		return false
@@ -56,6 +62,11 @@ func (i *basicArrayIterator) First() bool {
 }
 
 func (i *basicArrayIterator) Last() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	n := i.array.Len()
 	if n == 0 {
 		i.pos = 0
@@ -66,6 +77,11 @@ func (i *basicArrayIterator) Last() bool {
 }
 
 func (i *basicArrayIterator) Seek(key []byte) bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	n := i.array.Len()
 	if n == 0 {
 		i.pos = 0
@@ -79,6 +95,11 @@ func (i *basicArrayIterator) Seek(key []byte) bool {
 }
 
 func (i *basicArrayIterator) Next() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	i.pos++
 	if n := i.array.Len(); i.pos >= n {
 		i.pos = n
@@ -88,6 +109,11 @@ func (i *basicArrayIterator) Next() bool {
 }
 
 func (i *basicArrayIterator) Prev() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	i.pos--
 	if i.pos < 0 {
 		i.pos = -1
@@ -96,7 +122,7 @@ func (i *basicArrayIterator) Prev() bool {
 	return true
 }
 
-func (i *basicArrayIterator) Error() error { return nil }
+func (i *basicArrayIterator) Error() error { return i.err }
 
 type arrayIterator struct {
 	basicArrayIterator
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
index 1e99a2bf6d9a1b82a7a6b08876d9cbcaec1fc5af..939adbb9332bcc2ce462303b9f95c9601fada105 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -7,6 +7,7 @@
 package iterator
 
 import (
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
@@ -22,13 +23,13 @@ type IteratorIndexer interface {
 
 type indexedIterator struct {
 	util.BasicReleaser
-	index     IteratorIndexer
-	strict    bool
-	strictGet bool
+	index  IteratorIndexer
+	strict bool
 
-	data Iterator
-	err  error
-	errf func(err error)
+	data   Iterator
+	err    error
+	errf   func(err error)
+	closed bool
 }
 
 func (i *indexedIterator) setData() {
@@ -36,11 +37,6 @@ func (i *indexedIterator) setData() {
 		i.data.Release()
 	}
 	i.data = i.index.Get()
-	if i.strictGet {
-		if err := i.data.Error(); err != nil {
-			i.err = err
-		}
-	}
 }
 
 func (i *indexedIterator) clearData() {
@@ -50,14 +46,21 @@ func (i *indexedIterator) clearData() {
 	i.data = nil
 }
 
-func (i *indexedIterator) dataErr() bool {
-	if i.errf != nil {
-		if err := i.data.Error(); err != nil {
+func (i *indexedIterator) indexErr() {
+	if err := i.index.Error(); err != nil {
+		if i.errf != nil {
 			i.errf(err)
 		}
+		i.err = err
 	}
-	if i.strict {
-		if err := i.data.Error(); err != nil {
+}
+
+func (i *indexedIterator) dataErr() bool {
+	if err := i.data.Error(); err != nil {
+		if i.errf != nil {
+			i.errf(err)
+		}
+		if i.strict || !errors.IsCorrupted(err) {
 			i.err = err
 			return true
 		}
@@ -72,9 +75,13 @@ func (i *indexedIterator) Valid() bool {
 func (i *indexedIterator) First() bool {
 	if i.err != nil {
 		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
 	}
 
 	if !i.index.First() {
+		i.indexErr()
 		i.clearData()
 		return false
 	}
@@ -85,9 +92,13 @@ func (i *indexedIterator) First() bool {
 func (i *indexedIterator) Last() bool {
 	if i.err != nil {
 		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
 	}
 
 	if !i.index.Last() {
+		i.indexErr()
 		i.clearData()
 		return false
 	}
@@ -105,9 +116,13 @@ func (i *indexedIterator) Last() bool {
 func (i *indexedIterator) Seek(key []byte) bool {
 	if i.err != nil {
 		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
 	}
 
 	if !i.index.Seek(key) {
+		i.indexErr()
 		i.clearData()
 		return false
 	}
@@ -125,6 +140,9 @@ func (i *indexedIterator) Seek(key []byte) bool {
 func (i *indexedIterator) Next() bool {
 	if i.err != nil {
 		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
 	}
 
 	switch {
@@ -136,6 +154,7 @@ func (i *indexedIterator) Next() bool {
 		fallthrough
 	case i.data == nil:
 		if !i.index.Next() {
+			i.indexErr()
 			return false
 		}
 		i.setData()
@@ -147,6 +166,9 @@ func (i *indexedIterator) Next() bool {
 func (i *indexedIterator) Prev() bool {
 	if i.err != nil {
 		return false
+	} else if i.Released() {
+		i.err = ErrIterReleased
+		return false
 	}
 
 	switch {
@@ -158,6 +180,7 @@ func (i *indexedIterator) Prev() bool {
 		fallthrough
 	case i.data == nil:
 		if !i.index.Prev() {
+			i.indexErr()
 			return false
 		}
 		i.setData()
@@ -206,16 +229,14 @@ func (i *indexedIterator) SetErrorCallback(f func(err error)) {
 	i.errf = f
 }
 
-// NewIndexedIterator returns an indexed iterator. An index is iterator
-// that returns another iterator, a data iterator. A data iterator is the
+// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
 // iterator that contains actual key/value pairs.
 //
-// If strict is true then error yield by data iterator will halt the indexed
-// iterator, on contrary if strict is false then the indexed iterator will
-// ignore those error and move on to the next index. If strictGet is true and
-// index.Get() yield an 'error iterator' then the indexed iterator will be halted.
-// An 'error iterator' is iterator which its Error() method always return non-nil
-// even before any 'seeks method' is called.
-func NewIndexedIterator(index IteratorIndexer, strict, strictGet bool) Iterator {
-	return &indexedIterator{index: index, strict: strict, strictGet: strictGet}
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on the 'index iterator' itself is
+// never ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+	return &indexedIterator{index: index, strict: strict}
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
index 6a89b3830db37f3d15646ee67117d53e8d5c74f9..72a7978924ebf0aaa52eefd16ef79ac57a40f086 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
@@ -65,7 +65,7 @@ var _ = testutil.Defer(func() {
 					// Test the iterator.
 					t := testutil.IteratorTesting{
 						KeyValue: kv.Clone(),
-						Iter:     NewIndexedIterator(NewArrayIndexer(index), true, true),
+						Iter:     NewIndexedIterator(NewArrayIndexer(index), true),
 					}
 					testutil.DoIteratorTesting(&t)
 					done <- true
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
index 1b80184e84201238e857f8caa259937340c4df9b..c2522860b0b8c0477fec8d54068053e22de6a1d4 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -14,6 +14,10 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
+var (
+	ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
 // IteratorSeeker is the interface that wraps the 'seeks method'.
 type IteratorSeeker interface {
 	// First moves the iterator to the first key/value pair. If the iterator
@@ -100,28 +104,13 @@ type ErrorCallbackSetter interface {
 }
 
 type emptyIterator struct {
-	releaser util.Releaser
-	released bool
-	err      error
+	util.BasicReleaser
+	err error
 }
 
 func (i *emptyIterator) rErr() {
-	if i.err == nil && i.released {
-		i.err = errors.New("leveldb/iterator: iterator released")
-	}
-}
-
-func (i *emptyIterator) Release() {
-	if i.releaser != nil {
-		i.releaser.Release()
-		i.releaser = nil
-	}
-	i.released = true
-}
-
-func (i *emptyIterator) SetReleaser(releaser util.Releaser) {
-	if !i.released {
-		i.releaser = releaser
+	if i.err == nil && i.Released() {
+		i.err = ErrIterReleased
 	}
 }
 
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
index 7ec2fc6f24b8c64d6d160688e541ea0a07df9300..5ef8d5bafb3f265754c74562d987ed2858bb174f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
@@ -3,15 +3,9 @@ package iterator_test
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
 func TestIterator(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Iterator Suite")
+	testutil.RunSuite(t, "Iterator Suite")
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
index c8314c4e5f9bc39fb70d6a148c9de4e6e0cfad1a..1a7e29df8fbd975902e5042031cd777b9a9764ce 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -7,16 +7,11 @@
 package iterator
 
 import (
-	"errors"
-
 	"github.com/syndtr/goleveldb/leveldb/comparer"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-var (
-	ErrIterReleased = errors.New("leveldb/iterator: iterator released")
-)
-
 type dir int
 
 const (
@@ -48,13 +43,11 @@ func assertKey(key []byte) []byte {
 }
 
 func (i *mergedIterator) iterErr(iter Iterator) bool {
-	if i.errf != nil {
-		if err := iter.Error(); err != nil {
+	if err := iter.Error(); err != nil {
+		if i.errf != nil {
 			i.errf(err)
 		}
-	}
-	if i.strict {
-		if err := iter.Error(); err != nil {
+		if i.strict || !errors.IsCorrupted(err) {
 			i.err = err
 			return true
 		}
@@ -274,9 +267,13 @@ func (i *mergedIterator) Release() {
 }
 
 func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
-	if i.dir != dirReleased {
-		i.releaser = releaser
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
+	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
 	}
+	i.releaser = releaser
 }
 
 func (i *mergedIterator) Error() error {
@@ -294,9 +291,9 @@ func (i *mergedIterator) SetErrorCallback(f func(err error)) {
 // keys: if iters[i] contains a key k then iters[j] will not contain that key k.
 // None of the iters may be nil.
 //
-// If strict is true then error yield by any iterators will halt the merged
-// iterator, on contrary if strict is false then the merged iterator will
-// ignore those error and move on to the next iterator.
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will
+// continue to the next 'input iterator'.
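+//
+// A usage sketch (iters and icmp are assumed to be supplied by the caller):
+//
+//	it := NewMergedIterator(iters, icmp, true)
+//	for it.Next() {
+//		// Keys arrive in icmp order across all input iterators.
+//	}
+//	it.Release()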
 func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
 	return &mergedIterator{
 		iters:  iters,
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
index b522c76e65391b05f4c83b1177258cf96c7e5f04..6519ec660eb7110425d545d6e7154c0a447a869f 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -79,10 +79,10 @@ package journal
 
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
@@ -103,18 +103,18 @@ type flusher interface {
 	Flush() error
 }
 
-// DroppedError is the error type that passed to Dropper.Drop method.
-type DroppedError struct {
+// ErrCorrupted is the error type generated by a corrupted block or chunk.
+type ErrCorrupted struct {
 	Size   int
 	Reason string
 }
 
-func (e DroppedError) Error() string {
-	return fmt.Sprintf("leveldb/journal: dropped %d bytes: %s", e.Size, e.Reason)
+func (e *ErrCorrupted) Error() string {
+	return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
 }
 
 // Dropper is the interface that wrap simple Drop method. The Drop
-// method will be called when the journal reader dropping a chunk.
+// method will be called when the journal reader drops a block or chunk.
 type Dropper interface {
 	Drop(err error)
 }
@@ -158,76 +158,78 @@ func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
 	}
 }
 
+var errSkip = errors.New("leveldb/journal: skipped")
+
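+// corrupt reports a corrupted block or chunk of n bytes to the dropper. In
+// strict mode, unless skip is set, the error is also recorded in r.err and
+// returned, halting the reader; otherwise errSkip is returned so the caller
+// can skip past the damaged chunk.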
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+	if r.dropper != nil {
+		r.dropper.Drop(&ErrCorrupted{n, reason})
+	}
+	if r.strict && !skip {
+		r.err = errors.NewErrCorrupted(nil, &ErrCorrupted{n, reason})
+		return r.err
+	}
+	return errSkip
+}
+
 // nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
 // next block into the buffer if necessary.
-func (r *Reader) nextChunk(wantFirst, skip bool) error {
+func (r *Reader) nextChunk(first bool) error {
 	for {
 		if r.j+headerSize <= r.n {
 			checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
 			length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
 			chunkType := r.buf[r.j+6]
 
-			var err error
 			if checksum == 0 && length == 0 && chunkType == 0 {
 				// Drop entire block.
-				err = DroppedError{r.n - r.j, "zero header"}
+				m := r.n - r.j
 				r.i = r.n
 				r.j = r.n
+				return r.corrupt(m, "zero header", false)
 			} else {
 				m := r.n - r.j
 				r.i = r.j + headerSize
 				r.j = r.j + headerSize + int(length)
 				if r.j > r.n {
 					// Drop entire block.
-					err = DroppedError{m, "chunk length overflows block"}
 					r.i = r.n
 					r.j = r.n
+					return r.corrupt(m, "chunk length overflows block", false)
 				} else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
 					// Drop entire block.
-					err = DroppedError{m, "checksum mismatch"}
 					r.i = r.n
 					r.j = r.n
+					return r.corrupt(m, "checksum mismatch", false)
 				}
 			}
-			if wantFirst && err == nil && chunkType != fullChunkType && chunkType != firstChunkType {
-				if skip {
-					// The chunk are intentionally skipped.
-					if chunkType == lastChunkType {
-						skip = false
-					}
-					continue
-				} else {
-					// Drop the chunk.
-					err = DroppedError{r.j - r.i + headerSize, "orphan chunk"}
-				}
-			}
-			if err == nil {
-				r.last = chunkType == fullChunkType || chunkType == lastChunkType
-			} else {
-				if r.dropper != nil {
-					r.dropper.Drop(err)
-				}
-				if r.strict {
-					r.err = err
-				}
+			if first && chunkType != fullChunkType && chunkType != firstChunkType {
+				m := r.j - r.i
+				r.i = r.j
+				// Report the orphan chunk to the dropper, but skip over it.
+				return r.corrupt(m+headerSize, "orphan chunk", true)
 			}
-			return err
+			r.last = chunkType == fullChunkType || chunkType == lastChunkType
+			return nil
 		}
+
+		// The last block.
 		if r.n < blockSize && r.n > 0 {
-			// This is the last block.
-			if r.j != r.n {
-				r.err = io.ErrUnexpectedEOF
-			} else {
-				r.err = io.EOF
+			if !first {
+				return r.corrupt(0, "missing chunk part", false)
 			}
+			r.err = io.EOF
 			return r.err
 		}
+
+		// Read block.
 		n, err := io.ReadFull(r.r, r.buf[:])
-		if err != nil && err != io.ErrUnexpectedEOF {
-			r.err = err
-			return r.err
+		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+			return err
 		}
 		if n == 0 {
+			if !first {
+				return r.corrupt(0, "missing chunk part", false)
+			}
 			r.err = io.EOF
 			return r.err
 		}
@@ -237,29 +239,26 @@ func (r *Reader) nextChunk(wantFirst, skip bool) error {
 
 // Next returns a reader for the next journal. It returns io.EOF if there are no
 // more journals. The reader returned becomes stale after the next Next call,
-// and should no longer be used.
+// and should no longer be used. If strict is false, the reader will return
+// io.ErrUnexpectedEOF when it encounters a corrupted journal.
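+//
+// A typical read loop, as a sketch:
+//
+//	for {
+//		rr, err := r.Next()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		// Consume rr fully before calling Next again.
+//	}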
 func (r *Reader) Next() (io.Reader, error) {
 	r.seq++
 	if r.err != nil {
 		return nil, r.err
 	}
-	skip := !r.last
+	r.i = r.j
 	for {
-		r.i = r.j
-		if r.nextChunk(true, skip) != nil {
-			// So that 'orphan chunk' drop will be reported.
-			skip = false
-		} else {
+		if err := r.nextChunk(true); err == nil {
 			break
-		}
-		if r.err != nil {
-			return nil, r.err
+		} else if err != errSkip {
+			return nil, err
 		}
 	}
 	return &singleReader{r, r.seq, nil}, nil
 }
 
-// Reset resets the journal reader, allows reuse of the journal reader.
+// Reset resets the journal reader, allowing reuse of the journal reader. Reset
+// returns the last accumulated error.
 func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
 	r.seq++
 	err := r.err
@@ -296,7 +295,11 @@ func (x *singleReader) Read(p []byte) (int, error) {
 		if r.last {
 			return 0, io.EOF
 		}
-		if x.err = r.nextChunk(false, false); x.err != nil {
+		x.err = r.nextChunk(false)
+		if x.err != nil {
+			if x.err == errSkip {
+				x.err = io.ErrUnexpectedEOF
+			}
 			return 0, x.err
 		}
 	}
@@ -320,7 +323,11 @@ func (x *singleReader) ReadByte() (byte, error) {
 		if r.last {
 			return 0, io.EOF
 		}
-		if x.err = r.nextChunk(false, false); x.err != nil {
+		x.err = r.nextChunk(false)
+		if x.err != nil {
+			if x.err == errSkip {
+				x.err = io.ErrUnexpectedEOF
+			}
 			return 0, x.err
 		}
 	}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
index 5e1193ae2cc287ec931f189172ddd016be717403..0fcf22599f265b23175f0dcf60dda176fc2c33e3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
@@ -12,6 +12,7 @@ package journal
 
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -326,3 +327,492 @@ func TestStaleWriter(t *testing.T) {
 		t.Fatalf("stale write #1: unexpected error: %v", err)
 	}
 }
+
+func TestCorrupt_MissingLastBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Cut the last block.
+	b := buf.Bytes()[:blockSize]
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read.
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if n != blockSize-1024 {
+		t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024)
+	}
+
+	// Second read.
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedFirstBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #0.
+	for i := 0; i < 1024; i++ {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (third record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 2; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedMiddleBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #1.
+	for i := 0; i < 1024; i++ {
+		b[blockSize+i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	// Third read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #2: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 2; n != want {
+		t.Fatalf("read #2: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_CorruptedLastBlock(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	// Fourth record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil {
+		t.Fatalf("write #3: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting block #3.
+	for i := len(b) - 1; i > len(b)-1024; i-- {
+		b[i] = '1'
+	}
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize - headerSize); n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	// Third read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #2: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #2: got %d bytes want %d", n, want)
+	}
+
+	// Fourth read (fourth record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #3: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_FirstChunkLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (second record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("read #1: unexpected error: %v", err)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
+
+func TestCorrupt_MiddleChunkLengthOverflow(t *testing.T) {
+	buf := new(bytes.Buffer)
+
+	w := NewWriter(buf)
+
+	// First record.
+	ww, err := w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil {
+		t.Fatalf("write #0: unexpected error: %v", err)
+	}
+
+	// Second record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil {
+		t.Fatalf("write #1: unexpected error: %v", err)
+	}
+
+	// Third record.
+	ww, err = w.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil {
+		t.Fatalf("write #2: unexpected error: %v", err)
+	}
+
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	b := buf.Bytes()
+	// Corrupting record #1.
+	x := blockSize/2 + headerSize
+	binary.LittleEndian.PutUint16(b[x+4:], 0xffff)
+
+	r := NewReader(bytes.NewReader(b), dropper{t}, false, true)
+
+	// First read (first record).
+	rr, err := r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #0: %v", err)
+	}
+	if want := int64(blockSize / 2); n != want {
+		t.Fatalf("read #0: got %d bytes want %d", n, want)
+	}
+
+	// Second read (third record).
+	rr, err = r.Next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err = io.Copy(ioutil.Discard, rr)
+	if err != nil {
+		t.Fatalf("read #1: %v", err)
+	}
+	if want := int64(blockSize-headerSize) + 1; n != want {
+		t.Fatalf("read #1: got %d bytes want %d", n, want)
+	}
+
+	if _, err := r.Next(); err != io.EOF {
+		t.Fatalf("last next: unexpected error: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
index b9acf932dab68fa7e66fef23dab3ff8476bfab04..572ae8150c792dd43a3cc0477d1ff44075a51db3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key.go
@@ -9,15 +9,30 @@ package leveldb
 import (
 	"encoding/binary"
 	"fmt"
+
+	"github.com/syndtr/goleveldb/leveldb/errors"
 )
 
-type vType int
+type ErrIkeyCorrupted struct {
+	Ikey   []byte
+	Reason string
+}
+
+func (e *ErrIkeyCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: iKey %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrIkeyCorrupted(ikey []byte, reason string) error {
+	return errors.NewErrCorrupted(nil, &ErrIkeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type kType int
 
-func (t vType) String() string {
-	switch t {
-	case tDel:
+func (kt kType) String() string {
+	switch kt {
+	case ktDel:
 		return "d"
-	case tVal:
+	case ktVal:
 		return "v"
 	}
 	return "x"
@@ -26,16 +41,16 @@ func (t vType) String() string {
 // Value types encoded as the last component of internal keys.
 // Don't modify; this value are saved to disk.
 const (
-	tDel vType = iota
-	tVal
+	ktDel kType = iota
+	ktVal
 )
 
-// tSeek defines the vType that should be passed when constructing an
+// ktSeek defines the kType that should be passed when constructing an
 // internal key for seeking to a particular sequence number (since we
 // sort sequence numbers in decreasing order and the value type is
 // embedded as the low 8 bits in the sequence number in internal keys,
 // we need to use the highest-numbered ValueType, not the lowest).
-const tSeek = tVal
+const ktSeek = ktVal
 
 const (
 	// Maximum value possible for sequence number; the 8-bits are
@@ -43,7 +58,7 @@ const (
 	// 64-bit integer.
 	kMaxSeq uint64 = (uint64(1) << 56) - 1
 	// Maximum value possible for packed sequence number and type.
-	kMaxNum uint64 = (kMaxSeq << 8) | uint64(tSeek)
+	kMaxNum uint64 = (kMaxSeq << 8) | uint64(ktSeek)
 )
 
 // Maximum number encoded in bytes.
@@ -55,85 +70,73 @@ func init() {
 
 type iKey []byte
 
-func newIKey(ukey []byte, seq uint64, t vType) iKey {
-	if seq > kMaxSeq || t > tVal {
-		panic("invalid seq number or value type")
+func newIkey(ukey []byte, seq uint64, kt kType) iKey {
+	if seq > kMaxSeq {
+		panic("leveldb: invalid sequence number")
+	} else if kt > ktVal {
+		panic("leveldb: invalid type")
 	}
 
-	b := make(iKey, len(ukey)+8)
-	copy(b, ukey)
-	binary.LittleEndian.PutUint64(b[len(ukey):], (seq<<8)|uint64(t))
-	return b
+	ik := make(iKey, len(ukey)+8)
+	copy(ik, ukey)
+	binary.LittleEndian.PutUint64(ik[len(ukey):], (seq<<8)|uint64(kt))
+	return ik
 }
 
-func parseIkey(p []byte) (ukey []byte, seq uint64, t vType, ok bool) {
-	if len(p) < 8 {
-		return
+func parseIkey(ik []byte) (ukey []byte, seq uint64, kt kType, err error) {
+	if len(ik) < 8 {
+		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid length")
 	}
-	num := binary.LittleEndian.Uint64(p[len(p)-8:])
-	seq, t = uint64(num>>8), vType(num&0xff)
-	if t > tVal {
-		return
+	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+	seq, kt = uint64(num>>8), kType(num&0xff)
+	if kt > ktVal {
+		return nil, 0, 0, newErrIkeyCorrupted(ik, "invalid type")
 	}
-	ukey = p[:len(p)-8]
-	ok = true
+	ukey = ik[:len(ik)-8]
 	return
 }
 
-func validIkey(p []byte) bool {
-	_, _, _, ok := parseIkey(p)
-	return ok
+func validIkey(ik []byte) bool {
+	_, _, _, err := parseIkey(ik)
+	return err == nil
 }
 
-func (p iKey) assert() {
-	if p == nil {
-		panic("nil iKey")
+func (ik iKey) assert() {
+	if ik == nil {
+		panic("leveldb: nil iKey")
 	}
-	if len(p) < 8 {
-		panic(fmt.Sprintf("invalid iKey %q, len=%d", []byte(p), len(p)))
+	if len(ik) < 8 {
+		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid length", []byte(ik), len(ik)))
 	}
 }
 
-func (p iKey) ok() bool {
-	if len(p) < 8 {
-		return false
-	}
-	_, _, ok := p.parseNum()
-	return ok
-}
-
-func (p iKey) ukey() []byte {
-	p.assert()
-	return p[:len(p)-8]
+func (ik iKey) ukey() []byte {
+	ik.assert()
+	return ik[:len(ik)-8]
 }
 
-func (p iKey) num() uint64 {
-	p.assert()
-	return binary.LittleEndian.Uint64(p[len(p)-8:])
+func (ik iKey) num() uint64 {
+	ik.assert()
+	return binary.LittleEndian.Uint64(ik[len(ik)-8:])
 }
 
-func (p iKey) parseNum() (seq uint64, t vType, ok bool) {
-	if p == nil {
-		panic("nil iKey")
+func (ik iKey) parseNum() (seq uint64, kt kType) {
+	num := ik.num()
+	seq, kt = uint64(num>>8), kType(num&0xff)
+	if kt > ktVal {
+		panic(fmt.Sprintf("leveldb: iKey %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
 	}
-	if len(p) < 8 {
-		return
-	}
-	num := p.num()
-	seq, t = uint64(num>>8), vType(num&0xff)
-	if t > tVal {
-		return 0, 0, false
-	}
-	ok = true
 	return
 }
 
-func (p iKey) String() string {
-	if len(p) == 0 {
+func (ik iKey) String() string {
+	if ik == nil {
 		return "<nil>"
 	}
-	if seq, t, ok := p.parseNum(); ok {
-		return fmt.Sprintf("%s,%s%d", shorten(string(p.ukey())), t, seq)
+
+	if ukey, seq, kt, err := parseIkey(ik); err == nil {
+		return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+	} else {
+		return "<invalid>"
 	}
-	return "<invalid>"
 }
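
The encode/decode pair above is easy to verify outside the package. A minimal, self-contained sketch of the internal-key layout that newIkey and parseIkey agree on (makeIkey is a hypothetical stand-in written only to mirror the encoding):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// makeIkey mirrors newIkey: user key followed by 8 bytes of
	// little-endian (seq<<8 | keyType).
	func makeIkey(ukey []byte, seq, kt uint64) []byte {
		ik := make([]byte, len(ukey)+8)
		copy(ik, ukey)
		binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|kt)
		return ik
	}

	func main() {
		ik := makeIkey([]byte("foo"), 100, 1) // 1 == ktVal
		num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
		fmt.Println(string(ik[:len(ik)-8]), num>>8, num&0xff) // foo 100 1
	}
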
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
index e307cfc1d938601a6f40815ddaf3c7ec7e1a2333..30eadf7847ed9b35495c2ee56f800de9fff79b84 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/key_test.go
@@ -15,8 +15,8 @@ import (
 
 var defaultIComparer = &iComparer{comparer.DefaultComparer}
 
-func ikey(key string, seq uint64, t vType) iKey {
-	return newIKey([]byte(key), uint64(seq), t)
+func ikey(key string, seq uint64, kt kType) iKey {
+	return newIkey([]byte(key), uint64(seq), kt)
 }
 
 func shortSep(a, b []byte) []byte {
@@ -37,27 +37,37 @@ func shortSuccessor(b []byte) []byte {
 	return dst
 }
 
-func testSingleKey(t *testing.T, key string, seq uint64, vt vType) {
-	ik := ikey(key, seq, vt)
+func testSingleKey(t *testing.T, key string, seq uint64, kt kType) {
+	ik := ikey(key, seq, kt)
 
 	if !bytes.Equal(ik.ukey(), []byte(key)) {
 		t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
 	}
 
-	if rseq, rt, ok := ik.parseNum(); ok {
+	rseq, rt := ik.parseNum()
+	if rseq != seq {
+		t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
+	}
+	if rt != kt {
+		t.Errorf("type does not equal, got %v, want %v", rt, kt)
+	}
+
+	if rukey, rseq, rt, kerr := parseIkey(ik); kerr == nil {
+		if !bytes.Equal(rukey, []byte(key)) {
+			t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key)
+		}
 		if rseq != seq {
 			t.Errorf("seq number does not equal, got %v, want %v", rseq, seq)
 		}
-
-		if rt != vt {
-			t.Errorf("type does not equal, got %v, want %v", rt, vt)
+		if rt != kt {
+			t.Errorf("type does not equal, got %v, want %v", rt, kt)
 		}
 	} else {
-		t.Error("cannot parse seq and type")
+		t.Errorf("key error: %v", kerr)
 	}
 }
 
-func TestIKey_EncodeDecode(t *testing.T) {
+func TestIkey_EncodeDecode(t *testing.T) {
 	keys := []string{"", "k", "hello", "longggggggggggggggggggggg"}
 	seqs := []uint64{
 		1, 2, 3,
@@ -67,8 +77,8 @@ func TestIKey_EncodeDecode(t *testing.T) {
 	}
 	for _, key := range keys {
 		for _, seq := range seqs {
-			testSingleKey(t, key, seq, tVal)
-			testSingleKey(t, "hello", 1, tDel)
+			testSingleKey(t, key, seq, ktVal)
+			testSingleKey(t, "hello", 1, ktDel)
 		}
 	}
 }
@@ -79,45 +89,45 @@ func assertBytes(t *testing.T, want, got []byte) {
 	}
 }
 
-func TestIKeyShortSeparator(t *testing.T) {
+func TestIkeyShortSeparator(t *testing.T) {
 	// When user keys are same
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("foo", 99, tVal)))
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("foo", 101, tVal)))
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("foo", 100, tVal)))
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("foo", 100, tDel)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 99, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 101, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 100, ktVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foo", 100, ktDel)))
 
 	// When user keys are misordered
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("bar", 99, tVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("bar", 99, ktVal)))
 
 	// When user keys are different, but correctly ordered
-	assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("hello", 200, tVal)))
+	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("hello", 200, ktVal)))
 
 	// When start user key is prefix of limit user key
-	assertBytes(t, ikey("foo", 100, tVal),
-		shortSep(ikey("foo", 100, tVal),
-			ikey("foobar", 200, tVal)))
+	assertBytes(t, ikey("foo", 100, ktVal),
+		shortSep(ikey("foo", 100, ktVal),
+			ikey("foobar", 200, ktVal)))
 
 	// When limit user key is prefix of start user key
-	assertBytes(t, ikey("foobar", 100, tVal),
-		shortSep(ikey("foobar", 100, tVal),
-			ikey("foo", 200, tVal)))
+	assertBytes(t, ikey("foobar", 100, ktVal),
+		shortSep(ikey("foobar", 100, ktVal),
+			ikey("foo", 200, ktVal)))
 }
 
-func TestIKeyShortestSuccessor(t *testing.T) {
-	assertBytes(t, ikey("g", uint64(kMaxSeq), tSeek),
-		shortSuccessor(ikey("foo", 100, tVal)))
-	assertBytes(t, ikey("\xff\xff", 100, tVal),
-		shortSuccessor(ikey("\xff\xff", 100, tVal)))
+func TestIkeyShortestSuccessor(t *testing.T) {
+	assertBytes(t, ikey("g", uint64(kMaxSeq), ktSeek),
+		shortSuccessor(ikey("foo", 100, ktVal)))
+	assertBytes(t, ikey("\xff\xff", 100, ktVal),
+		shortSuccessor(ikey("\xff\xff", 100, ktVal)))
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
index 245b1fd4d38cec1729c8f6b4dd3ff4ca9019a7f5..fefa007a7047c7c0f5dcf69769db92d74702a087 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
@@ -3,18 +3,9 @@ package leveldb
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
-func TestLeveldb(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Leveldb Suite")
-
-	RegisterTestingT(t)
-	testutil.RunDefer("teardown")
+func TestLevelDB(t *testing.T) {
+	testutil.RunSuite(t, "LevelDB Suite")
 }
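
The deleted ginkgo boilerplate now lives behind testutil.RunSuite (the memdb suite below gets the same treatment). The helper's body is not part of this diff; a plausible reconstruction from the removed call sites, offered only as an assumption:

	// Hypothetical shape of testutil.RunSuite, pieced together from the
	// boilerplate it replaces; the real helper lives in leveldb/testutil.
	func RunSuite(t *testing.T, name string) {
		RunDefer()

		ginkgo.RegisterFailHandler(ginkgo.Fail)
		ginkgo.RunSpecs(t, name)

		gomega.RegisterTestingT(t)
		RunDefer("teardown")
	}
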
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
index 7bcae992a8411858851158158155bb82da36f5ee..e5398873b79f15dde86afaa33247c502a425f818 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -12,12 +12,14 @@ import (
 	"sync"
 
 	"github.com/syndtr/goleveldb/leveldb/comparer"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 var (
-	ErrNotFound = util.ErrNotFound
+	ErrNotFound     = errors.ErrNotFound
+	ErrIterReleased = errors.New("leveldb/memdb: iterator released")
 )
 
 const tMaxHeight = 12
@@ -29,6 +31,7 @@ type dbIter struct {
 	node       int
 	forward    bool
 	key, value []byte
+	err        error
 }
 
 func (i *dbIter) fill(checkStart, checkLimit bool) bool {
@@ -59,6 +62,11 @@ func (i *dbIter) Valid() bool {
 }
 
 func (i *dbIter) First() bool {
+	if i.Released() {
+		i.err = ErrIterReleased
+		return false
+	}
+
 	i.forward = true
 	i.p.mu.RLock()
 	defer i.p.mu.RUnlock()
@@ -71,9 +79,11 @@ func (i *dbIter) First() bool {
 }
 
 func (i *dbIter) Last() bool {
-	if i.p == nil {
+	if i.Released() {
+		i.err = ErrIterReleased
 		return false
 	}
+
 	i.forward = false
 	i.p.mu.RLock()
 	defer i.p.mu.RUnlock()
@@ -86,9 +96,11 @@ func (i *dbIter) Last() bool {
 }
 
 func (i *dbIter) Seek(key []byte) bool {
-	if i.p == nil {
+	if i.Released() {
+		i.err = ErrIterReleased
 		return false
 	}
+
 	i.forward = true
 	i.p.mu.RLock()
 	defer i.p.mu.RUnlock()
@@ -100,9 +112,11 @@ func (i *dbIter) Seek(key []byte) bool {
 }
 
 func (i *dbIter) Next() bool {
-	if i.p == nil {
+	if i.Released() {
+		i.err = ErrIterReleased
 		return false
 	}
+
 	if i.node == 0 {
 		if !i.forward {
 			return i.First()
@@ -117,9 +131,11 @@ func (i *dbIter) Next() bool {
 }
 
 func (i *dbIter) Prev() bool {
-	if i.p == nil {
+	if i.Released() {
+		i.err = ErrIterReleased
 		return false
 	}
+
 	if i.node == 0 {
 		if i.forward {
 			return i.Last()
@@ -141,10 +157,10 @@ func (i *dbIter) Value() []byte {
 	return i.value
 }
 
-func (i *dbIter) Error() error { return nil }
+func (i *dbIter) Error() error { return i.err }
 
 func (i *dbIter) Release() {
-	if i.p != nil {
+	if !i.Released() {
 		i.p = nil
 		i.node = 0
 		i.key = nil
@@ -437,6 +453,8 @@ func (p *DB) Reset() {
 // New creates a new initalized in-memory key/value DB. The capacity
 // is the initial key/value buffer capacity. The capacity is advisory,
 // not enforced.
+//
+// The returned DB instance is goroutine-safe.
 func New(cmp comparer.BasicComparer, capacity int) *DB {
 	p := &DB{
 		cmp:       cmp,
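
The practical effect of the new err field and the Released() checks in memdb: positioning calls on a released iterator now fail and report why, instead of silently returning false. A minimal usage sketch against the vendored packages:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/comparer"
		"github.com/syndtr/goleveldb/leveldb/memdb"
	)

	func main() {
		db := memdb.New(comparer.DefaultComparer, 4096)
		db.Put([]byte("k"), []byte("v"))

		it := db.NewIterator(nil)
		it.Release()

		// Previously this returned false with a nil error; the cause is
		// now surfaced through Error().
		fmt.Println(it.Next())  // false
		fmt.Println(it.Error()) // leveldb/memdb: iterator released
	}
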
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
index 788539a87b5999ab845704c2d9a9aa48898cffc6..18c304b7f19a6c66b008e7fe2f6d879cf0a0a6ad 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
@@ -3,15 +3,9 @@ package memdb
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
-func TestMemdb(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Memdb Suite")
+func TestMemDB(t *testing.T) {
+	testutil.RunSuite(t, "MemDB Suite")
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
index f96a9d1ea33e6ff5cf18a448381d2dd2a9e9aabd..5dd6dbc7b7bbdb1d3e252c3cc1bb1995eb99ae4a 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
@@ -129,7 +129,7 @@ var _ = testutil.Defer(func() {
 				}
 
 				return db
-			})
+			}, nil, nil)
 		})
 	})
 })
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
index b940ce42778684847f17dc1c8d9a704171cf6b24..61f0eadf96f06ba6c26f6fa0286f1f88a5a5ec6b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -11,6 +11,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/cache"
 	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/filter"
+	"math"
 )
 
 const (
@@ -19,25 +20,57 @@ const (
 	GiB = MiB * 1024
 )
 
-const (
-	DefaultBlockCacheSize       = 8 * MiB
-	DefaultBlockRestartInterval = 16
-	DefaultBlockSize            = 4 * KiB
-	DefaultCompressionType      = SnappyCompression
-	DefaultMaxOpenFiles         = 1000
-	DefaultWriteBuffer          = 4 * MiB
+var (
+	DefaultBlockCacher                   = LRUCacher
+	DefaultBlockCacheCapacity            = 8 * MiB
+	DefaultBlockRestartInterval          = 16
+	DefaultBlockSize                     = 4 * KiB
+	DefaultCompactionExpandLimitFactor   = 25
+	DefaultCompactionGPOverlapsFactor    = 10
+	DefaultCompactionL0Trigger           = 4
+	DefaultCompactionSourceLimitFactor   = 1
+	DefaultCompactionTableSize           = 2 * MiB
+	DefaultCompactionTableSizeMultiplier = 1.0
+	DefaultCompactionTotalSize           = 10 * MiB
+	DefaultCompactionTotalSizeMultiplier = 10.0
+	DefaultCompressionType               = SnappyCompression
+	DefaultIteratorSamplingRate          = 1 * MiB
+	DefaultMaxMemCompationLevel          = 2
+	DefaultNumLevel                      = 7
+	DefaultOpenFilesCacher               = LRUCacher
+	DefaultOpenFilesCacheCapacity        = 500
+	DefaultWriteBuffer                   = 4 * MiB
+	DefaultWriteL0PauseTrigger           = 12
+	DefaultWriteL0SlowdownTrigger        = 8
 )
 
-type noCache struct{}
+// Cacher is a caching algorithm.
+type Cacher interface {
+	New(capacity int) cache.Cacher
+}
+
+type CacherFunc struct {
+	NewFunc func(capacity int) cache.Cacher
+}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+	if f.NewFunc != nil {
+		return f.NewFunc(capacity)
+	}
+	return nil
+}
 
-func (noCache) SetCapacity(capacity int)               {}
-func (noCache) GetNamespace(id uint64) cache.Namespace { return nil }
-func (noCache) Purge(fin cache.PurgeFin)               {}
-func (noCache) Zap(closed bool)                        {}
+func noCacher(int) cache.Cacher { return nil }
 
-var NoCache cache.Cache = noCache{}
+var (
+	// LRUCacher is the LRU-cache algorithm.
+	LRUCacher = &CacherFunc{cache.NewLRU}
 
-// Compression is the per-block compression algorithm to use.
+	// NoCacher disables the caching algorithm.
+	NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
 type Compression uint
 
 func (c Compression) String() string {
@@ -59,34 +92,47 @@ const (
 	nCompression
 )
 
-// Strict is the DB strict level.
+// Strict is the DB 'strict level'.
 type Strict uint
 
 const (
 	// If present then a corrupted or invalid chunk or block in manifest
-	// journal will cause an error istead of being dropped.
+	// journal will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted manifest from being opened.
 	StrictManifest Strict = 1 << iota
 
-	// If present then a corrupted or invalid chunk or block in journal
-	// will cause an error istead of being dropped.
-	StrictJournal
-
 	// If present then journal chunk checksum will be verified.
 	StrictJournalChecksum
 
-	// If present then an invalid key/value pair will cause an error
-	// instead of being skipped.
-	StrictIterator
+	// If present then a corrupted or invalid chunk or block in journal
+	// will cause an error instead of being dropped.
+	// This will prevent a database with a corrupted journal from being opened.
+	StrictJournal
 
 	// If present then 'sorted table' block checksum will be verified.
+	// This affects both 'read operation' and compaction.
 	StrictBlockChecksum
 
+	// If present then a corrupted 'sorted table' will fail compaction.
+	// The database will enter read-only mode.
+	StrictCompaction
+
+	// If present then a corrupted 'sorted table' will halt 'read operation'.
+	StrictReader
+
+	// If present then leveldb.Recover will drop corrupted 'sorted table'.
+	StrictRecovery
+
+	// This is only applicable to ReadOptions; if present then the ReadOptions'
+	// 'strict level' will override the global one.
+	StrictOverride
+
 	// StrictAll enables all strict flags.
-	StrictAll = StrictManifest | StrictJournal | StrictJournalChecksum | StrictIterator | StrictBlockChecksum
+	StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
 
 	// DefaultStrict is the default strict flags. Specify any strict flags
 	// will override default strict flags as whole (i.e. not OR'ed).
-	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum
+	DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
 
 	// NoStrict disables all strict flags. Override default strict flags.
 	NoStrict = ^StrictAll
@@ -101,11 +147,17 @@ type Options struct {
 	// The default value is nil
 	AltFilters []filter.Filter
 
-	// BlockCache provides per-block caching for LevelDB. Specify NoCache to
-	// disable block caching.
+	// BlockCacher provides the cache algorithm for LevelDB 'sorted table' block caching.
+	// Specify NoCacher to disable the caching algorithm.
 	//
-	// By default LevelDB will create LRU-cache with capacity of 8MiB.
-	BlockCache cache.Cache
+	// The default value is LRUCacher.
+	BlockCacher Cacher
+
+	// BlockCacheCapacity defines the capacity of the 'sorted table' block cache.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+	//
+	// The default value is 8MiB.
+	BlockCacheCapacity int
 
 	// BlockRestartInterval is the number of keys between restart points for
 	// delta encoding of keys.
@@ -119,6 +171,73 @@ type Options struct {
 	// The default value is 4KiB.
 	BlockSize int
 
+	// CompactionExpandLimitFactor limits compaction size after expansion.
+	// This will be multiplied by table size limit at compaction target level.
+	//
+	// The default value is 25.
+	CompactionExpandLimitFactor int
+
+	// CompactionGPOverlapsFactor limits the overlaps in grandparent (Level + 2) that a
+	// single 'sorted table' generates.
+	// This will be multiplied by table size limit at grandparent level.
+	//
+	// The default value is 10.
+	CompactionGPOverlapsFactor int
+
+	// CompactionL0Trigger defines the number of 'sorted tables' at level-0 that will
+	// trigger compaction.
+	//
+	// The default value is 4.
+	CompactionL0Trigger int
+
+	// CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+	// level-0.
+	// This will be multiplied by table size limit at compaction target level.
+	//
+	// The default value is 1.
+	CompactionSourceLimitFactor int
+
+	// CompactionTableSize limits the size of the 'sorted table' that compaction generates.
+	// The limits for each level will be calculated as:
+	//   CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+	//
+	// The default value is 2MiB.
+	CompactionTableSize int
+
+	// CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+	//
+	// The default value is 1.
+	CompactionTableSizeMultiplier float64
+
+	// CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+	// CompactionTableSize.
+	// Use zero to skip a level.
+	//
+	// The default value is nil.
+	CompactionTableSizeMultiplierPerLevel []float64
+
+	// CompactionTotalSize limits the total size of 'sorted tables' for each level.
+	// The limits for each level will be calculated as:
+	//   CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+	// The multiplier for each level can also be fine-tuned using
+	// CompactionTotalSizeMultiplierPerLevel.
+	//
+	// The default value is 10MiB.
+	CompactionTotalSize int
+
+	// CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+	//
+	// The default value is 10.
+	CompactionTotalSizeMultiplier float64
+
+	// CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+	// CompactionTotalSize.
+	// Use zero to skip a level.
+	//
+	// The default value is nil.
+	CompactionTotalSizeMultiplierPerLevel []float64
+
 	// Comparer defines a total ordering over the space of []byte keys: a 'less
 	// than' relationship. The same comparison algorithm must be used for reads
 	// and writes over the lifetime of the DB.
@@ -126,11 +245,22 @@ type Options struct {
 	// The default value uses the same ordering as bytes.Compare.
 	Comparer comparer.Comparer
 
-	// Compression defines the per-block compression to use.
+	// Compression defines the 'sorted table' block compression to use.
 	//
 	// The default value (DefaultCompression) uses snappy compression.
 	Compression Compression
 
+	// DisableBlockCache allows disabling the use of cache.Cache functionality on
+	// 'sorted table' blocks.
+	//
+	// The default value is false.
+	DisableBlockCache bool
+
+	// DisableCompactionBackoff allows disabling compaction retry backoff.
+	//
+	// The default value is false.
+	DisableCompactionBackoff bool
+
 	// ErrorIfExist defines whether an error should returned if the DB already
 	// exist.
 	//
@@ -159,12 +289,37 @@ type Options struct {
 	// The default value is nil.
 	Filter filter.Filter
 
-	// MaxOpenFiles defines maximum number of open files to kept around
-	// (cached). This is not an hard limit, actual open files may exceed
-	// the defined value.
+	// IteratorSamplingRate defines the approximate gap (in bytes) between read
+	// sampling of an iterator. The samples will be used to determine when
+	// compaction should be triggered.
+	//
+	// The default is 1MiB.
+	IteratorSamplingRate int
+
+	// MaxMemCompationLevel defines the maximum level that a newly compacted 'memdb'
+	// will be pushed into if it doesn't create overlap. This should be less than
+	// NumLevel. Use -1 for level-0.
 	//
-	// The default value is 1000.
-	MaxOpenFiles int
+	// The default is 2.
+	MaxMemCompationLevel int
+
+	// NumLevel defines the number of database levels. This shouldn't be changed
+	// between opens, or the database will panic.
+	//
+	// The default is 7.
+	NumLevel int
+
+	// OpenFilesCacher provides the cache algorithm for open files caching.
+	// Specify NoCacher to disable the caching algorithm.
+	//
+	// The default value is LRUCacher.
+	OpenFilesCacher Cacher
+
+	// OpenFilesCacheCapacity defines the capacity of the open files cache.
+	// Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
+	//
+	// The default value is 500.
+	OpenFilesCacheCapacity int
 
 	// Strict defines the DB strict level.
 	Strict Strict
@@ -177,6 +332,18 @@ type Options struct {
 	//
 	// The default value is 4MiB.
 	WriteBuffer int
+
+	// WriteL0PauseTrigger defines the number of 'sorted tables' at level-0 that will
+	// pause write.
+	//
+	// The default value is 12.
+	WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger defines the number of 'sorted tables' at level-0 that
+	// will trigger write slowdown.
+	//
+	// The default value is 8.
+	WriteL0SlowdownTrigger int
 }
 
 func (o *Options) GetAltFilters() []filter.Filter {
@@ -186,11 +353,22 @@ func (o *Options) GetAltFilters() []filter.Filter {
 	return o.AltFilters
 }
 
-func (o *Options) GetBlockCache() cache.Cache {
-	if o == nil {
+func (o *Options) GetBlockCacher() Cacher {
+	if o == nil || o.BlockCacher == nil {
+		return DefaultBlockCacher
+	} else if o.BlockCacher == NoCacher {
 		return nil
 	}
-	return o.BlockCache
+	return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+	if o == nil || o.BlockCacheCapacity == 0 {
+		return DefaultBlockCacheCapacity
+	} else if o.BlockCacheCapacity < 0 {
+		return 0
+	}
+	return o.BlockCacheCapacity
 }
 
 func (o *Options) GetBlockRestartInterval() int {
@@ -207,6 +385,79 @@ func (o *Options) GetBlockSize() int {
 	return o.BlockSize
 }
 
+func (o *Options) GetCompactionExpandLimit(level int) int {
+	factor := DefaultCompactionExpandLimitFactor
+	if o != nil && o.CompactionExpandLimitFactor > 0 {
+		factor = o.CompactionExpandLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+	factor := DefaultCompactionGPOverlapsFactor
+	if o != nil && o.CompactionGPOverlapsFactor > 0 {
+		factor = o.CompactionGPOverlapsFactor
+	}
+	return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+	if o == nil || o.CompactionL0Trigger == 0 {
+		return DefaultCompactionL0Trigger
+	}
+	return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+	factor := DefaultCompactionSourceLimitFactor
+	if o != nil && o.CompactionSourceLimitFactor > 0 {
+		factor = o.CompactionSourceLimitFactor
+	}
+	return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+	var (
+		base = DefaultCompactionTableSize
+		mult float64
+	)
+	if o != nil {
+		if o.CompactionTableSize > 0 {
+			base = o.CompactionTableSize
+		}
+		if len(o.CompactionTableSizeMultiplierPerLevel) > level && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+			mult = o.CompactionTableSizeMultiplierPerLevel[level]
+		} else if o.CompactionTableSizeMultiplier > 0 {
+			mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+		}
+	}
+	if mult == 0 {
+		mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+	}
+	return int(float64(base) * mult)
+}
+
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+	var (
+		base = DefaultCompactionTotalSize
+		mult float64
+	)
+	if o != nil {
+		if o.CompactionTotalSize > 0 {
+			base = o.CompactionTotalSize
+		}
+		if len(o.CompactionTotalSizeMultiplierPerLevel) > level && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+			mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+		} else if o.CompactionTotalSizeMultiplier > 0 {
+			mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+		}
+	}
+	if mult == 0 {
+		mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+	}
+	return int64(float64(base) * mult)
+}
+
 func (o *Options) GetComparer() comparer.Comparer {
 	if o == nil || o.Comparer == nil {
 		return comparer.DefaultComparer
@@ -221,6 +472,13 @@ func (o *Options) GetCompression() Compression {
 	return o.Compression
 }
 
+func (o *Options) GetDisableCompactionBackoff() bool {
+	if o == nil {
+		return false
+	}
+	return o.DisableCompactionBackoff
+}
+
 func (o *Options) GetErrorIfExist() bool {
 	if o == nil {
 		return false
@@ -242,11 +500,52 @@ func (o *Options) GetFilter() filter.Filter {
 	return o.Filter
 }
 
-func (o *Options) GetMaxOpenFiles() int {
-	if o == nil || o.MaxOpenFiles <= 0 {
-		return DefaultMaxOpenFiles
+func (o *Options) GetIteratorSamplingRate() int {
+	if o == nil || o.IteratorSamplingRate <= 0 {
+		return DefaultIteratorSamplingRate
+	}
+	return o.IteratorSamplingRate
+}
+
+func (o *Options) GetMaxMemCompationLevel() int {
+	level := DefaultMaxMemCompationLevel
+	if o != nil {
+		if o.MaxMemCompationLevel > 0 {
+			level = o.MaxMemCompationLevel
+		} else if o.MaxMemCompationLevel < 0 {
+			level = 0
+		}
+	}
+	if level >= o.GetNumLevel() {
+		return o.GetNumLevel() - 1
+	}
+	return level
+}
+
+func (o *Options) GetNumLevel() int {
+	if o == nil || o.NumLevel <= 0 {
+		return DefaultNumLevel
+	}
+	return o.NumLevel
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+	if o == nil || o.OpenFilesCacher == nil {
+		return DefaultOpenFilesCacher
 	}
-	return o.MaxOpenFiles
+	if o.OpenFilesCacher == NoCacher {
+		return nil
+	}
+	return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+	if o == nil || o.OpenFilesCacheCapacity == 0 {
+		return DefaultOpenFilesCacheCapacity
+	} else if o.OpenFilesCacheCapacity < 0 {
+		return 0
+	}
+	return o.OpenFilesCacheCapacity
 }
 
 func (o *Options) GetStrict(strict Strict) bool {
@@ -263,6 +562,20 @@ func (o *Options) GetWriteBuffer() int {
 	return o.WriteBuffer
 }
 
+func (o *Options) GetWriteL0PauseTrigger() int {
+	if o == nil || o.WriteL0PauseTrigger == 0 {
+		return DefaultWriteL0PauseTrigger
+	}
+	return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+	if o == nil || o.WriteL0SlowdownTrigger == 0 {
+		return DefaultWriteL0SlowdownTrigger
+	}
+	return o.WriteL0SlowdownTrigger
+}
+
 // ReadOptions holds the optional parameters for 'read operation'. The
 // 'read operation' includes Get, Find and NewIterator.
 type ReadOptions struct {
@@ -273,8 +586,8 @@ type ReadOptions struct {
 	// The default value is false.
 	DontFillCache bool
 
-	// Strict overrides global DB strict level. Only StrictIterator and
-	// StrictBlockChecksum that does have effects here.
+	// Strict will be OR'ed with the global DB 'strict level' unless StrictOverride
+	// is present. Currently only StrictReader has effect here.
 	Strict Strict
 }
 
@@ -316,3 +629,11 @@ func (wo *WriteOptions) GetSync() bool {
 	}
 	return wo.Sync
 }
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+	if ro.GetStrict(StrictOverride) {
+		return ro.GetStrict(strict)
+	} else {
+		return o.GetStrict(strict) || ro.GetStrict(strict)
+	}
+}
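
Taken together, the getters keep a zero-value Options usable: every field falls back to a package default, negative capacities mean zero, and the new StrictOverride bit lets a ReadOptions replace, rather than extend, the global strict level. A small sketch of that behavior, assuming the vendored opt package:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb/opt"
	)

	func main() {
		o := &opt.Options{
			BlockCacher:            opt.NoCacher, // disable block caching
			OpenFilesCacheCapacity: -1,           // -1 means zero
		}
		fmt.Println(o.GetBlockCacher() == nil)     // true
		fmt.Println(o.GetOpenFilesCacheCapacity()) // 0

		var nilOpts *opt.Options
		fmt.Println(nilOpts.GetNumLevel()) // 7, the package default

		// Without StrictOverride the ReadOptions bits are OR'ed with the
		// global ones; with it, they replace them entirely.
		ro := &opt.ReadOptions{Strict: opt.StrictOverride}
		fmt.Println(opt.GetStrict(o, ro, opt.StrictReader)) // false
	}
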
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
index fc6a969650e1c01be8a76ad126f3327e331f5326..a3d84ef60d53d4afb73ba7bdf1f848ebbb202808 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/options.go
@@ -7,35 +7,86 @@
 package leveldb
 
 import (
-	"github.com/syndtr/goleveldb/leveldb/cache"
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 )
 
-func (s *session) setOptions(o *opt.Options) {
-	s.o = &opt.Options{}
+func dupOptions(o *opt.Options) *opt.Options {
+	newo := &opt.Options{}
 	if o != nil {
-		*s.o = *o
+		*newo = *o
+	}
+	if newo.Strict == 0 {
+		newo.Strict = opt.DefaultStrict
 	}
+	return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+	no := dupOptions(o)
 	// Alternative filters.
 	if filters := o.GetAltFilters(); len(filters) > 0 {
-		s.o.AltFilters = make([]filter.Filter, len(filters))
+		no.AltFilters = make([]filter.Filter, len(filters))
 		for i, filter := range filters {
-			s.o.AltFilters[i] = &iFilter{filter}
+			no.AltFilters[i] = &iFilter{filter}
 		}
 	}
-	// Block cache.
-	switch o.GetBlockCache() {
-	case nil:
-		s.o.BlockCache = cache.NewLRUCache(opt.DefaultBlockCacheSize)
-	case opt.NoCache:
-		s.o.BlockCache = nil
-	}
 	// Comparer.
 	s.icmp = &iComparer{o.GetComparer()}
-	s.o.Comparer = s.icmp
+	no.Comparer = s.icmp
 	// Filter.
 	if filter := o.GetFilter(); filter != nil {
-		s.o.Filter = &iFilter{filter}
+		no.Filter = &iFilter{filter}
 	}
+
+	s.o = &cachedOptions{Options: no}
+	s.o.cache()
+}
+
+type cachedOptions struct {
+	*opt.Options
+
+	compactionExpandLimit []int
+	compactionGPOverlaps  []int
+	compactionSourceLimit []int
+	compactionTableSize   []int
+	compactionTotalSize   []int64
+}
+
+func (co *cachedOptions) cache() {
+	numLevel := co.Options.GetNumLevel()
+
+	co.compactionExpandLimit = make([]int, numLevel)
+	co.compactionGPOverlaps = make([]int, numLevel)
+	co.compactionSourceLimit = make([]int, numLevel)
+	co.compactionTableSize = make([]int, numLevel)
+	co.compactionTotalSize = make([]int64, numLevel)
+
+	for level := 0; level < numLevel; level++ {
+		co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+		co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+		co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+		co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+		co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+	}
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+	return co.compactionExpandLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+	return co.compactionGPOverlaps[level]
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+	return co.compactionSourceLimit[level]
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+	return co.compactionTableSize[level]
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+	return co.compactionTotalSize[level]
 }
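
cachedOptions leans on Go method shadowing: the embedded *opt.Options supplies every getter, and only the five per-level getters are redefined to read the precomputed slices, so the math.Pow calls in opt run once per open rather than on every compaction decision. The pattern in miniature (all names invented for illustration):

	package main

	import "fmt"

	type options struct{}

	// Stands in for a getter that recomputes on every call.
	func (options) TableSize(level int) int { return 2 << uint(level) }

	type cachedOptions struct {
		options         // every other method passes through
		tableSize []int // precomputed per-level answers
	}

	// Shadows options.TableSize with a plain slice lookup.
	func (c cachedOptions) TableSize(level int) int { return c.tableSize[level] }

	func main() {
		c := cachedOptions{tableSize: []int{2, 4, 8}}
		fmt.Println(c.TableSize(2)) // 8, precomputed
	}
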
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
index 6b2a61683e544106bced9cb92705a6b3456e4f7d..b3906f7fc8c07b6e402be12c7326f1e202cdcb15 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
@@ -7,12 +7,13 @@
 package leveldb
 
 import (
-	"errors"
+	"fmt"
 	"io"
 	"os"
 	"sync"
 	"sync/atomic"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/journal"
 	"github.com/syndtr/goleveldb/leveldb/opt"
@@ -20,18 +21,31 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
+type ErrManifestCorrupted struct {
+	Field  string
+	Reason string
+}
+
+func (e *ErrManifestCorrupted) Error() string {
+	return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+func newErrManifestCorrupted(f storage.File, field, reason string) error {
+	return errors.NewErrCorrupted(f, &ErrManifestCorrupted{field, reason})
+}
+
 // session represent a persistent database session.
 type session struct {
 	// Need 64-bit alignment.
-	stFileNum        uint64 // current unused file number
+	stNextFileNum    uint64 // current unused file number
 	stJournalNum     uint64 // current journal file number; need external synchronization
 	stPrevJournalNum uint64 // prev journal file number; no longer used; for compatibility with older version of leveldb
-	stSeq            uint64 // last mem compacted seq; need external synchronization
+	stSeqNum         uint64 // last mem compacted seq; need external synchronization
 	stTempFileNum    uint64
 
 	stor     storage.Storage
 	storLock util.Releaser
-	o        *opt.Options
+	o        *cachedOptions
 	icmp     *iComparer
 	tops     *tOps
 
@@ -39,11 +53,12 @@ type session struct {
 	manifestWriter storage.Writer
 	manifestFile   storage.File
 
-	stCPtrs   [kNumLevels]iKey // compact pointers; need external synchronization
-	stVersion *version         // current version
-	vmu       sync.Mutex
+	stCompPtrs []iKey   // compaction pointers; need external synchronization
+	stVersion  *version // current version
+	vmu        sync.Mutex
 }
 
+// Creates a new, initialized session instance.
 func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
 	if stor == nil {
 		return nil, os.ErrInvalid
@@ -53,22 +68,20 @@ func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
 		return
 	}
 	s = &session{
-		stor:     stor,
-		storLock: storLock,
+		stor:       stor,
+		storLock:   storLock,
+		stCompPtrs: make([]iKey, o.GetNumLevel()),
 	}
 	s.setOptions(o)
-	s.tops = newTableOps(s, s.o.GetMaxOpenFiles())
-	s.setVersion(&version{s: s})
-	s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock D·DeletedEntry L·Level Q·SeqNum T·TimeElapsed")
+	s.tops = newTableOps(s)
+	s.setVersion(newVersion(s))
+	s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
 	return
 }
 
 // Close session.
 func (s *session) close() {
 	s.tops.close()
-	if bc := s.o.GetBlockCache(); bc != nil {
-		bc.Purge(nil)
-	}
 	if s.manifest != nil {
 		s.manifest.Close()
 	}
@@ -81,6 +94,7 @@ func (s *session) close() {
 	s.stVersion = nil
 }
 
+// Release session lock.
 func (s *session) release() {
 	s.storLock.Release()
 }
@@ -98,26 +112,26 @@ func (s *session) recover() (err error) {
 			// Don't return os.ErrNotExist if the underlying storage contains
 			// other files that belong to LevelDB. So the DB won't get trashed.
 			if files, _ := s.stor.GetFiles(storage.TypeAll); len(files) > 0 {
-				err = ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest file missing")}
+				err = &errors.ErrCorrupted{File: &storage.FileInfo{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
 			}
 		}
 	}()
 
-	file, err := s.stor.GetManifest()
+	m, err := s.stor.GetManifest()
 	if err != nil {
 		return
 	}
 
-	reader, err := file.Open()
+	reader, err := m.Open()
 	if err != nil {
 		return
 	}
 	defer reader.Close()
 	strict := s.o.GetStrict(opt.StrictManifest)
-	jr := journal.NewReader(reader, dropper{s, file}, strict, true)
+	jr := journal.NewReader(reader, dropper{s, m}, strict, true)
 
-	staging := s.version_NB().newStaging()
-	rec := &sessionRecord{}
+	staging := s.stVersion.newStaging()
+	rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
 	for {
 		var r io.Reader
 		r, err = jr.Next()
@@ -126,51 +140,57 @@ func (s *session) recover() (err error) {
 				err = nil
 				break
 			}
-			return
+			return errors.SetFile(err, m)
 		}
 
 		err = rec.decode(r)
 		if err == nil {
 			// save compact pointers
-			for _, rp := range rec.compactionPointers {
-				s.stCPtrs[rp.level] = iKey(rp.key)
+			for _, r := range rec.compPtrs {
+				s.stCompPtrs[r.level] = iKey(r.ikey)
 			}
 			// commit record to version staging
 			staging.commit(rec)
-		} else if strict {
-			return ErrCorrupted{Type: CorruptedManifest, Err: err}
 		} else {
-			s.logf("manifest error: %v (skipped)", err)
+			err = errors.SetFile(err, m)
+			if strict || !errors.IsCorrupted(err) {
+				return
+			} else {
+				s.logf("manifest error: %v (skipped)", errors.SetFile(err, m))
+			}
 		}
-		rec.resetCompactionPointers()
+		rec.resetCompPtrs()
 		rec.resetAddedTables()
 		rec.resetDeletedTables()
 	}
 
 	switch {
 	case !rec.has(recComparer):
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing comparer name")}
+		return newErrManifestCorrupted(m, "comparer", "missing")
 	case rec.comparer != s.icmp.uName():
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: comparer mismatch, " + "want '" + s.icmp.uName() + "', " + "got '" + rec.comparer + "'")}
-	case !rec.has(recNextNum):
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing next file number")}
+		return newErrManifestCorrupted(m, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+	case !rec.has(recNextFileNum):
+		return newErrManifestCorrupted(m, "next-file-num", "missing")
 	case !rec.has(recJournalNum):
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing journal file number")}
-	case !rec.has(recSeq):
-		return ErrCorrupted{Type: CorruptedManifest, Err: errors.New("leveldb: manifest missing seq number")}
+		return newErrManifestCorrupted(m, "journal-file-num", "missing")
+	case !rec.has(recSeqNum):
+		return newErrManifestCorrupted(m, "seq-num", "missing")
 	}
 
-	s.manifestFile = file
+	s.manifestFile = m
 	s.setVersion(staging.finish())
-	s.setFileNum(rec.nextNum)
+	s.setNextFileNum(rec.nextFileNum)
 	s.recordCommited(rec)
 	return nil
 }
 
 // Commit session; need external synchronization.
 func (s *session) commit(r *sessionRecord) (err error) {
+	v := s.version()
+	defer v.release()
+
 	// spawn new version based on current version
-	nv := s.version_NB().spawn(r)
+	nv := v.spawn(r)
 
 	if s.manifest == nil {
 		// manifest journal writer not yet created, create one
@@ -189,22 +209,22 @@ func (s *session) commit(r *sessionRecord) (err error) {
 
 // Pick a compaction based on current state; need external synchronization.
 func (s *session) pickCompaction() *compaction {
-	v := s.version_NB()
+	v := s.version()
 
 	var level int
 	var t0 tFiles
 	if v.cScore >= 1 {
 		level = v.cLevel
-		cp := s.stCPtrs[level]
-		tt := v.tables[level]
-		for _, t := range tt {
-			if cp == nil || s.icmp.Compare(t.max, cp) > 0 {
+		cptr := s.stCompPtrs[level]
+		tables := v.tables[level]
+		for _, t := range tables {
+			if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
 				t0 = append(t0, t)
 				break
 			}
 		}
 		if len(t0) == 0 {
-			t0 = append(t0, tt[0])
+			t0 = append(t0, tables[0])
 		}
 	} else {
 		if p := atomic.LoadPointer(&v.cSeek); p != nil {
@@ -212,29 +232,21 @@ func (s *session) pickCompaction() *compaction {
 			level = ts.level
 			t0 = append(t0, ts.table)
 		} else {
+			v.release()
 			return nil
 		}
 	}
 
-	c := &compaction{s: s, version: v, level: level}
-	if level == 0 {
-		min, max := t0.getRange(s.icmp)
-		t0 = nil
-		v.tables[0].getOverlaps(min.ukey(), max.ukey(), &t0, false, s.icmp.ucmp)
-	}
-
-	c.tables[0] = t0
-	c.expand()
-	return c
+	return newCompaction(s, v, level, t0)
 }
 
 // Create compaction from given level and range; need external synchronization.
-func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
-	v := s.version_NB()
+func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
+	v := s.version()
 
-	var t0 tFiles
-	v.tables[level].getOverlaps(min, max, &t0, level != 0, s.icmp.ucmp)
+	t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
 	if len(t0) == 0 {
+		v.release()
 		return nil
 	}
 
@@ -243,7 +255,7 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
 	// and we must not pick one file and drop another older file if the
 	// two files overlap.
 	if level > 0 {
-		limit := uint64(kMaxTableSize)
+		limit := uint64(v.s.o.GetCompactionSourceLimit(level))
 		total := uint64(0)
 		for i, t := range t0 {
 			total += t.size
@@ -255,90 +267,124 @@ func (s *session) getCompactionRange(level int, min, max []byte) *compaction {
 		}
 	}
 
-	c := &compaction{s: s, version: v, level: level}
-	c.tables[0] = t0
+	return newCompaction(s, v, level, t0)
+}
+
+func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
+	c := &compaction{
+		s:             s,
+		v:             v,
+		level:         level,
+		tables:        [2]tFiles{t0, nil},
+		maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
+		tPtrs:         make([]int, s.o.GetNumLevel()),
+	}
 	c.expand()
+	c.save()
 	return c
 }
 
-// compaction represent a compaction state
+// compaction represents a compaction state.
 type compaction struct {
-	s       *session
-	version *version
+	s *session
+	v *version
+
+	level         int
+	tables        [2]tFiles
+	maxGPOverlaps uint64
+
+	gp                tFiles
+	gpi               int
+	seenKey           bool
+	gpOverlappedBytes uint64
+	imin, imax        iKey
+	tPtrs             []int
+	released          bool
+
+	snapGPI               int
+	snapSeenKey           bool
+	snapGPOverlappedBytes uint64
+	snapTPtrs             []int
+}
 
-	level  int
-	tables [2]tFiles
+func (c *compaction) save() {
+	c.snapGPI = c.gpi
+	c.snapSeenKey = c.seenKey
+	c.snapGPOverlappedBytes = c.gpOverlappedBytes
+	c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
 
-	gp              tFiles
-	gpidx           int
-	seenKey         bool
-	overlappedBytes uint64
-	min, max        iKey
+func (c *compaction) restore() {
+	c.gpi = c.snapGPI
+	c.seenKey = c.snapSeenKey
+	c.gpOverlappedBytes = c.snapGPOverlappedBytes
+	c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
 
-	tPtrs [kNumLevels]int
+func (c *compaction) release() {
+	if !c.released {
+		c.released = true
+		c.v.release()
+	}
 }
 
 // Expand compacted tables; need external synchronization.
 func (c *compaction) expand() {
-	s := c.s
-	v := c.version
-
-	level := c.level
-	vt0, vt1 := v.tables[level], v.tables[level+1]
+	limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
+	vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
 
 	t0, t1 := c.tables[0], c.tables[1]
-	min, max := t0.getRange(s.icmp)
-	vt1.getOverlaps(min.ukey(), max.ukey(), &t1, true, s.icmp.ucmp)
-
-	// Get entire range covered by compaction
-	amin, amax := append(t0, t1...).getRange(s.icmp)
+	imin, imax := t0.getRange(c.s.icmp)
+	// We expand t0 here just in case a ukey hops across tables.
+	t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
+	if len(t0) != len(c.tables[0]) {
+		imin, imax = t0.getRange(c.s.icmp)
+	}
+	t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+	// Get entire range covered by compaction.
+	amin, amax := append(t0, t1...).getRange(c.s.icmp)
 
 	// See if we can grow the number of inputs in "level" without
 	// changing the number of "level+1" files we pick up.
 	if len(t1) > 0 {
-		var exp0 tFiles
-		vt0.getOverlaps(amin.ukey(), amax.ukey(), &exp0, level != 0, s.icmp.ucmp)
-		if len(exp0) > len(t0) && t1.size()+exp0.size() < kExpCompactionMaxBytes {
-			var exp1 tFiles
-			xmin, xmax := exp0.getRange(s.icmp)
-			vt1.getOverlaps(xmin.ukey(), xmax.ukey(), &exp1, true, s.icmp.ucmp)
+		exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
+		if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+			xmin, xmax := exp0.getRange(c.s.icmp)
+			exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
 			if len(exp1) == len(t1) {
-				s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
-					level, level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+				c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+					c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
 					len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
-				min, max = xmin, xmax
+				imin, imax = xmin, xmax
 				t0, t1 = exp0, exp1
-				amin, amax = append(t0, t1...).getRange(s.icmp)
+				amin, amax = append(t0, t1...).getRange(c.s.icmp)
 			}
 		}
 	}
 
 	// Compute the set of grandparent files that overlap this compaction
 	// (parent == level+1; grandparent == level+2)
-	if level+2 < kNumLevels {
-		v.tables[level+2].getOverlaps(amin.ukey(), amax.ukey(), &c.gp, true, s.icmp.ucmp)
+	if c.level+2 < c.s.o.GetNumLevel() {
+		c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
 	}
 
 	c.tables[0], c.tables[1] = t0, t1
-	c.min, c.max = min, max
+	c.imin, c.imax = imin, imax
 }
 
 // Check whether compaction is trivial.
 func (c *compaction) trivial() bool {
-	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= kMaxGrandParentOverlapBytes
+	return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
 }
 
-func (c *compaction) isBaseLevelForKey(key []byte) bool {
-	s := c.s
-	v := c.version
-
-	for level, tt := range v.tables[c.level+2:] {
-		for c.tPtrs[level] < len(tt) {
-			t := tt[c.tPtrs[level]]
-			if s.icmp.uCompare(key, t.max.ukey()) <= 0 {
-				// We've advanced far enough
-				if s.icmp.uCompare(key, t.min.ukey()) >= 0 {
-					// Key falls in this file's range, so definitely not base level
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+	for level, tables := range c.v.tables[c.level+2:] {
+		for c.tPtrs[level] < len(tables) {
+			t := tables[c.tPtrs[level]]
+			if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+				// We've advanced far enough.
+				if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					// Key falls in this file's range, so definitely not base level.
 					return false
 				}
 				break
@@ -349,55 +395,61 @@ func (c *compaction) isBaseLevelForKey(key []byte) bool {
 	return true
 }
 
-func (c *compaction) shouldStopBefore(key iKey) bool {
-	for ; c.gpidx < len(c.gp); c.gpidx++ {
-		gp := c.gp[c.gpidx]
-		if c.s.icmp.Compare(key, gp.max) <= 0 {
+func (c *compaction) shouldStopBefore(ikey iKey) bool {
+	for ; c.gpi < len(c.gp); c.gpi++ {
+		gp := c.gp[c.gpi]
+		if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
 			break
 		}
 		if c.seenKey {
-			c.overlappedBytes += gp.size
+			c.gpOverlappedBytes += gp.size
 		}
 	}
 	c.seenKey = true
 
-	if c.overlappedBytes > kMaxGrandParentOverlapBytes {
-		// Too much overlap for current output; start new output
-		c.overlappedBytes = 0
+	if c.gpOverlappedBytes > c.maxGPOverlaps {
+		// Too much overlap for current output; start new output.
+		c.gpOverlappedBytes = 0
 		return true
 	}
 	return false
 }
 
+// Creates an iterator.
 func (c *compaction) newIterator() iterator.Iterator {
-	s := c.s
-
-	level := c.level
-	icap := 2
+	// Creates iterator slice.
+	icap := len(c.tables)
 	if c.level == 0 {
+		// Special case for level-0
 		icap = len(c.tables[0]) + 1
 	}
 	its := make([]iterator.Iterator, 0, icap)
 
+	// Options.
 	ro := &opt.ReadOptions{
 		DontFillCache: true,
+		Strict:        opt.StrictOverride,
+	}
+	strict := c.s.o.GetStrict(opt.StrictCompaction)
+	if strict {
+		ro.Strict |= opt.StrictReader
 	}
-	strict := s.o.GetStrict(opt.StrictIterator)
 
-	for i, tt := range c.tables {
-		if len(tt) == 0 {
+	for i, tables := range c.tables {
+		if len(tables) == 0 {
 			continue
 		}
 
-		if level+i == 0 {
-			for _, t := range tt {
-				its = append(its, s.tops.newIterator(t, nil, ro))
+		// Level-0 tables are not sorted and may overlap each other.
+		if c.level+i == 0 {
+			for _, t := range tables {
+				its = append(its, c.s.tops.newIterator(t, nil, ro))
 			}
 		} else {
-			it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, nil, ro), strict, true)
+			it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
 			its = append(its, it)
 		}
 	}
 
-	return iterator.NewMergedIterator(its, s.icmp, true)
+	return iterator.NewMergedIterator(its, c.s.icmp, strict)
 }
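
Manifest corruption now flows through the shared errors package, so callers can classify failures by type instead of matching error strings. A small sketch, assuming the vendored errors helpers behave as they are used above:

	package main

	import (
		"fmt"

		"github.com/syndtr/goleveldb/leveldb"
		"github.com/syndtr/goleveldb/leveldb/errors"
	)

	func main() {
		// Mirrors newErrManifestCorrupted(nil, "comparer", "missing").
		err := errors.NewErrCorrupted(nil, &leveldb.ErrManifestCorrupted{
			Field:  "comparer",
			Reason: "missing",
		})
		fmt.Println(err)                     // leveldb: manifest corrupted (field 'comparer'): missing
		fmt.Println(errors.IsCorrupted(err)) // true
	}
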
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
index c50fda7379670c3a9475faceed97334e53aa8809..1bdcc68f5f908ecb940f73dd8a9834122d7a9bf9 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -9,11 +9,11 @@ package leveldb
 import (
 	"bufio"
 	"encoding/binary"
-	"errors"
 	"io"
-)
+	"strings"
 
-var errCorruptManifest = errors.New("leveldb: corrupt manifest")
+	"github.com/syndtr/goleveldb/leveldb/errors"
+)
 
 type byteReader interface {
 	io.Reader
@@ -22,32 +22,28 @@ type byteReader interface {
 
 // These numbers are written to disk and should not be changed.
 const (
-	recComparer          = 1
-	recJournalNum        = 2
-	recNextNum           = 3
-	recSeq               = 4
-	recCompactionPointer = 5
-	recDeletedTable      = 6
-	recNewTable          = 7
+	recComparer    = 1
+	recJournalNum  = 2
+	recNextFileNum = 3
+	recSeqNum      = 4
+	recCompPtr     = 5
+	recDelTable    = 6
+	recAddTable    = 7
 	// 8 was used for large value refs
 	recPrevJournalNum = 9
 )
 
 type cpRecord struct {
 	level int
-	key   iKey
+	ikey  iKey
 }
 
-type ntRecord struct {
+type atRecord struct {
 	level int
 	num   uint64
 	size  uint64
-	min   iKey
-	max   iKey
-}
-
-func (r ntRecord) makeFile(s *session) *tFile {
-	return newTFile(s.getTableFile(r.num), r.size, r.min, r.max)
+	imin  iKey
+	imax  iKey
 }
 
 type dtRecord struct {
@@ -56,17 +52,20 @@ type dtRecord struct {
 }
 
 type sessionRecord struct {
-	hasRec             int
-	comparer           string
-	journalNum         uint64
-	prevJournalNum     uint64
-	nextNum            uint64
-	seq                uint64
-	compactionPointers []cpRecord
-	addedTables        []ntRecord
-	deletedTables      []dtRecord
-	scratch            [binary.MaxVarintLen64]byte
-	err                error
+	numLevel int
+
+	hasRec         int
+	comparer       string
+	journalNum     uint64
+	prevJournalNum uint64
+	nextFileNum    uint64
+	seqNum         uint64
+	compPtrs       []cpRecord
+	addedTables    []atRecord
+	deletedTables  []dtRecord
+
+	scratch [binary.MaxVarintLen64]byte
+	err     error
 }
 
 func (p *sessionRecord) has(rec int) bool {
@@ -88,47 +87,47 @@ func (p *sessionRecord) setPrevJournalNum(num uint64) {
 	p.prevJournalNum = num
 }
 
-func (p *sessionRecord) setNextNum(num uint64) {
-	p.hasRec |= 1 << recNextNum
-	p.nextNum = num
+func (p *sessionRecord) setNextFileNum(num uint64) {
+	p.hasRec |= 1 << recNextFileNum
+	p.nextFileNum = num
 }
 
-func (p *sessionRecord) setSeq(seq uint64) {
-	p.hasRec |= 1 << recSeq
-	p.seq = seq
+func (p *sessionRecord) setSeqNum(num uint64) {
+	p.hasRec |= 1 << recSeqNum
+	p.seqNum = num
 }
 
-func (p *sessionRecord) addCompactionPointer(level int, key iKey) {
-	p.hasRec |= 1 << recCompactionPointer
-	p.compactionPointers = append(p.compactionPointers, cpRecord{level, key})
+func (p *sessionRecord) addCompPtr(level int, ikey iKey) {
+	p.hasRec |= 1 << recCompPtr
+	p.compPtrs = append(p.compPtrs, cpRecord{level, ikey})
 }
 
-func (p *sessionRecord) resetCompactionPointers() {
-	p.hasRec &= ^(1 << recCompactionPointer)
-	p.compactionPointers = p.compactionPointers[:0]
+func (p *sessionRecord) resetCompPtrs() {
+	p.hasRec &= ^(1 << recCompPtr)
+	p.compPtrs = p.compPtrs[:0]
 }
 
-func (p *sessionRecord) addTable(level int, num, size uint64, min, max iKey) {
-	p.hasRec |= 1 << recNewTable
-	p.addedTables = append(p.addedTables, ntRecord{level, num, size, min, max})
+func (p *sessionRecord) addTable(level int, num, size uint64, imin, imax iKey) {
+	p.hasRec |= 1 << recAddTable
+	p.addedTables = append(p.addedTables, atRecord{level, num, size, imin, imax})
 }
 
 func (p *sessionRecord) addTableFile(level int, t *tFile) {
-	p.addTable(level, t.file.Num(), t.size, t.min, t.max)
+	p.addTable(level, t.file.Num(), t.size, t.imin, t.imax)
 }
 
 func (p *sessionRecord) resetAddedTables() {
-	p.hasRec &= ^(1 << recNewTable)
+	p.hasRec &= ^(1 << recAddTable)
 	p.addedTables = p.addedTables[:0]
 }
 
-func (p *sessionRecord) deleteTable(level int, num uint64) {
-	p.hasRec |= 1 << recDeletedTable
+func (p *sessionRecord) delTable(level int, num uint64) {
+	p.hasRec |= 1 << recDelTable
 	p.deletedTables = append(p.deletedTables, dtRecord{level, num})
 }
 
 func (p *sessionRecord) resetDeletedTables() {
-	p.hasRec &= ^(1 << recDeletedTable)
+	p.hasRec &= ^(1 << recDelTable)
 	p.deletedTables = p.deletedTables[:0]
 }
 
@@ -161,43 +160,45 @@ func (p *sessionRecord) encode(w io.Writer) error {
 		p.putUvarint(w, recJournalNum)
 		p.putUvarint(w, p.journalNum)
 	}
-	if p.has(recNextNum) {
-		p.putUvarint(w, recNextNum)
-		p.putUvarint(w, p.nextNum)
+	if p.has(recNextFileNum) {
+		p.putUvarint(w, recNextFileNum)
+		p.putUvarint(w, p.nextFileNum)
 	}
-	if p.has(recSeq) {
-		p.putUvarint(w, recSeq)
-		p.putUvarint(w, p.seq)
+	if p.has(recSeqNum) {
+		p.putUvarint(w, recSeqNum)
+		p.putUvarint(w, p.seqNum)
 	}
-	for _, cp := range p.compactionPointers {
-		p.putUvarint(w, recCompactionPointer)
-		p.putUvarint(w, uint64(cp.level))
-		p.putBytes(w, cp.key)
+	for _, r := range p.compPtrs {
+		p.putUvarint(w, recCompPtr)
+		p.putUvarint(w, uint64(r.level))
+		p.putBytes(w, r.ikey)
 	}
-	for _, t := range p.deletedTables {
-		p.putUvarint(w, recDeletedTable)
-		p.putUvarint(w, uint64(t.level))
-		p.putUvarint(w, t.num)
+	for _, r := range p.deletedTables {
+		p.putUvarint(w, recDelTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putUvarint(w, r.num)
 	}
-	for _, t := range p.addedTables {
-		p.putUvarint(w, recNewTable)
-		p.putUvarint(w, uint64(t.level))
-		p.putUvarint(w, t.num)
-		p.putUvarint(w, t.size)
-		p.putBytes(w, t.min)
-		p.putBytes(w, t.max)
+	for _, r := range p.addedTables {
+		p.putUvarint(w, recAddTable)
+		p.putUvarint(w, uint64(r.level))
+		p.putUvarint(w, r.num)
+		p.putUvarint(w, r.size)
+		p.putBytes(w, r.imin)
+		p.putBytes(w, r.imax)
 	}
 	return p.err
 }
 
-func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
+func (p *sessionRecord) readUvarintMayEOF(field string, r io.ByteReader, mayEOF bool) uint64 {
 	if p.err != nil {
 		return 0
 	}
 	x, err := binary.ReadUvarint(r)
 	if err != nil {
-		if err == io.EOF {
-			p.err = errCorruptManifest
+		if err == io.ErrUnexpectedEOF || (!mayEOF && err == io.EOF) {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
+		} else if strings.HasPrefix(err.Error(), "binary:") {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, err.Error()})
 		} else {
 			p.err = err
 		}
@@ -206,35 +207,39 @@ func (p *sessionRecord) readUvarint(r io.ByteReader) uint64 {
 	return x
 }
 
-func (p *sessionRecord) readBytes(r byteReader) []byte {
+func (p *sessionRecord) readUvarint(field string, r io.ByteReader) uint64 {
+	return p.readUvarintMayEOF(field, r, false)
+}
+
+func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
 	if p.err != nil {
 		return nil
 	}
-	n := p.readUvarint(r)
+	n := p.readUvarint(field, r)
 	if p.err != nil {
 		return nil
 	}
 	x := make([]byte, n)
 	_, p.err = io.ReadFull(r, x)
 	if p.err != nil {
-		if p.err == io.EOF {
-			p.err = errCorruptManifest
+		if p.err == io.ErrUnexpectedEOF {
+			p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "short read"})
 		}
 		return nil
 	}
 	return x
 }
 
-func (p *sessionRecord) readLevel(r io.ByteReader) int {
+func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
 	if p.err != nil {
 		return 0
 	}
-	x := p.readUvarint(r)
+	x := p.readUvarint(field, r)
 	if p.err != nil {
 		return 0
 	}
-	if x >= kNumLevels {
-		p.err = errCorruptManifest
+	if x >= uint64(p.numLevel) {
+		p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
 		return 0
 	}
 	return int(x)
@@ -247,59 +252,59 @@ func (p *sessionRecord) decode(r io.Reader) error {
 	}
 	p.err = nil
 	for p.err == nil {
-		rec, err := binary.ReadUvarint(br)
-		if err != nil {
-			if err == io.EOF {
-				err = nil
+		rec := p.readUvarintMayEOF("field-header", br, true)
+		if p.err != nil {
+			if p.err == io.EOF {
+				return nil
 			}
-			return err
+			return p.err
 		}
 		switch rec {
 		case recComparer:
-			x := p.readBytes(br)
+			x := p.readBytes("comparer", br)
 			if p.err == nil {
 				p.setComparer(string(x))
 			}
 		case recJournalNum:
-			x := p.readUvarint(br)
+			x := p.readUvarint("journal-num", br)
 			if p.err == nil {
 				p.setJournalNum(x)
 			}
 		case recPrevJournalNum:
-			x := p.readUvarint(br)
+			x := p.readUvarint("prev-journal-num", br)
 			if p.err == nil {
 				p.setPrevJournalNum(x)
 			}
-		case recNextNum:
-			x := p.readUvarint(br)
+		case recNextFileNum:
+			x := p.readUvarint("next-file-num", br)
 			if p.err == nil {
-				p.setNextNum(x)
+				p.setNextFileNum(x)
 			}
-		case recSeq:
-			x := p.readUvarint(br)
+		case recSeqNum:
+			x := p.readUvarint("seq-num", br)
 			if p.err == nil {
-				p.setSeq(x)
+				p.setSeqNum(x)
 			}
-		case recCompactionPointer:
-			level := p.readLevel(br)
-			key := p.readBytes(br)
+		case recCompPtr:
+			level := p.readLevel("comp-ptr.level", br)
+			ikey := p.readBytes("comp-ptr.ikey", br)
 			if p.err == nil {
-				p.addCompactionPointer(level, iKey(key))
+				p.addCompPtr(level, iKey(ikey))
 			}
-		case recNewTable:
-			level := p.readLevel(br)
-			num := p.readUvarint(br)
-			size := p.readUvarint(br)
-			min := p.readBytes(br)
-			max := p.readBytes(br)
+		case recAddTable:
+			level := p.readLevel("add-table.level", br)
+			num := p.readUvarint("add-table.num", br)
+			size := p.readUvarint("add-table.size", br)
+			imin := p.readBytes("add-table.imin", br)
+			imax := p.readBytes("add-table.imax", br)
 			if p.err == nil {
-				p.addTable(level, num, size, min, max)
+				p.addTable(level, num, size, imin, imax)
 			}
-		case recDeletedTable:
-			level := p.readLevel(br)
-			num := p.readUvarint(br)
+		case recDelTable:
+			level := p.readLevel("del-table.level", br)
+			num := p.readUvarint("del-table.num", br)
 			if p.err == nil {
-				p.deleteTable(level, num)
+				p.delTable(level, num)
 			}
 		}
 	}
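
For orientation (not part of the patch): the manifest format touched above is a
flat sequence of uvarint-encoded (tag, payload) pairs, which is why each setter
flips a bit in hasRec and each reader now carries a field name for corruption
reports. A minimal, self-contained Go sketch of that tag/value encoding, using
a hypothetical tag constant rather than the package's recJournalNum:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const recDemoJournalNum = 2 // hypothetical tag, stands in for recJournalNum

func main() {
	var (
		buf     bytes.Buffer
		scratch [binary.MaxVarintLen64]byte
	)

	// Encode: the tag uvarint, then the payload uvarint.
	for _, v := range []uint64{recDemoJournalNum, 12345} {
		n := binary.PutUvarint(scratch[:], v)
		buf.Write(scratch[:n])
	}

	// Decode in the same order; bytes.Buffer satisfies io.ByteReader.
	tag, _ := binary.ReadUvarint(&buf)
	payload, _ := binary.ReadUvarint(&buf)
	fmt.Println(tag, payload) // 2 12345
}
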
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
index 029fabfe6e9690a78a1a191e9abd87935c937a39..c0c035ae39ae2fac2fe6d743f8ad08a1d59cc771 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -9,6 +9,8 @@ package leveldb
 import (
 	"bytes"
 	"testing"
+
+	"github.com/syndtr/goleveldb/leveldb/opt"
 )
 
 func decodeEncode(v *sessionRecord) (res bool, err error) {
@@ -17,7 +19,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
 	if err != nil {
 		return
 	}
-	v2 := new(sessionRecord)
+	v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
 	err = v.decode(b)
 	if err != nil {
 		return
@@ -32,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
 
 func TestSessionRecord_EncodeDecode(t *testing.T) {
 	big := uint64(1) << 50
-	v := new(sessionRecord)
+	v := &sessionRecord{numLevel: opt.DefaultNumLevel}
 	i := uint64(0)
 	test := func() {
 		res, err := decodeEncode(v)
@@ -47,16 +49,16 @@ func TestSessionRecord_EncodeDecode(t *testing.T) {
 	for ; i < 4; i++ {
 		test()
 		v.addTable(3, big+300+i, big+400+i,
-			newIKey([]byte("foo"), big+500+1, tVal),
-			newIKey([]byte("zoo"), big+600+1, tDel))
-		v.deleteTable(4, big+700+i)
-		v.addCompactionPointer(int(i), newIKey([]byte("x"), big+900+1, tVal))
+			newIkey([]byte("foo"), big+500+1, ktVal),
+			newIkey([]byte("zoo"), big+600+1, ktDel))
+		v.delTable(4, big+700+i)
+		v.addCompPtr(int(i), newIkey([]byte("x"), big+900+1, ktVal))
 	}
 
 	v.setComparer("foo")
 	v.setJournalNum(big + 100)
 	v.setPrevJournalNum(big + 99)
-	v.setNextNum(big + 200)
-	v.setSeq(big + 1000)
+	v.setNextFileNum(big + 200)
+	v.setSeqNum(big + 1000)
 	test()
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
index bf412b030a95d96daedbdce8cbc6f9643dce9732..007c02cde9d2859eea809ae4b005118b887aab62 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
@@ -14,7 +14,7 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/storage"
 )
 
-// logging
+// Logging.
 
 type dropper struct {
 	s    *session
@@ -22,22 +22,17 @@ type dropper struct {
 }
 
 func (d dropper) Drop(err error) {
-	if e, ok := err.(journal.DroppedError); ok {
+	if e, ok := err.(*journal.ErrCorrupted); ok {
 		d.s.logf("journal@drop %s-%d S·%s %q", d.file.Type(), d.file.Num(), shortenb(e.Size), e.Reason)
 	} else {
 		d.s.logf("journal@drop %s-%d %q", d.file.Type(), d.file.Num(), err)
 	}
 }
 
-func (s *session) log(v ...interface{}) {
-	s.stor.Log(fmt.Sprint(v...))
-}
-
-func (s *session) logf(format string, v ...interface{}) {
-	s.stor.Log(fmt.Sprintf(format, v...))
-}
+func (s *session) log(v ...interface{})                 { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
 
-// file utils
+// File utils.
 
 func (s *session) getJournalFile(num uint64) storage.File {
 	return s.stor.GetFile(num, storage.TypeJournal)
@@ -56,9 +51,14 @@ func (s *session) newTemp() storage.File {
 	return s.stor.GetFile(num, storage.TypeTemp)
 }
 
-// session state
+func (s *session) tableFileFromRecord(r atRecord) *tFile {
+	return newTableFile(s.getTableFile(r.num), r.size, r.imin, r.imax)
+}
+
+// Session state.
 
-// Get current version.
+// Get current version. This will increment the version ref; the caller
+// must call version.release (exactly once) after use.
 func (s *session) version() *version {
 	s.vmu.Lock()
 	defer s.vmu.Unlock()
@@ -66,85 +66,80 @@ func (s *session) version() *version {
 	return s.stVersion
 }
 
-// Get current version; no barrier.
-func (s *session) version_NB() *version {
-	return s.stVersion
-}
-
 // Set current version to v.
 func (s *session) setVersion(v *version) {
 	s.vmu.Lock()
-	v.ref = 1
+	v.ref = 1 // Held by session.
 	if old := s.stVersion; old != nil {
-		v.ref++
+		v.ref++ // Held by old version.
 		old.next = v
-		old.release_NB()
+		old.releaseNB()
 	}
 	s.stVersion = v
 	s.vmu.Unlock()
 }
 
 // Get current unused file number.
-func (s *session) fileNum() uint64 {
-	return atomic.LoadUint64(&s.stFileNum)
+func (s *session) nextFileNum() uint64 {
+	return atomic.LoadUint64(&s.stNextFileNum)
 }
 
-// Get current unused file number to num.
-func (s *session) setFileNum(num uint64) {
-	atomic.StoreUint64(&s.stFileNum, num)
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num uint64) {
+	atomic.StoreUint64(&s.stNextFileNum, num)
 }
 
 // Mark file number as used.
 func (s *session) markFileNum(num uint64) {
-	num += 1
+	nextFileNum := num + 1
 	for {
-		old, x := s.stFileNum, num
+		old, x := s.stNextFileNum, nextFileNum
 		if old > x {
 			x = old
 		}
-		if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
+		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
 			break
 		}
 	}
 }
 
 // Allocate a file number.
-func (s *session) allocFileNum() (num uint64) {
-	return atomic.AddUint64(&s.stFileNum, 1) - 1
+func (s *session) allocFileNum() uint64 {
+	return atomic.AddUint64(&s.stNextFileNum, 1) - 1
 }
 
 // Reuse given file number.
 func (s *session) reuseFileNum(num uint64) {
 	for {
-		old, x := s.stFileNum, num
+		old, x := s.stNextFileNum, num
 		if old != x+1 {
 			x = old
 		}
-		if atomic.CompareAndSwapUint64(&s.stFileNum, old, x) {
+		if atomic.CompareAndSwapUint64(&s.stNextFileNum, old, x) {
 			break
 		}
 	}
 }
 
-// manifest related utils
+// Manifest related utils.
 
 // Fill given session record obj with current states; need external
 // synchronization.
 func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
-	r.setNextNum(s.fileNum())
+	r.setNextFileNum(s.nextFileNum())
 
 	if snapshot {
 		if !r.has(recJournalNum) {
 			r.setJournalNum(s.stJournalNum)
 		}
 
-		if !r.has(recSeq) {
-			r.setSeq(s.stSeq)
+		if !r.has(recSeqNum) {
+			r.setSeqNum(s.stSeqNum)
 		}
 
-		for level, ik := range s.stCPtrs {
+		for level, ik := range s.stCompPtrs {
 			if ik != nil {
-				r.addCompactionPointer(level, ik)
+				r.addCompPtr(level, ik)
 			}
 		}
 
@@ -152,7 +147,7 @@ func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
 	}
 }
 
-// Mark if record has been commited, this will update session state;
+// Mark if record has been committed; this will update session state;
 // need external synchronization.
 func (s *session) recordCommited(r *sessionRecord) {
 	if r.has(recJournalNum) {
@@ -163,12 +158,12 @@ func (s *session) recordCommited(r *sessionRecord) {
 		s.stPrevJournalNum = r.prevJournalNum
 	}
 
-	if r.has(recSeq) {
-		s.stSeq = r.seq
+	if r.has(recSeqNum) {
+		s.stSeqNum = r.seqNum
 	}
 
-	for _, p := range r.compactionPointers {
-		s.stCPtrs[p.level] = iKey(p.key)
+	for _, p := range r.compPtrs {
+		s.stCompPtrs[p.level] = iKey(p.ikey)
 	}
 }
 
@@ -183,10 +178,11 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
 	jw := journal.NewWriter(writer)
 
 	if v == nil {
-		v = s.version_NB()
+		v = s.version()
+		defer v.release()
 	}
 	if rec == nil {
-		rec = new(sessionRecord)
+		rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
 	}
 	s.fillRecord(rec, true)
 	v.fillRecord(rec)
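
Aside (not part of the patch): markFileNum and reuseFileNum above implement a
lock-free "raise the counter to at least N" update via a compare-and-swap retry
loop. A hedged sketch of the same idea; unlike the vendored code, this version
also reads the counter with an atomic load:

package main

import (
	"fmt"
	"sync/atomic"
)

var nextFileNum uint64

// markUsed bumps nextFileNum to num+1 unless it is already higher.
func markUsed(num uint64) {
	want := num + 1
	for {
		old := atomic.LoadUint64(&nextFileNum)
		if old >= want {
			return // counter already past num
		}
		if atomic.CompareAndSwapUint64(&nextFileNum, old, want) {
			return // counter raised
		}
		// Lost a race with another goroutine; retry.
	}
}

func main() {
	markUsed(7)
	markUsed(3) // no-op, counter already at 8
	fmt.Println(atomic.LoadUint64(&nextFileNum)) // 8
}
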
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
index 75439f6db728d73f0582b05109c2eafbdcf777e5..46cc9d07012e91141cae7c08903f8421c394c412 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -221,7 +221,7 @@ func (fs *fileStorage) GetManifest() (f File, err error) {
 					fs.log(fmt.Sprintf("skipping %s: invalid file name", fn))
 					continue
 				}
-				if _, e1 := strconv.ParseUint(fn[7:], 10, 0); e1 != nil {
+				if _, e1 := strconv.ParseUint(fn[8:], 10, 0); e1 != nil {
 					fs.log(fmt.Sprintf("skipping %s: invalid file num: %v", fn, e1))
 					continue
 				}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 0000000000000000000000000000000000000000..102031bfd5455256384bf332bc221598162788fa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+	"os"
+	"syscall"
+)
+
+type unixFileLock struct {
+	f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+	if err := setFileLock(fl.f, false); err != nil {
+		return err
+	}
+	return fl.f.Close()
+}
+
+func newFileLock(path string) (fl fileLock, err error) {
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return
+	}
+	err = setFileLock(f, true)
+	if err != nil {
+		f.Close()
+		return
+	}
+	fl = &unixFileLock{f: f}
+	return
+}
+
+func setFileLock(f *os.File, lock bool) error {
+	flock := syscall.Flock_t{
+		Type:   syscall.F_UNLCK,
+		Start:  0,
+		Len:    0,
+		Whence: 1,
+	}
+	if lock {
+		flock.Type = syscall.F_WRLCK
+	}
+	return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+	f, err := os.Open(name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if err := f.Sync(); err != nil {
+		return err
+	}
+	return nil
+}
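
A usage sketch for the lock helper added above, illustrative only: it assumes a
Unix-like platform where syscall.Flock_t and syscall.FcntlFlock are available,
and locks from the start of the file (Whence 0) where the patch uses Whence 1:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	f, err := os.OpenFile("/tmp/demo.lock", os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Try to take an exclusive (write) lock on the whole file.
	lk := syscall.Flock_t{Type: syscall.F_WRLCK, Whence: 0, Start: 0, Len: 0}
	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lk); err != nil {
		fmt.Println("already locked elsewhere:", err)
		return
	}
	fmt.Println("lock acquired")
}
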
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
index 73499afc8d32696c75280baec10252e242da0da9..d0a604b7ab4c646df9d57b466d0a2205e219e419 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -4,7 +4,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// +build darwin freebsd linux netbsd openbsd
+// +build darwin dragonfly freebsd linux netbsd openbsd
 
 package storage
 
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
index de56948886aa8330dfeb085661a3e9da6611d3d4..85dd70b06f9f6c8955d60b382d4a8652b1ba1292 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -67,7 +67,7 @@ type Writer interface {
 	Syncer
 }
 
-// File is the file.
+// File is the file. A file instance must be goroutine-safe.
 type File interface {
 	// Open opens the file for read. Returns os.ErrNotExist error
 	// if the file does not exist.
@@ -94,7 +94,7 @@ type File interface {
 	Remove() error
 }
 
-// Storage is the storage.
+// Storage is the storage. A storage instance must be goroutine-safe.
 type Storage interface {
 	// Lock locks the storage. Any subsequent attempt to call Lock will fail
 	// until the last lock released.
@@ -125,3 +125,33 @@ type Storage interface {
 	// Other methods should not be called after the storage has been closed.
 	Close() error
 }
+
+// FileInfo wraps basic file info.
+type FileInfo struct {
+	Type FileType
+	Num  uint64
+}
+
+func (fi FileInfo) String() string {
+	switch fi.Type {
+	case TypeManifest:
+		return fmt.Sprintf("MANIFEST-%06d", fi.Num)
+	case TypeJournal:
+		return fmt.Sprintf("%06d.log", fi.Num)
+	case TypeTable:
+		return fmt.Sprintf("%06d.ldb", fi.Num)
+	case TypeTemp:
+		return fmt.Sprintf("%06d.tmp", fi.Num)
+	default:
+		return fmt.Sprintf("%#x-%d", fi.Type, fi.Num)
+	}
+}
+
+// NewFileInfo creates a new FileInfo from the given File. It returns nil
+// if File is nil.
+func NewFileInfo(f File) *FileInfo {
+	if f == nil {
+		return nil
+	}
+	return &FileInfo{f.Type(), f.Num()}
+}
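
A quick illustration of the naming scheme String encodes, assuming this
vendored revision of the leveldb/storage package:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	fi := storage.FileInfo{Type: storage.TypeTable, Num: 42}
	fmt.Println(fi) // prints "000042.ldb"
}
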
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
index 27e76d7077992caf5eff4cea8e1689ba883b2b51..dc1f1fb54b8631bc1a5196f61d8bbae7e4064282 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/rand"
 	"os"
 	"path/filepath"
 	"sync"
@@ -28,11 +29,25 @@ var (
 )
 
 var (
-	tsFSEnv  = os.Getenv("GOLEVELDB_USEFS")
-	tsKeepFS = tsFSEnv == "2"
-	tsFS     = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
-	tsMU     = &sync.Mutex{}
-	tsNum    = 0
+	tsFSEnv   = os.Getenv("GOLEVELDB_USEFS")
+	tsTempdir = os.Getenv("GOLEVELDB_TEMPDIR")
+	tsKeepFS  = tsFSEnv == "2"
+	tsFS      = tsKeepFS || tsFSEnv == "" || tsFSEnv == "1"
+	tsMU      = &sync.Mutex{}
+	tsNum     = 0
+)
+
+type tsOp uint
+
+const (
+	tsOpOpen tsOp = iota
+	tsOpCreate
+	tsOpRead
+	tsOpReadAt
+	tsOpWrite
+	tsOpSync
+
+	tsOpNum
 )
 
 type tsLock struct {
@@ -53,6 +68,9 @@ type tsReader struct {
 func (tr tsReader) Read(b []byte) (n int, err error) {
 	ts := tr.tf.ts
 	ts.countRead(tr.tf.Type())
+	if tr.tf.shouldErrLocked(tsOpRead) {
+		return 0, errors.New("leveldb.testStorage: emulated read error")
+	}
 	n, err = tr.Reader.Read(b)
 	if err != nil && err != io.EOF {
 		ts.t.Errorf("E: read error, num=%d type=%v n=%d: %v", tr.tf.Num(), tr.tf.Type(), n, err)
@@ -63,6 +81,9 @@ func (tr tsReader) Read(b []byte) (n int, err error) {
 func (tr tsReader) ReadAt(b []byte, off int64) (n int, err error) {
 	ts := tr.tf.ts
 	ts.countRead(tr.tf.Type())
+	if tr.tf.shouldErrLocked(tsOpReadAt) {
+		return 0, errors.New("leveldb.testStorage: emulated readAt error")
+	}
 	n, err = tr.Reader.ReadAt(b, off)
 	if err != nil && err != io.EOF {
 		ts.t.Errorf("E: readAt error, num=%d type=%v off=%d n=%d: %v", tr.tf.Num(), tr.tf.Type(), off, n, err)
@@ -82,15 +103,12 @@ type tsWriter struct {
 }
 
 func (tw tsWriter) Write(b []byte) (n int, err error) {
-	ts := tw.tf.ts
-	ts.mu.Lock()
-	defer ts.mu.Unlock()
-	if ts.emuWriteErr&tw.tf.Type() != 0 {
+	if tw.tf.shouldErrLocked(tsOpWrite) {
 		return 0, errors.New("leveldb.testStorage: emulated write error")
 	}
 	n, err = tw.Writer.Write(b)
 	if err != nil {
-		ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
+		tw.tf.ts.t.Errorf("E: write error, num=%d type=%v n=%d: %v", tw.tf.Num(), tw.tf.Type(), n, err)
 	}
 	return
 }
@@ -98,23 +116,23 @@ func (tw tsWriter) Write(b []byte) (n int, err error) {
 func (tw tsWriter) Sync() (err error) {
 	ts := tw.tf.ts
 	ts.mu.Lock()
-	defer ts.mu.Unlock()
 	for ts.emuDelaySync&tw.tf.Type() != 0 {
 		ts.cond.Wait()
 	}
-	if ts.emuSyncErr&tw.tf.Type() != 0 {
+	ts.mu.Unlock()
+	if tw.tf.shouldErrLocked(tsOpSync) {
 		return errors.New("leveldb.testStorage: emulated sync error")
 	}
 	err = tw.Writer.Sync()
 	if err != nil {
-		ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
+		tw.tf.ts.t.Errorf("E: sync error, num=%d type=%v: %v", tw.tf.Num(), tw.tf.Type(), err)
 	}
 	return
 }
 
 func (tw tsWriter) Close() (err error) {
 	err = tw.Writer.Close()
-	tw.tf.close("reader", err)
+	tw.tf.close("writer", err)
 	return
 }
 
@@ -127,6 +145,16 @@ func (tf tsFile) x() uint64 {
 	return tf.Num()<<typeShift | uint64(tf.Type())
 }
 
+func (tf tsFile) shouldErr(op tsOp) bool {
+	return tf.ts.shouldErr(tf, op)
+}
+
+func (tf tsFile) shouldErrLocked(op tsOp) bool {
+	tf.ts.mu.Lock()
+	defer tf.ts.mu.Unlock()
+	return tf.shouldErr(op)
+}
+
 func (tf tsFile) checkOpen(m string) error {
 	ts := tf.ts
 	if writer, ok := ts.opens[tf.x()]; ok {
@@ -163,7 +191,7 @@ func (tf tsFile) Open() (r storage.Reader, err error) {
 	if err != nil {
 		return
 	}
-	if ts.emuOpenErr&tf.Type() != 0 {
+	if tf.shouldErr(tsOpOpen) {
 		err = errors.New("leveldb.testStorage: emulated open error")
 		return
 	}
@@ -190,7 +218,7 @@ func (tf tsFile) Create() (w storage.Writer, err error) {
 	if err != nil {
 		return
 	}
-	if ts.emuCreateErr&tf.Type() != 0 {
+	if tf.shouldErr(tsOpCreate) {
 		err = errors.New("leveldb.testStorage: emulated create error")
 		return
 	}
@@ -205,6 +233,23 @@ func (tf tsFile) Create() (w storage.Writer, err error) {
 	return
 }
 
+func (tf tsFile) Replace(newfile storage.File) (err error) {
+	ts := tf.ts
+	ts.mu.Lock()
+	defer ts.mu.Unlock()
+	err = tf.checkOpen("replace")
+	if err != nil {
+		return
+	}
+	err = tf.File.Replace(newfile.(tsFile).File)
+	if err != nil {
+		ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
+	} else {
+		ts.t.Logf("I: file replace, num=%d type=%v", tf.Num(), tf.Type())
+	}
+	return
+}
+
 func (tf tsFile) Remove() (err error) {
 	ts := tf.ts
 	ts.mu.Lock()
@@ -231,51 +276,75 @@ type testStorage struct {
 	cond sync.Cond
 	// Open files, true=writer, false=reader
 	opens         map[uint64]bool
-	emuOpenErr    storage.FileType
-	emuCreateErr  storage.FileType
 	emuDelaySync  storage.FileType
-	emuWriteErr   storage.FileType
-	emuSyncErr    storage.FileType
 	ignoreOpenErr storage.FileType
 	readCnt       uint64
 	readCntEn     storage.FileType
+
+	emuErr         [tsOpNum]storage.FileType
+	emuErrOnce     [tsOpNum]storage.FileType
+	emuRandErr     [tsOpNum]storage.FileType
+	emuRandErrProb int
+	emuErrOnceMap  map[uint64]uint
+	emuRandRand    *rand.Rand
+}
+
+func (ts *testStorage) shouldErr(tf tsFile, op tsOp) bool {
+	if ts.emuErr[op]&tf.Type() != 0 {
+		return true
+	} else if ts.emuRandErr[op]&tf.Type() != 0 || ts.emuErrOnce[op]&tf.Type() != 0 {
+		sop := uint(1) << op
+		eop := ts.emuErrOnceMap[tf.x()]
+		if eop&sop == 0 && (ts.emuRandRand.Int()%ts.emuRandErrProb == 0 || ts.emuErrOnce[op]&tf.Type() != 0) {
+			ts.emuErrOnceMap[tf.x()] = eop | sop
+			ts.t.Logf("I: emulated error: file=%d type=%v op=%v", tf.Num(), tf.Type(), op)
+			return true
+		}
+	}
+	return false
 }
 
-func (ts *testStorage) SetOpenErr(t storage.FileType) {
+func (ts *testStorage) SetEmuErr(t storage.FileType, ops ...tsOp) {
 	ts.mu.Lock()
-	ts.emuOpenErr = t
+	for _, op := range ops {
+		ts.emuErr[op] = t
+	}
 	ts.mu.Unlock()
 }
 
-func (ts *testStorage) SetCreateErr(t storage.FileType) {
+func (ts *testStorage) SetEmuErrOnce(t storage.FileType, ops ...tsOp) {
 	ts.mu.Lock()
-	ts.emuCreateErr = t
+	for _, op := range ops {
+		ts.emuErrOnce[op] = t
+	}
 	ts.mu.Unlock()
 }
 
-func (ts *testStorage) DelaySync(t storage.FileType) {
+func (ts *testStorage) SetEmuRandErr(t storage.FileType, ops ...tsOp) {
 	ts.mu.Lock()
-	ts.emuDelaySync |= t
-	ts.cond.Broadcast()
+	for _, op := range ops {
+		ts.emuRandErr[op] = t
+	}
 	ts.mu.Unlock()
 }
 
-func (ts *testStorage) ReleaseSync(t storage.FileType) {
+func (ts *testStorage) SetEmuRandErrProb(prob int) {
 	ts.mu.Lock()
-	ts.emuDelaySync &= ^t
-	ts.cond.Broadcast()
+	ts.emuRandErrProb = prob
 	ts.mu.Unlock()
 }
 
-func (ts *testStorage) SetWriteErr(t storage.FileType) {
+func (ts *testStorage) DelaySync(t storage.FileType) {
 	ts.mu.Lock()
-	ts.emuWriteErr = t
+	ts.emuDelaySync |= t
+	ts.cond.Broadcast()
 	ts.mu.Unlock()
 }
 
-func (ts *testStorage) SetSyncErr(t storage.FileType) {
+func (ts *testStorage) ReleaseSync(t storage.FileType) {
 	ts.mu.Lock()
-	ts.emuSyncErr = t
+	ts.emuDelaySync &= ^t
+	ts.cond.Broadcast()
 	ts.mu.Unlock()
 }
 
@@ -413,7 +482,11 @@ func newTestStorage(t *testing.T) *testStorage {
 			num := tsNum
 			tsNum++
 			tsMU.Unlock()
-			path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
+			tempdir := tsTempdir
+			if tempdir == "" {
+				tempdir = os.TempDir()
+			}
+			path := filepath.Join(tempdir, fmt.Sprintf("goleveldb-test%d0%d0%d", os.Getuid(), os.Getpid(), num))
 			if _, err := os.Stat(path); err != nil {
 				stor, err = storage.OpenFile(path)
 				if err != nil {
@@ -436,6 +509,10 @@ func newTestStorage(t *testing.T) *testStorage {
 						}
 						f.Close()
 					}
+					if t.Failed() {
+						t.Logf("testing failed, test DB preserved at %s", path)
+						return nil
+					}
 					if tsKeepFS {
 						return nil
 					}
@@ -449,10 +526,13 @@ func newTestStorage(t *testing.T) *testStorage {
 		stor = storage.NewMemStorage()
 	}
 	ts := &testStorage{
-		t:       t,
-		Storage: stor,
-		closeFn: closeFn,
-		opens:   make(map[uint64]bool),
+		t:              t,
+		Storage:        stor,
+		closeFn:        closeFn,
+		opens:          make(map[uint64]bool),
+		emuErrOnceMap:  make(map[uint64]uint),
+		emuRandErrProb: 0x999,
+		emuRandRand:    rand.New(rand.NewSource(0xfacedead)),
 	}
 	ts.cond.L = &ts.mu
 	return ts
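
The reworked error emulation above keys one-shot failures on a (file,
operation) pair using a per-file bitmask. A minimal standalone sketch of that
bookkeeping; the names are illustrative, not the test harness's:

package main

import "fmt"

type tsOpDemo uint

const (
	opOpen tsOpDemo = iota
	opRead
	opWrite
)

var seen = map[uint64]uint{} // file id -> bitmask of ops already fired

// fireOnce reports true the first time a given (file, op) pair is seen.
func fireOnce(fileID uint64, op tsOpDemo) bool {
	bit := uint(1) << op
	if seen[fileID]&bit != 0 {
		return false // this op already fired for this file
	}
	seen[fileID] |= bit
	return true
}

func main() {
	fmt.Println(fireOnce(1, opRead))  // true
	fmt.Println(fireOnce(1, opRead))  // false
	fmt.Println(fireOnce(1, opWrite)) // true
}
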
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
index fdd5d2bcf61c43cc19158b8b36df336da97a89e1..3e8df6af51cd87c53ce91c297cc31f1749eb8ee5 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
@@ -7,11 +7,11 @@
 package leveldb
 
 import (
+	"fmt"
 	"sort"
 	"sync/atomic"
 
 	"github.com/syndtr/goleveldb/leveldb/cache"
-	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/storage"
@@ -19,34 +19,41 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-// table file
+// tFile holds basic information about a table.
 type tFile struct {
-	file     storage.File
-	seekLeft int32
-	size     uint64
-	min, max iKey
+	file       storage.File
+	seekLeft   int32
+	size       uint64
+	imin, imax iKey
 }
 
-// test if key is after t
-func (t *tFile) isAfter(key []byte, ucmp comparer.BasicComparer) bool {
-	return key != nil && ucmp.Compare(key, t.max.ukey()) > 0
+// Returns true if the given key is after the largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+	return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
 }
 
-// test if key is before t
-func (t *tFile) isBefore(key []byte, ucmp comparer.BasicComparer) bool {
-	return key != nil && ucmp.Compare(key, t.min.ukey()) < 0
+// Returns true if the given key is before the smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+	return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
 }
 
-func (t *tFile) incrSeek() int32 {
+// Returns true if the given key range overlaps this table's key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+	return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the number of seeks left.
+func (t *tFile) consumeSeek() int32 {
 	return atomic.AddInt32(&t.seekLeft, -1)
 }
 
-func newTFile(file storage.File, size uint64, min, max iKey) *tFile {
+// Creates a new tFile.
+func newTableFile(file storage.File, size uint64, imin, imax iKey) *tFile {
 	f := &tFile{
 		file: file,
 		size: size,
-		min:  min,
-		max:  max,
+		imin: imin,
+		imax: imax,
 	}
 
 	// We arrange to automatically compact this file after
@@ -70,33 +77,52 @@ func newTFile(file storage.File, size uint64, min, max iKey) *tFile {
 	return f
 }
 
-// table files
+// tFiles holds multiple tFile instances.
 type tFiles []*tFile
 
 func (tf tFiles) Len() int      { return len(tf) }
 func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
 
+func (tf tFiles) nums() string {
+	x := "[ "
+	for i, f := range tf {
+		if i != 0 {
+			x += ", "
+		}
+		x += fmt.Sprint(f.file.Num())
+	}
+	x += " ]"
+	return x
+}
+
+// Returns true if the smallest key of table i is less than that of table j.
+// This is used to sort tables by key in ascending order.
 func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
 	a, b := tf[i], tf[j]
-	n := icmp.Compare(a.min, b.min)
+	n := icmp.Compare(a.imin, b.imin)
 	if n == 0 {
 		return a.file.Num() < b.file.Num()
 	}
 	return n < 0
 }
 
+// Returns true if the file number of table i is greater than that of table j.
+// This is used to sort tables by file number in descending order.
 func (tf tFiles) lessByNum(i, j int) bool {
 	return tf[i].file.Num() > tf[j].file.Num()
 }
 
+// Sorts tables by key in ascending order.
 func (tf tFiles) sortByKey(icmp *iComparer) {
 	sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
 }
 
+// Sorts tables by file number in descending order.
 func (tf tFiles) sortByNum() {
 	sort.Sort(&tFilesSortByNum{tFiles: tf})
 }
 
+// Returns the total size of all tables.
 func (tf tFiles) size() (sum uint64) {
 	for _, t := range tf {
 		sum += t.size
@@ -104,94 +130,107 @@ func (tf tFiles) size() (sum uint64) {
 	return sum
 }
 
-func (tf tFiles) searchMin(key iKey, icmp *iComparer) int {
+// Searches for the smallest index of the tables whose smallest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey iKey) int {
 	return sort.Search(len(tf), func(i int) bool {
-		return icmp.Compare(tf[i].min, key) >= 0
+		return icmp.Compare(tf[i].imin, ikey) >= 0
 	})
 }
 
-func (tf tFiles) searchMax(key iKey, icmp *iComparer) int {
+// Searches for the smallest index of the tables whose largest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey iKey) int {
 	return sort.Search(len(tf), func(i int) bool {
-		return icmp.Compare(tf[i].max, key) >= 0
+		return icmp.Compare(tf[i].imax, ikey) >= 0
 	})
 }
 
-func (tf tFiles) isOverlaps(min, max []byte, disjSorted bool, icmp *iComparer) bool {
-	if !disjSorted {
-		// Need to check against all files
+// Returns true if the given key range overlaps one or more tables'
+// key ranges. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+	if unsorted {
+		// Check against all files.
 		for _, t := range tf {
-			if !t.isAfter(min, icmp.ucmp) && !t.isBefore(max, icmp.ucmp) {
+			if t.overlaps(icmp, umin, umax) {
 				return true
 			}
 		}
 		return false
 	}
 
-	var idx int
-	if len(min) > 0 {
-		// Find the earliest possible internal key for min
-		idx = tf.searchMax(newIKey(min, kMaxSeq, tSeek), icmp)
+	i := 0
+	if len(umin) > 0 {
+		// Find the earliest possible internal key for min.
+		i = tf.searchMax(icmp, newIkey(umin, kMaxSeq, ktSeek))
 	}
-
-	if idx >= len(tf) {
-		// beginning of range is after all files, so no overlap
+	if i >= len(tf) {
+		// Beginning of range is after all files, so no overlap.
 		return false
 	}
-	return !tf[idx].isBefore(max, icmp.ucmp)
+	return !tf[i].before(icmp, umax)
 }
 
-func (tf tFiles) getOverlaps(min, max []byte, r *tFiles, disjSorted bool, ucmp comparer.BasicComparer) {
+// Returns the tables whose key ranges overlap the given key range.
+// The range will be expanded if a ukey is found to hop across tables.
+// If overlapped is true then the search will be restarted whenever
+// umax is expanded.
+// The dst content will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+	dst = dst[:0]
 	for i := 0; i < len(tf); {
 		t := tf[i]
-		i++
-		if t.isAfter(min, ucmp) || t.isBefore(max, ucmp) {
-			continue
-		}
-
-		*r = append(*r, t)
-		if !disjSorted {
-			// Level-0 files may overlap each other.  So check if the newly
-			// added file has expanded the range.  If so, restart search.
-			if min != nil && ucmp.Compare(t.min.ukey(), min) < 0 {
-				min = t.min.ukey()
-				*r = nil
-				i = 0
-			} else if max != nil && ucmp.Compare(t.max.ukey(), max) > 0 {
-				max = t.max.ukey()
-				*r = nil
+		if t.overlaps(icmp, umin, umax) {
+			if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+				umin = t.imin.ukey()
+				dst = dst[:0]
 				i = 0
+				continue
+			} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+				umax = t.imax.ukey()
+				// Restart search if it is overlapped.
+				if overlapped {
+					dst = dst[:0]
+					i = 0
+					continue
+				}
 			}
+
+			dst = append(dst, t)
 		}
+		i++
 	}
 
-	return
+	return dst
 }
 
-func (tf tFiles) getRange(icmp *iComparer) (min, max iKey) {
+// Returns the key range covered by the tables.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax iKey) {
 	for i, t := range tf {
 		if i == 0 {
-			min, max = t.min, t.max
+			imin, imax = t.imin, t.imax
 			continue
 		}
-		if icmp.Compare(t.min, min) < 0 {
-			min = t.min
+		if icmp.Compare(t.imin, imin) < 0 {
+			imin = t.imin
 		}
-		if icmp.Compare(t.max, max) > 0 {
-			max = t.max
+		if icmp.Compare(t.imax, imax) > 0 {
+			imax = t.imax
 		}
 	}
 
 	return
 }
 
+// Creates an iterator indexer from the tables.
 func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
 	if slice != nil {
 		var start, limit int
 		if slice.Start != nil {
-			start = tf.searchMax(iKey(slice.Start), icmp)
+			start = tf.searchMax(icmp, iKey(slice.Start))
 		}
 		if slice.Limit != nil {
-			limit = tf.searchMin(iKey(slice.Limit), icmp)
+			limit = tf.searchMin(icmp, iKey(slice.Limit))
 		} else {
 			limit = tf.Len()
 		}
@@ -206,6 +245,7 @@ func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range
 	})
 }
 
+// Tables iterator index.
 type tFilesArrayIndexer struct {
 	tFiles
 	tops  *tOps
@@ -215,7 +255,7 @@ type tFilesArrayIndexer struct {
 }
 
 func (a *tFilesArrayIndexer) Search(key []byte) int {
-	return a.searchMax(iKey(key), a.icmp)
+	return a.searchMax(a.icmp, iKey(key))
 }
 
 func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
@@ -225,6 +265,7 @@ func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
 	return a.tops.newIterator(a.tFiles[i], nil, a.ro)
 }
 
+// Helper type for sortByKey.
 type tFilesSortByKey struct {
 	tFiles
 	icmp *iComparer
@@ -234,6 +275,7 @@ func (x *tFilesSortByKey) Less(i, j int) bool {
 	return x.lessByKey(x.icmp, i, j)
 }
 
+// Helper type for sortByNum.
 type tFilesSortByNum struct {
 	tFiles
 }
@@ -242,19 +284,15 @@ func (x *tFilesSortByNum) Less(i, j int) bool {
 	return x.lessByNum(i, j)
 }
 
-// table operations
+// Table operations.
 type tOps struct {
-	s       *session
-	cache   cache.Cache
-	cacheNS cache.Namespace
-}
-
-func newTableOps(s *session, cacheCap int) *tOps {
-	c := cache.NewLRUCache(cacheCap)
-	ns := c.GetNamespace(0)
-	return &tOps{s, c, ns}
+	s      *session
+	cache  *cache.Cache
+	bcache *cache.Cache
+	bpool  *util.BufferPool
 }
 
+// Creates an empty table and returns a table writer.
 func (t *tOps) create() (*tWriter, error) {
 	file := t.s.getTableFile(t.s.allocFileNum())
 	fw, err := file.Create()
@@ -265,14 +303,15 @@ func (t *tOps) create() (*tWriter, error) {
 		t:    t,
 		file: file,
 		w:    fw,
-		tw:   table.NewWriter(fw, t.s.o),
+		tw:   table.NewWriter(fw, t.s.o.Options),
 	}, nil
 }
 
+// Builds a table from the src iterator.
 func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
 	w, err := t.create()
 	if err != nil {
-		return f, n, err
+		return
 	}
 
 	defer func() {
@@ -282,7 +321,7 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
 	}()
 
 	for src.Next() {
-		err = w.add(src.Key(), src.Value())
+		err = w.append(src.Key(), src.Value())
 		if err != nil {
 			return
 		}
@@ -297,84 +336,132 @@ func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
 	return
 }
 
-func (t *tOps) lookup(f *tFile) (c cache.Object, err error) {
+// Opens the table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
 	num := f.file.Num()
-	c, ok := t.cacheNS.Get(num, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
+	ch = t.cache.Get(0, num, func() (size int, value cache.Value) {
 		var r storage.Reader
 		r, err = f.file.Open()
 		if err != nil {
-			return
+			return 0, nil
 		}
 
-		o := t.s.o
-
-		var cacheNS cache.Namespace
-		if bc := o.GetBlockCache(); bc != nil {
-			cacheNS = bc.GetNamespace(num)
+		var bcache *cache.CacheGetter
+		if t.bcache != nil {
+			bcache = &cache.CacheGetter{Cache: t.bcache, NS: num}
 		}
 
-		ok = true
-		value = table.NewReader(r, int64(f.size), cacheNS, o)
-		charge = 1
-		fin = func() {
+		var tr *table.Reader
+		tr, err = table.NewReader(r, int64(f.size), storage.NewFileInfo(f.file), bcache, t.bpool, t.s.o.Options)
+		if err != nil {
 			r.Close()
+			return 0, nil
 		}
-		return
+		return 1, tr
+
 	})
-	if !ok && err == nil {
+	if ch == nil && err == nil {
 		err = ErrClosed
 	}
 	return
 }
 
-func (t *tOps) get(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
-	c, err := t.lookup(f)
+// Finds the key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+	ch, err := t.open(f)
 	if err != nil {
 		return nil, nil, err
 	}
-	defer c.Release()
-	return c.Value().(*table.Reader).Find(key, ro)
+	defer ch.Release()
+	return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds the key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+	ch, err := t.open(f)
+	if err != nil {
+		return nil, err
+	}
+	defer ch.Release()
+	return ch.Value().(*table.Reader).FindKey(key, true, ro)
 }
 
+// Returns the approximate offset of the given key.
 func (t *tOps) offsetOf(f *tFile, key []byte) (offset uint64, err error) {
-	c, err := t.lookup(f)
+	ch, err := t.open(f)
 	if err != nil {
 		return
 	}
-	_offset, err := c.Value().(*table.Reader).OffsetOf(key)
-	offset = uint64(_offset)
-	c.Release()
-	return
+	defer ch.Release()
+	offset_, err := ch.Value().(*table.Reader).OffsetOf(key)
+	return uint64(offset_), err
 }
 
+// Creates an iterator from the given table.
 func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
-	c, err := t.lookup(f)
+	ch, err := t.open(f)
 	if err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
-	iter := c.Value().(*table.Reader).NewIterator(slice, ro)
-	iter.SetReleaser(c)
+	iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+	iter.SetReleaser(ch)
 	return iter
 }
 
+// Removes the table from persistent storage. It waits until
+// no one uses the table.
 func (t *tOps) remove(f *tFile) {
 	num := f.file.Num()
-	t.cacheNS.Delete(num, func(exist bool) {
+	t.cache.Delete(0, num, func() {
 		if err := f.file.Remove(); err != nil {
 			t.s.logf("table@remove removing @%d %q", num, err)
 		} else {
 			t.s.logf("table@remove removed @%d", num)
 		}
-		if bc := t.s.o.GetBlockCache(); bc != nil {
-			bc.GetNamespace(num).Zap(false)
+		if t.bcache != nil {
+			t.bcache.EvictNS(num)
 		}
 	})
 }
 
+// Closes the table ops instance. It will close all tables,
+// regardless of whether they are still in use.
 func (t *tOps) close() {
-	t.cache.Zap(true)
+	t.bpool.Close()
+	t.cache.Close()
+	if t.bcache != nil {
+		t.bcache.Close()
+	}
+}
+
+// Creates a new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+	var (
+		cacher cache.Cacher
+		bcache *cache.Cache
+	)
+	if s.o.GetOpenFilesCacheCapacity() > 0 {
+		cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+	}
+	if !s.o.DisableBlockCache {
+		var bcacher cache.Cacher
+		if s.o.GetBlockCacheCapacity() > 0 {
+			bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
+		}
+		bcache = cache.NewCache(bcacher)
+	}
+	return &tOps{
+		s:      s,
+		cache:  cache.NewCache(cacher),
+		bcache: bcache,
+		bpool:  util.NewBufferPool(s.o.GetBlockSize() + 5),
+	}
 }
 
+// tWriter wraps the table writer. It keeps track of the file descriptor
+// and the added key range.
 type tWriter struct {
 	t *tOps
 
@@ -385,7 +472,8 @@ type tWriter struct {
 	first, last []byte
 }
 
-func (w *tWriter) add(key, value []byte) error {
+// Appends a key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
 	if w.first == nil {
 		w.first = append([]byte{}, key...)
 	}
@@ -393,30 +481,39 @@ func (w *tWriter) add(key, value []byte) error {
 	return w.tw.Append(key, value)
 }
 
+// Returns true if the table is empty.
 func (w *tWriter) empty() bool {
 	return w.first == nil
 }
 
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+	if w.w != nil {
+		w.w.Close()
+		w.w = nil
+	}
+}
+
+// Finalizes the table and returns the table file.
 func (w *tWriter) finish() (f *tFile, err error) {
+	defer w.close()
 	err = w.tw.Close()
 	if err != nil {
 		return
 	}
 	err = w.w.Sync()
 	if err != nil {
-		w.w.Close()
 		return
 	}
-	w.w.Close()
-	f = newTFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
+	f = newTableFile(w.file, uint64(w.tw.BytesLen()), iKey(w.first), iKey(w.last))
 	return
 }
 
+// Drops the table.
 func (w *tWriter) drop() {
-	w.w.Close()
+	w.close()
 	w.file.Remove()
 	w.t.s.reuseFileNum(w.file.Num())
-	w.w = nil
 	w.file = nil
 	w.tw = nil
 	w.first = nil
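
Stepping back from the diff: searchMin/searchMax above rely on the tables of a
non-level-0 level being sorted and disjoint, so one sort.Search plus a single
bound check answers an overlap query. A reduced sketch with plain strings
standing in for internal keys:

package main

import (
	"fmt"
	"sort"
)

type tableDemo struct{ min, max string }

// searchMax returns the first index whose max key is >= key.
func searchMax(tf []tableDemo, key string) int {
	return sort.Search(len(tf), func(i int) bool { return tf[i].max >= key })
}

func main() {
	tf := []tableDemo{{"a", "c"}, {"d", "f"}, {"g", "k"}}
	i := searchMax(tf, "e")
	// In a sorted, non-overlapping level, table i is the only candidate
	// that can contain "e"; checking its min bound confirms the overlap.
	fmt.Println(i, tf[i].min <= "e") // 1 true
}
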
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
index ca598f4f5157aec151a03bb49fc0085621829587..00e6f9eea0dd97ae892eab1934d4f096f73a1b65 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/block_test.go
@@ -19,13 +19,18 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-func (b *block) TestNewIterator(slice *util.Range) iterator.Iterator {
-	return b.newIterator(slice, false, nil)
+type blockTesting struct {
+	tr *Reader
+	b  *block
+}
+
+func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
+	return t.tr.newBlockIter(t.b, nil, slice, false)
 }
 
 var _ = testutil.Defer(func() {
 	Describe("Block", func() {
-		Build := func(kv *testutil.KeyValue, restartInterval int) *block {
+		Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
 			// Building the block.
 			bw := &blockWriter{
 				restartInterval: restartInterval,
@@ -39,11 +44,13 @@ var _ = testutil.Defer(func() {
 			// Opening the block.
 			data := bw.buf.Bytes()
 			restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
-			return &block{
-				cmp:            comparer.DefaultComparer,
-				data:           data,
-				restartsLen:    restartsLen,
-				restartsOffset: len(data) - (restartsLen+1)*4,
+			return &blockTesting{
+				tr: &Reader{cmp: comparer.DefaultComparer},
+				b: &block{
+					data:           data,
+					restartsLen:    restartsLen,
+					restartsOffset: len(data) - (restartsLen+1)*4,
+				},
 			}
 		}
 
@@ -59,7 +66,7 @@ var _ = testutil.Defer(func() {
 						// Make block.
 						br := Build(kv, restartInterval)
 						// Do testing.
-						testutil.KeyValueTesting(nil, br, kv.Clone())
+						testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil)
 					}
 
 					Describe(Text(), Test)
@@ -102,11 +109,11 @@ var _ = testutil.Defer(func() {
 			for restartInterval := 1; restartInterval <= 5; restartInterval++ {
 				Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() {
 					// Make block.
-					br := Build(kv, restartInterval)
+					bt := Build(kv, restartInterval)
 
 					Test := func(r *util.Range) func(done Done) {
 						return func(done Done) {
-							iter := br.newIterator(r, false, nil)
+							iter := bt.TestNewIterator(r)
 							Expect(iter.Error()).ShouldNot(HaveOccurred())
 
 							t := testutil.IteratorTesting{
@@ -115,6 +122,7 @@ var _ = testutil.Defer(func() {
 							}
 
 							testutil.DoIteratorTesting(&t)
+							iter.Release()
 							done <- true
 						}
 					}
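
The reader.go changes that follow swap code.google.com/p/snappy-go for
github.com/syndtr/gosnappy and decode blocks into buffers drawn from a
util.BufferPool sized via snappy.DecodedLen. A hedged sketch of that decode
pattern, with a plain make standing in for the pool:

package main

import (
	"fmt"

	"github.com/syndtr/gosnappy/snappy"
)

func main() {
	raw := []byte("hello hello hello hello")
	enc, err := snappy.Encode(nil, raw)
	if err != nil {
		panic(err)
	}

	// Size the destination up front so an exact-length pooled buffer
	// can be reused, as readRawBlock does with r.bpool.Get(decLen).
	decLen, err := snappy.DecodedLen(enc)
	if err != nil {
		panic(err)
	}
	dst := make([]byte, decLen) // stand-in for a BufferPool buffer
	dec, err := snappy.Decode(dst, enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dec))
}
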
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
index 8acb9f720971c1935c09749e49c09f35f275deb8..6f38e84b399b60a996f4a5672b3e0fbb100636c9 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -8,27 +8,41 @@ package table
 
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"sort"
 	"strings"
+	"sync"
 
-	"code.google.com/p/snappy-go/snappy"
+	"github.com/syndtr/gosnappy/snappy"
 
 	"github.com/syndtr/goleveldb/leveldb/cache"
 	"github.com/syndtr/goleveldb/leveldb/comparer"
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/storage"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
 var (
-	ErrNotFound     = util.ErrNotFound
-	ErrIterReleased = errors.New("leveldb/table: iterator released")
+	ErrNotFound       = errors.ErrNotFound
+	ErrReaderReleased = errors.New("leveldb/table: reader released")
+	ErrIterReleased   = errors.New("leveldb/table: iterator released")
 )
 
+type ErrCorrupted struct {
+	Pos    int64
+	Size   int64
+	Kind   string
+	Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+	return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
 func max(x, y int) int {
 	if x > y {
 		return x
@@ -37,40 +51,33 @@ func max(x, y int) int {
 }
 
 type block struct {
-	cmp            comparer.BasicComparer
+	bpool          *util.BufferPool
+	bh             blockHandle
 	data           []byte
 	restartsLen    int
 	restartsOffset int
-	// Whether checksum is verified and valid.
-	checksum bool
 }
 
-func (b *block) seek(rstart, rlimit int, key []byte) (index, offset int, err error) {
-	n := b.restartsOffset
-	data := b.data
-	cmp := b.cmp
-
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
 	index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
-		offset := int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):]))
-		offset += 1                               // shared always zero, since this is a restart point
-		v1, n1 := binary.Uvarint(data[offset:])   // key length
-		_, n2 := binary.Uvarint(data[offset+n1:]) // value length
+		offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+		offset += 1                                 // shared always zero, since this is a restart point
+		v1, n1 := binary.Uvarint(b.data[offset:])   // key length
+		_, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
 		m := offset + n1 + n2
-		return cmp.Compare(data[m:m+int(v1)], key) > 0
+		return cmp.Compare(b.data[m:m+int(v1)], key) > 0
 	}) + rstart - 1
 	if index < rstart {
 		// The smallest key is greater-than key sought.
 		index = rstart
 	}
-	offset = int(binary.LittleEndian.Uint32(data[n+4*index:]))
+	offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
 	return
 }
 
 func (b *block) restartIndex(rstart, rlimit, offset int) int {
-	n := b.restartsOffset
-	data := b.data
 	return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
-		return int(binary.LittleEndian.Uint32(data[n+4*(rstart+i):])) > offset
+		return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
 	}) + rstart - 1
 }
 
@@ -81,7 +88,7 @@ func (b *block) restartOffset(index int) int {
 func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
 	if offset >= b.restartsOffset {
 		if offset != b.restartsOffset {
-			err = errors.New("leveldb/table: Reader: BlockEntry: invalid block (block entries offset not aligned)")
+			err = &ErrCorrupted{Reason: "entries offset not aligned"}
 		}
 		return
 	}
@@ -91,7 +98,7 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
 	m := n0 + n1 + n2
 	n = m + int(v1) + int(v2)
 	if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
-		err = errors.New("leveldb/table: Reader: invalid block (block entries corrupted)")
+		err = &ErrCorrupted{Reason: "entries corrupted"}
 		return
 	}
 	key = b.data[offset+m : offset+m+int(v1)]
@@ -100,43 +107,10 @@ func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error)
 	return
 }
 
-func (b *block) newIterator(slice *util.Range, inclLimit bool, cache util.Releaser) *blockIter {
-	bi := &blockIter{
-		block: b,
-		cache: cache,
-		// Valid key should never be nil.
-		key:             make([]byte, 0),
-		dir:             dirSOI,
-		riStart:         0,
-		riLimit:         b.restartsLen,
-		offsetStart:     0,
-		offsetRealStart: 0,
-		offsetLimit:     b.restartsOffset,
-	}
-	if slice != nil {
-		if slice.Start != nil {
-			if bi.Seek(slice.Start) {
-				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
-				bi.offsetStart = b.restartOffset(bi.riStart)
-				bi.offsetRealStart = bi.prevOffset
-			} else {
-				bi.riStart = b.restartsLen
-				bi.offsetStart = b.restartsOffset
-				bi.offsetRealStart = b.restartsOffset
-			}
-		}
-		if slice.Limit != nil {
-			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
-				bi.offsetLimit = bi.prevOffset
-				bi.riLimit = bi.restartIndex + 1
-			}
-		}
-		bi.reset()
-		if bi.offsetStart > bi.offsetLimit {
-			bi.sErr(errors.New("leveldb/table: Reader: invalid slice range"))
-		}
-	}
-	return bi
+func (b *block) Release() {
+	b.bpool.Put(b.data)
+	b.bpool = nil
+	b.data = nil
 }
 
 type dir int
@@ -150,10 +124,12 @@ const (
 )
 
 type blockIter struct {
-	block           *block
-	cache, releaser util.Releaser
-	key, value      []byte
-	offset          int
+	tr            *Reader
+	block         *block
+	blockReleaser util.Releaser
+	releaser      util.Releaser
+	key, value    []byte
+	offset        int
 	// Previous offset, only filled by Next.
 	prevOffset   int
 	prevNode     []int
@@ -250,7 +226,7 @@ func (i *blockIter) Seek(key []byte) bool {
 		return false
 	}
 
-	ri, offset, err := i.block.seek(i.riStart, i.riLimit, key)
+	ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
 	if err != nil {
 		i.sErr(err)
 		return false
@@ -261,7 +237,7 @@ func (i *blockIter) Seek(key []byte) bool {
 		i.dir = dirForward
 	}
 	for i.Next() {
-		if i.block.cmp.Compare(i.key, key) >= 0 {
+		if i.tr.cmp.Compare(i.key, key) >= 0 {
 			return true
 		}
 	}
@@ -286,7 +262,7 @@ func (i *blockIter) Next() bool {
 	for i.offset < i.offsetRealStart {
 		key, value, nShared, n, err := i.block.entry(i.offset)
 		if err != nil {
-			i.sErr(err)
+			i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
 			return false
 		}
 		if n == 0 {
@@ -300,13 +276,13 @@ func (i *blockIter) Next() bool {
 	if i.offset >= i.offsetLimit {
 		i.dir = dirEOI
 		if i.offset != i.offsetLimit {
-			i.sErr(errors.New("leveldb/table: Reader: Next: invalid block (block entries offset not aligned)"))
+			i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
 		}
 		return false
 	}
 	key, value, nShared, n, err := i.block.entry(i.offset)
 	if err != nil {
-		i.sErr(err)
+		i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
 		return false
 	}
 	if n == 0 {
@@ -391,7 +367,7 @@ func (i *blockIter) Prev() bool {
 	for {
 		key, value, nShared, n, err := i.block.entry(offset)
 		if err != nil {
-			i.sErr(err)
+			i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
 			return false
 		}
 		if offset >= i.offsetRealStart {
@@ -410,7 +386,7 @@ func (i *blockIter) Prev() bool {
 		// Stop if target offset reached.
 		if offset >= i.offset {
 			if offset != i.offset {
-				i.sErr(errors.New("leveldb/table: Reader: Prev: invalid block (block entries offset not aligned)"))
+				i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
 				return false
 			}
 
@@ -437,25 +413,33 @@ func (i *blockIter) Value() []byte {
 }
 
 func (i *blockIter) Release() {
-	i.prevNode = nil
-	i.prevKeys = nil
-	i.key = nil
-	i.value = nil
-	i.dir = dirReleased
-	if i.cache != nil {
-		i.cache.Release()
-		i.cache = nil
-	}
-	if i.releaser != nil {
-		i.releaser.Release()
-		i.releaser = nil
+	if i.dir != dirReleased {
+		i.tr = nil
+		i.block = nil
+		i.prevNode = nil
+		i.prevKeys = nil
+		i.key = nil
+		i.value = nil
+		i.dir = dirReleased
+		if i.blockReleaser != nil {
+			i.blockReleaser.Release()
+			i.blockReleaser = nil
+		}
+		if i.releaser != nil {
+			i.releaser.Release()
+			i.releaser = nil
+		}
 	}
 }
 
 func (i *blockIter) SetReleaser(releaser util.Releaser) {
-	if i.dir > dirReleased {
-		i.releaser = releaser
+	if i.dir == dirReleased {
+		panic(util.ErrReleased)
 	}
+	if i.releaser != nil && releaser != nil {
+		panic(util.ErrHasReleaser)
+	}
+	i.releaser = releaser
 }
 
 func (i *blockIter) Valid() bool {
@@ -467,21 +451,21 @@ func (i *blockIter) Error() error {
 }
 
 type filterBlock struct {
-	filter     filter.Filter
+	bpool      *util.BufferPool
 	data       []byte
 	oOffset    int
 	baseLg     uint
 	filtersNum int
 }
 
-func (b *filterBlock) contains(offset uint64, key []byte) bool {
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
 	i := int(offset >> b.baseLg)
 	if i < b.filtersNum {
 		o := b.data[b.oOffset+i*4:]
 		n := int(binary.LittleEndian.Uint32(o))
 		m := int(binary.LittleEndian.Uint32(o[4:]))
 		if n < m && m <= b.oOffset {
-			return b.filter.Contains(b.data[n:m], key)
+			return filter.Contains(b.data[n:m], key)
 		} else if n == m {
 			return false
 		}
@@ -489,12 +473,17 @@ func (b *filterBlock) contains(offset uint64, key []byte) bool {
 	return true
 }
 
+func (b *filterBlock) Release() {
+	b.bpool.Put(b.data)
+	b.bpool = nil
+	b.data = nil
+}
+
 type indexIter struct {
-	blockIter
-	tableReader *Reader
-	slice       *util.Range
+	*blockIter
+	tr    *Reader
+	slice *util.Range
 	// Options
-	checksum  bool
 	fillCache bool
 }
 
@@ -505,95 +494,173 @@ func (i *indexIter) Get() iterator.Iterator {
 	}
 	dataBH, n := decodeBlockHandle(value)
 	if n == 0 {
-		return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid table (bad data block handle)"))
+		return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
 	}
+
 	var slice *util.Range
 	if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
 		slice = i.slice
 	}
-	return i.tableReader.getDataIter(dataBH, slice, i.checksum, i.fillCache)
+	return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
 }
 
 // Reader is a table reader.
 type Reader struct {
+	mu     sync.RWMutex
+	fi     *storage.FileInfo
 	reader io.ReaderAt
-	cache  cache.Namespace
+	cache  *cache.CacheGetter
 	err    error
+	bpool  *util.BufferPool
 	// Options
-	cmp        comparer.Comparer
-	filter     filter.Filter
-	checksum   bool
-	strictIter bool
+	o              *opt.Options
+	cmp            comparer.Comparer
+	filter         filter.Filter
+	verifyChecksum bool
 
-	dataEnd     int64
-	indexBlock  *block
-	filterBlock *filterBlock
+	dataEnd                   int64
+	metaBH, indexBH, filterBH blockHandle
+	indexBlock                *block
+	filterBlock               *filterBlock
 }
 
-func verifyChecksum(data []byte) bool {
-	n := len(data) - 4
-	checksum0 := binary.LittleEndian.Uint32(data[n:])
-	checksum1 := util.NewCRC(data[:n]).Value()
-	return checksum0 == checksum1
+func (r *Reader) blockKind(bh blockHandle) string {
+	switch bh.offset {
+	case r.metaBH.offset:
+		return "meta-block"
+	case r.indexBH.offset:
+		return "index-block"
+	case r.filterBH.offset:
+		if r.filterBH.length > 0 {
+			return "filter-block"
+		}
+	}
+	return "data-block"
 }
 
-func (r *Reader) readRawBlock(bh blockHandle, checksum bool) ([]byte, error) {
-	data := make([]byte, bh.length+blockTrailerLen)
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+	return &errors.ErrCorrupted{File: r.fi, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+	return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+	if cerr, ok := err.(*ErrCorrupted); ok {
+		cerr.Pos = int64(bh.offset)
+		cerr.Size = int64(bh.length)
+		cerr.Kind = r.blockKind(bh)
+		return &errors.ErrCorrupted{File: r.fi, Err: cerr}
+	}
+	return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+	data := r.bpool.Get(int(bh.length + blockTrailerLen))
 	if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
 		return nil, err
 	}
-	if checksum || r.checksum {
-		if !verifyChecksum(data) {
-			return nil, errors.New("leveldb/table: Reader: invalid block (checksum mismatch)")
+
+	if verifyChecksum {
+		n := bh.length + 1
+		checksum0 := binary.LittleEndian.Uint32(data[n:])
+		checksum1 := util.NewCRC(data[:n]).Value()
+		if checksum0 != checksum1 {
+			r.bpool.Put(data)
+			return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
 		}
 	}
+
 	switch data[bh.length] {
 	case blockTypeNoCompression:
 		data = data[:bh.length]
 	case blockTypeSnappyCompression:
-		var err error
-		data, err = snappy.Decode(nil, data[:bh.length])
+		decLen, err := snappy.DecodedLen(data[:bh.length])
 		if err != nil {
-			return nil, err
+			return nil, r.newErrCorruptedBH(bh, err.Error())
+		}
+		decData := r.bpool.Get(decLen)
+		decData, err = snappy.Decode(decData, data[:bh.length])
+		r.bpool.Put(data)
+		if err != nil {
+			r.bpool.Put(decData)
+			return nil, r.newErrCorruptedBH(bh, err.Error())
 		}
+		data = decData
 	default:
-		return nil, fmt.Errorf("leveldb/table: Reader: unknown block compression type: %d", data[bh.length])
+		r.bpool.Put(data)
+		return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
 	}
 	return data, nil
 }
 
-func (r *Reader) readBlock(bh blockHandle, checksum bool) (*block, error) {
-	data, err := r.readRawBlock(bh, checksum)
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+	data, err := r.readRawBlock(bh, verifyChecksum)
 	if err != nil {
 		return nil, err
 	}
 	restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
 	b := &block{
-		cmp:            r.cmp,
+		bpool:          r.bpool,
+		bh:             bh,
 		data:           data,
 		restartsLen:    restartsLen,
 		restartsOffset: len(data) - (restartsLen+1)*4,
-		checksum:       checksum || r.checksum,
 	}
 	return b, nil
 }
 
-func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterBlock, error) {
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+	if r.cache != nil {
+		var (
+			err error
+			ch  *cache.Handle
+		)
+		if fillCache {
+			ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+				var b *block
+				b, err = r.readBlock(bh, verifyChecksum)
+				if err != nil {
+					return 0, nil
+				}
+				return cap(b.data), b
+			})
+		} else {
+			ch = r.cache.Get(bh.offset, nil)
+		}
+		if ch != nil {
+			b, ok := ch.Value().(*block)
+			if !ok {
+				ch.Release()
+				return nil, nil, errors.New("leveldb/table: inconsistent block type")
+			}
+			return b, ch, err
+		} else if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	b, err := r.readBlock(bh, verifyChecksum)
+	return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
 	data, err := r.readRawBlock(bh, true)
 	if err != nil {
 		return nil, err
 	}
 	n := len(data)
 	if n < 5 {
-		return nil, errors.New("leveldb/table: Reader: invalid filter block (too short)")
+		return nil, r.newErrCorruptedBH(bh, "too short")
 	}
 	m := n - 5
 	oOffset := int(binary.LittleEndian.Uint32(data[m:]))
 	if oOffset > m {
-		return nil, errors.New("leveldb/table: Reader: invalid filter block (invalid offset)")
+		return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
 	}
 	b := &filterBlock{
-		filter:     filter,
+		bpool:      r.bpool,
 		data:       data,
 		oOffset:    oOffset,
 		baseLg:     uint(data[n-1]),
@@ -602,44 +669,111 @@ func (r *Reader) readFilterBlock(bh blockHandle, filter filter.Filter) (*filterB
 	return b, nil
 }
 
-func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fillCache bool) iterator.Iterator {
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
 	if r.cache != nil {
-		// Get/set block cache.
-		var err error
-		cache, ok := r.cache.Get(dataBH.offset, func() (ok bool, value interface{}, charge int, fin cache.SetFin) {
-			if !fillCache {
-				return
+		var (
+			err error
+			ch  *cache.Handle
+		)
+		if fillCache {
+			ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+				var b *filterBlock
+				b, err = r.readFilterBlock(bh)
+				if err != nil {
+					return 0, nil
+				}
+				return cap(b.data), b
+			})
+		} else {
+			ch = r.cache.Get(bh.offset, nil)
+		}
+		if ch != nil {
+			b, ok := ch.Value().(*filterBlock)
+			if !ok {
+				ch.Release()
+				return nil, nil, errors.New("leveldb/table: inconsistent block type")
 			}
-			var dataBlock *block
-			dataBlock, err = r.readBlock(dataBH, checksum)
-			if err == nil {
-				ok = true
-				value = dataBlock
-				charge = int(dataBH.length)
+			return b, ch, err
+		} else if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	b, err := r.readFilterBlock(bh)
+	return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+	if r.indexBlock == nil {
+		return r.readBlockCached(r.indexBH, true, fillCache)
+	}
+	return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+	if r.filterBlock == nil {
+		return r.readFilterBlockCached(r.filterBH, fillCache)
+	}
+	return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+	bi := &blockIter{
+		tr:            r,
+		block:         b,
+		blockReleaser: bReleaser,
+		// Valid key should never be nil.
+		key:             make([]byte, 0),
+		dir:             dirSOI,
+		riStart:         0,
+		riLimit:         b.restartsLen,
+		offsetStart:     0,
+		offsetRealStart: 0,
+		offsetLimit:     b.restartsOffset,
+	}
+	if slice != nil {
+		if slice.Start != nil {
+			if bi.Seek(slice.Start) {
+				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+				bi.offsetStart = b.restartOffset(bi.riStart)
+				bi.offsetRealStart = bi.prevOffset
+			} else {
+				bi.riStart = b.restartsLen
+				bi.offsetStart = b.restartsOffset
+				bi.offsetRealStart = b.restartsOffset
 			}
-			return
-		})
-		if err != nil {
-			return iterator.NewEmptyIterator(err)
 		}
-		if ok {
-			dataBlock := cache.Value().(*block)
-			if !dataBlock.checksum && (r.checksum || checksum) {
-				if !verifyChecksum(dataBlock.data) {
-					return iterator.NewEmptyIterator(errors.New("leveldb/table: Reader: invalid block (checksum mismatch)"))
-				}
-				dataBlock.checksum = true
+		if slice.Limit != nil {
+			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+				bi.offsetLimit = bi.prevOffset
+				bi.riLimit = bi.restartIndex + 1
 			}
-			iter := dataBlock.newIterator(slice, false, cache)
-			return iter
+		}
+		bi.reset()
+		if bi.offsetStart > bi.offsetLimit {
+			bi.sErr(errors.New("leveldb/table: invalid slice range"))
 		}
 	}
-	dataBlock, err := r.readBlock(dataBH, checksum)
+	return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+	b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
 	if err != nil {
 		return iterator.NewEmptyIterator(err)
 	}
-	iter := dataBlock.newIterator(slice, false, nil)
-	return iter
+	return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
+	if r.err != nil {
+		return iterator.NewEmptyIterator(r.err)
+	}
+
+	return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
 }
 
 // NewIterator creates an iterator from the table.
@@ -653,35 +787,44 @@ func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, checksum, fi
 // when not used.
 //
 // Also read Iterator documentation of the leveldb/iterator package.
-
 func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
 	if r.err != nil {
 		return iterator.NewEmptyIterator(r.err)
 	}
 
+	fillCache := !ro.GetDontFillCache()
+	indexBlock, rel, err := r.getIndexBlock(fillCache)
+	if err != nil {
+		return iterator.NewEmptyIterator(err)
+	}
 	index := &indexIter{
-		blockIter:   *r.indexBlock.newIterator(slice, true, nil),
-		tableReader: r,
-		slice:       slice,
-		checksum:    ro.GetStrict(opt.StrictBlockChecksum),
-		fillCache:   !ro.GetDontFillCache(),
+		blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+		tr:        r,
+		slice:     slice,
+		fillCache: !ro.GetDontFillCache(),
 	}
-	return iterator.NewIndexedIterator(index, r.strictIter || ro.GetStrict(opt.StrictIterator), false)
+	return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
 }
 
-// Find finds key/value pair whose key is greater than or equal to the
-// given key. It returns ErrNotFound if the table doesn't contain
-// such pair.
-//
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Find returns.
-func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
 	if r.err != nil {
 		err = r.err
 		return
 	}
 
-	index := r.indexBlock.newIterator(nil, true, nil)
+	indexBlock, rel, err := r.getIndexBlock(true)
+	if err != nil {
+		return
+	}
+	defer rel.Release()
+
+	index := r.newBlockIter(indexBlock, nil, nil, true)
 	defer index.Release()
 	if !index.Seek(key) {
 		err = index.Error()
@@ -692,14 +835,23 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 	}
 	dataBH, n := decodeBlockHandle(index.Value())
 	if n == 0 {
-		err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+		r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
 		return
 	}
-	if r.filterBlock != nil && !r.filterBlock.contains(dataBH.offset, key) {
-		err = ErrNotFound
-		return
+	if filtered && r.filter != nil {
+		filterBlock, frel, ferr := r.getFilterBlock(true)
+		if ferr == nil {
+			if !filterBlock.contains(r.filter, dataBH.offset, key) {
+				frel.Release()
+				return nil, nil, ErrNotFound
+			}
+			frel.Release()
+		} else if !errors.IsCorrupted(ferr) {
+			err = ferr
+			return
+		}
 	}
-	data := r.getDataIter(dataBH, nil, ro.GetStrict(opt.StrictBlockChecksum), !ro.GetDontFillCache())
+	data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
 	defer data.Release()
 	if !data.Seek(key) {
 		err = data.Error()
@@ -708,23 +860,64 @@ func (r *Reader) Find(key []byte, ro *opt.ReadOptions) (rkey, value []byte, err
 		}
 		return
 	}
+	// The key is not backed by the block buffer, so there is no need to copy it.
 	rkey = data.Key()
-	value = data.Value()
+	if !noValue {
+		if r.bpool == nil {
+			value = data.Value()
+		} else {
+			// The value is backed by the block buffer; since the buffer will be
+			// recycled, it needs to be copied.
+			value = append([]byte{}, data.Value()...)
+		}
+	}
+	return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and Find will immediately return ErrNotFound if
+// the 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+	return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds the key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and FindKey will immediately return ErrNotFound
+// if the 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after FindKey returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+	rkey, _, err = r.find(key, filtered, ro, true)
 	return
 }
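+
+// Illustrative sketch (not part of this change): probing a table through its
+// filter block before touching any data block; tr and key are assumed to
+// come from the caller:
+//
+//	rkey, value, err := tr.Find(key, true, nil)
+//	if err == ErrNotFound {
+//		// Either the filter ruled the key out or no such pair exists.
+//	}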
 
 // Get gets the value for the given key. It returns errors.ErrNotFound
 // if the table does not contain the key.
 //
-// The caller should not modify the contents of the returned slice, but
-// it is safe to modify the contents of the argument after Get returns.
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Get returns.
 func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
 	if r.err != nil {
 		err = r.err
 		return
 	}
 
-	rkey, value, err := r.Find(key, ro)
+	rkey, value, err := r.find(key, false, ro, false)
 	if err == nil && r.cmp.Compare(rkey, key) != 0 {
 		value = nil
 		err = ErrNotFound
@@ -736,17 +929,26 @@ func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
 //
 // It is safe to modify the contents of the argument after Get returns.
 func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+
 	if r.err != nil {
 		err = r.err
 		return
 	}
 
-	index := r.indexBlock.newIterator(nil, true, nil)
+	indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+	if err != nil {
+		return
+	}
+	defer rel.Release()
+
+	index := r.newBlockIter(indexBlock, nil, nil, true)
 	defer index.Release()
 	if index.Seek(key) {
 		dataBH, n := decodeBlockHandle(index.Value())
 		if n == 0 {
-			err = errors.New("leveldb/table: Reader: invalid table (bad data block handle)")
+			r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
 			return
 		}
 		offset = int64(dataBH.offset)
@@ -759,90 +961,147 @@ func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
 	return
 }
 
-// NewReader creates a new initialized table reader for the file.
-// The cache is optional and can be nil.
-func NewReader(f io.ReaderAt, size int64, cache cache.Namespace, o *opt.Options) *Reader {
-	r := &Reader{
-		reader:     f,
-		cache:      cache,
-		cmp:        o.GetComparer(),
-		checksum:   o.GetStrict(opt.StrictBlockChecksum),
-		strictIter: o.GetStrict(opt.StrictIterator),
+// Release implements util.Releaser.
+// It also closes the file if it is an io.Closer.
+func (r *Reader) Release() {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if closer, ok := r.reader.(io.Closer); ok {
+		closer.Close()
+	}
+	if r.indexBlock != nil {
+		r.indexBlock.Release()
+		r.indexBlock = nil
 	}
+	if r.filterBlock != nil {
+		r.filterBlock.Release()
+		r.filterBlock = nil
+	}
+	r.reader = nil
+	r.cache = nil
+	r.bpool = nil
+	r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The fi, cache and bpool are optional and can be nil.
+//
+// The returned table reader instance is goroutine-safe.
+func NewReader(f io.ReaderAt, size int64, fi *storage.FileInfo, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
 	if f == nil {
-		r.err = errors.New("leveldb/table: Reader: nil file")
-		return r
+		return nil, errors.New("leveldb/table: nil file")
 	}
+
+	r := &Reader{
+		fi:             fi,
+		reader:         f,
+		cache:          cache,
+		bpool:          bpool,
+		o:              o,
+		cmp:            o.GetComparer(),
+		verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+	}
+
 	if size < footerLen {
-		r.err = errors.New("leveldb/table: Reader: invalid table (file size is too small)")
-		return r
+		r.err = r.newErrCorrupted(0, size, "table", "too small")
+		return r, nil
 	}
+
+	footerPos := size - footerLen
 	var footer [footerLen]byte
-	if _, err := r.reader.ReadAt(footer[:], size-footerLen); err != nil && err != io.EOF {
-		r.err = fmt.Errorf("leveldb/table: Reader: invalid table (could not read footer): %v", err)
+	if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+		return nil, err
 	}
 	if string(footer[footerLen-len(magic):footerLen]) != magic {
-		r.err = errors.New("leveldb/table: Reader: invalid table (bad magic number)")
-		return r
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+		return r, nil
 	}
+
+	var n int
 	// Decode the metaindex block handle.
-	metaBH, n := decodeBlockHandle(footer[:])
+	r.metaBH, n = decodeBlockHandle(footer[:])
 	if n == 0 {
-		r.err = errors.New("leveldb/table: Reader: invalid table (bad metaindex block handle)")
-		return r
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+		return r, nil
 	}
+
 	// Decode the index block handle.
-	indexBH, n := decodeBlockHandle(footer[n:])
+	r.indexBH, n = decodeBlockHandle(footer[n:])
 	if n == 0 {
-		r.err = errors.New("leveldb/table: Reader: invalid table (bad index block handle)")
-		return r
-	}
-	// Read index block.
-	r.indexBlock, r.err = r.readBlock(indexBH, true)
-	if r.err != nil {
-		return r
+		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+		return r, nil
 	}
+
 	// Read metaindex block.
-	metaBlock, err := r.readBlock(metaBH, true)
+	metaBlock, err := r.readBlock(r.metaBH, true)
 	if err != nil {
-		r.err = err
-		return r
+		if errors.IsCorrupted(err) {
+			r.err = err
+			return r, nil
+		} else {
+			return nil, err
+		}
 	}
+
 	// Set data end.
-	r.dataEnd = int64(metaBH.offset)
-	metaIter := metaBlock.newIterator(nil, false, nil)
+	r.dataEnd = int64(r.metaBH.offset)
+
+	// Read metaindex.
+	metaIter := r.newBlockIter(metaBlock, nil, nil, true)
 	for metaIter.Next() {
 		key := string(metaIter.Key())
 		if !strings.HasPrefix(key, "filter.") {
 			continue
 		}
 		fn := key[7:]
-		var filter filter.Filter
 		if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
-			filter = f0
+			r.filter = f0
 		} else {
 			for _, f0 := range o.GetAltFilters() {
 				if f0.Name() == fn {
-					filter = f0
+					r.filter = f0
 					break
 				}
 			}
 		}
-		if filter != nil {
+		if r.filter != nil {
 			filterBH, n := decodeBlockHandle(metaIter.Value())
 			if n == 0 {
 				continue
 			}
+			r.filterBH = filterBH
 			// Update data end.
 			r.dataEnd = int64(filterBH.offset)
-			filterBlock, err := r.readFilterBlock(filterBH, filter)
-			if err != nil {
-				continue
-			}
-			r.filterBlock = filterBlock
 			break
 		}
 	}
 	metaIter.Release()
-	return r
+	metaBlock.Release()
+
+	// Cache the index and filter blocks locally, since there is no global cache.
+	if cache == nil {
+		r.indexBlock, err = r.readBlock(r.indexBH, true)
+		if err != nil {
+			if errors.IsCorrupted(err) {
+				r.err = err
+				return r, nil
+			} else {
+				return nil, err
+			}
+		}
+		if r.filter != nil {
+			r.filterBlock, err = r.readFilterBlock(r.filterBH)
+			if err != nil {
+				if !errors.IsCorrupted(err) {
+					return nil, err
+				}
+
+				// Don't use filter then.
+				r.filter = nil
+			}
+		}
+	}
+
+	return r, nil
 }
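+
+// Illustrative sketch (not part of this change): opening a table without a
+// shared cache or buffer pool and reading a single key; f, size, o and key
+// are assumed to come from the caller:
+//
+//	tr, err := NewReader(f, size, nil, nil, nil, o)
+//	if err != nil {
+//		return err
+//	}
+//	defer tr.Release()
+//	value, err := tr.Get(key, nil)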
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
index c0ac70d9eaa2dd23ff765725dad835280587184a..beacdc1f024a47e4921a721c8225850a2bd3e68b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -133,9 +133,9 @@ Filter block trailer:
 
       +- 4-bytes -+
      /             \
-    +---------------+---------------+---------------+-------------------------+------------------+
-    |    offset 1   |      ....     |    offset n   | filter offset (4-bytes) | base Lg (1-byte) |
-    +-------------- +---------------+---------------+-------------------------+------------------+
+    +---------------+---------------+---------------+-------------------------------+------------------+
+    | data 1 offset |      ....     | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+    +-------------- +---------------+---------------+-------------------------------+------------------+
 
 
 NOTE: All fixed-length integer are little-endian.
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
index bc9eb83cc30064de16e7e035523ebc0788746444..6465da6e3707c48ab28ebc176c33b80b66c0c8b3 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
@@ -3,15 +3,9 @@ package table
 import (
 	"testing"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
-
 	"github.com/syndtr/goleveldb/leveldb/testutil"
 )
 
 func TestTable(t *testing.T) {
-	testutil.RunDefer()
-
-	RegisterFailHandler(Fail)
-	RunSpecs(t, "Table Suite")
+	testutil.RunSuite(t, "Table Suite")
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
index d7d3b2a4bfd698219d133a90cade2487e48a5742..4b59b31f52b42f12e8e098ea3a20d3b10d6b334e 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/table_test.go
@@ -23,7 +23,7 @@ type tableWrapper struct {
 }
 
 func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) {
-	return t.Reader.Find(key, nil)
+	return t.Reader.Find(key, false, nil)
 }
 
 func (t tableWrapper) TestGet(key []byte) (value []byte, err error) {
@@ -59,7 +59,8 @@ var _ = testutil.Defer(func() {
 			It("Should be able to approximate offset of a key correctly", func() {
 				Expect(err).ShouldNot(HaveOccurred())
 
-				tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+				tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
+				Expect(err).ShouldNot(HaveOccurred())
 				CheckOffset := func(key string, expect, threshold int) {
 					offset, err := tr.OffsetOf([]byte(key))
 					Expect(err).ShouldNot(HaveOccurred())
@@ -95,7 +96,7 @@ var _ = testutil.Defer(func() {
 				tw.Close()
 
 				// Opening the table.
-				tr := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, o)
+				tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), nil, nil, nil, o)
 				return tableWrapper{tr}
 			}
 			Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() {
@@ -104,14 +105,16 @@ var _ = testutil.Defer(func() {
 					if body != nil {
 						body(db.(tableWrapper).Reader)
 					}
-					testutil.KeyValueTesting(nil, db, *kv)
+					testutil.KeyValueTesting(nil, *kv, db, nil, nil)
 				}
 			}
 
-			testutil.AllKeyValueTesting(nil, Build)
+			testutil.AllKeyValueTesting(nil, Build, nil, nil)
 			Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) {
 				It("should have correct blocks number", func() {
-					Expect(r.indexBlock.restartsLen).Should(Equal(9))
+					indexBlock, err := r.readBlock(r.indexBH, true)
+					Expect(err).To(BeNil())
+					Expect(indexBlock.restartsLen).Should(Equal(9))
 				})
 			}))
 		})
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
index 4e19e93a94464b4492c914561a8e42afd819d47b..274c95fade3b70f878f3ce5092f838eeae9888da 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -12,7 +12,7 @@ import (
 	"fmt"
 	"io"
 
-	"code.google.com/p/snappy-go/snappy"
+	"github.com/syndtr/gosnappy/snappy"
 
 	"github.com/syndtr/goleveldb/leveldb/comparer"
 	"github.com/syndtr/goleveldb/leveldb/filter"
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
index 4b87b5ef636c9ce6b0ce906c2282189d8f7897af..ec3f177a12febb5259fb9d88a5b8b58e7d37dca7 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/db.go
@@ -12,6 +12,7 @@ import (
 
 	. "github.com/onsi/gomega"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/iterator"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
@@ -34,6 +35,10 @@ type Get interface {
 	TestGet(key []byte) (value []byte, err error)
 }
 
+type Has interface {
+	TestHas(key []byte) (ret bool, err error)
+}
+
 type NewIterator interface {
 	TestNewIterator(slice *util.Range) iterator.Iterator
 }
@@ -110,7 +115,7 @@ func (t *DBTesting) TestAllPresent() {
 
 func (t *DBTesting) TestDeletedKey(key []byte) {
 	_, err := t.DB.TestGet(key)
-	Expect(err).Should(Equal(util.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
+	Expect(err).Should(Equal(errors.ErrNotFound), "Get on deleted key %q, %s", key, t.text())
 }
 
 func (t *DBTesting) TestAllDeleted() {
@@ -212,5 +217,6 @@ func DoDBTesting(t *DBTesting) {
 		}
 
 		DoIteratorTesting(&it)
+		iter.Release()
 	}
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
new file mode 100644
index 0000000000000000000000000000000000000000..82f3d0e81113b7e85bc0f3b07d338333473882cc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/ginkgo.go
@@ -0,0 +1,21 @@
+package testutil
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func RunSuite(t GinkgoTestingT, name string) {
+	RunDefer()
+
+	SynchronizedBeforeSuite(func() []byte {
+		RunDefer("setup")
+		return nil
+	}, func(data []byte) {})
+	SynchronizedAfterSuite(func() {
+		RunDefer("teardown")
+	}, func() {})
+
+	RegisterFailHandler(Fail)
+	RunSpecs(t, name)
+}
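+
+// Typical usage, mirroring table_suite_test.go:
+//
+//	func TestTable(t *testing.T) {
+//		testutil.RunSuite(t, "Table Suite")
+//	}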
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
index 4fc75b6f2343016c4b111415412af5a2f5640fed..a0b58f0e7252b632651a6480f32fc47e973cca01 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/kvtest.go
@@ -13,16 +13,28 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
+	"github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
+func KeyValueTesting(rnd *rand.Rand, kv KeyValue, p DB, setup func(KeyValue) DB, teardown func(DB)) {
 	if rnd == nil {
 		rnd = NewRand()
 	}
 
-	if db, ok := p.(Find); ok {
-		It("Should find all keys with Find", func() {
+	if p == nil {
+		BeforeEach(func() {
+			p = setup(kv)
+		})
+		if teardown != nil {
+			AfterEach(func() {
+				teardown(p)
+			})
+		}
+	}
+
+	It("Should find all keys with Find", func() {
+		if db, ok := p.(Find); ok {
 			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
 				key_, key, value := kv.IndexInexact(i)
 
@@ -38,9 +50,11 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
 				Expect(rkey).Should(Equal(key))
 				Expect(rvalue).Should(Equal(value), "Value for key %q (%q)", key_, key)
 			})
-		})
+		}
+	})
 
-		It("Should return error if the key is not present", func() {
+	It("Should return error if the key is not present", func() {
+		if db, ok := p.(Find); ok {
 			var key []byte
 			if kv.Len() > 0 {
 				key_, _ := kv.Index(kv.Len() - 1)
@@ -48,12 +62,12 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
 			}
 			rkey, _, err := db.TestFind(key)
 			Expect(err).Should(HaveOccurred(), "Find for key %q yield key %q", key, rkey)
-			Expect(err).Should(Equal(util.ErrNotFound))
-		})
-	}
+			Expect(err).Should(Equal(errors.ErrNotFound))
+		}
+	})
 
-	if db, ok := p.(Get); ok {
-		It("Should only find exact key with Get", func() {
+	It("Should only find exact key with Get", func() {
+		if db, ok := p.(Get); ok {
 			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
 				key_, key, value := kv.IndexInexact(i)
 
@@ -66,14 +80,34 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
 				if len(key_) > 0 {
 					_, err = db.TestGet(key_)
 					Expect(err).Should(HaveOccurred(), "Error for key %q", key_)
-					Expect(err).Should(Equal(util.ErrNotFound))
+					Expect(err).Should(Equal(errors.ErrNotFound))
 				}
 			})
-		})
-	}
+		}
+	})
+
+	It("Should only find present key with Has", func() {
+		if db, ok := p.(Has); ok {
+			ShuffledIndex(nil, kv.Len(), 1, func(i int) {
+				key_, key, _ := kv.IndexInexact(i)
+
+				// Using exact key.
+				ret, err := db.TestHas(key)
+				Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key)
+				Expect(ret).Should(BeTrue(), "False for key %q", key)
 
-	if db, ok := p.(NewIterator); ok {
-		TestIter := func(r *util.Range, _kv KeyValue) {
+				// Using inexact key.
+				if len(key_) > 0 {
+					ret, err = db.TestHas(key_)
+					Expect(err).ShouldNot(HaveOccurred(), "Error for key %q", key_)
+					Expect(ret).ShouldNot(BeTrue(), "True for key %q", key_)
+				}
+			})
+		}
+	})
+
+	TestIter := func(r *util.Range, _kv KeyValue) {
+		if db, ok := p.(NewIterator); ok {
 			iter := db.TestNewIterator(r)
 			Expect(iter.Error()).ShouldNot(HaveOccurred())
 
@@ -83,46 +117,62 @@ func KeyValueTesting(rnd *rand.Rand, p DB, kv KeyValue) {
 			}
 
 			DoIteratorTesting(&t)
+			iter.Release()
 		}
+	}
 
-		It("Should iterates and seeks correctly", func(done Done) {
-			TestIter(nil, kv.Clone())
-			done <- true
-		}, 3.0)
-
-		RandomIndex(rnd, kv.Len(), kv.Len(), func(i int) {
-			type slice struct {
-				r            *util.Range
-				start, limit int
-			}
+	It("Should iterates and seeks correctly", func(done Done) {
+		TestIter(nil, kv.Clone())
+		done <- true
+	}, 3.0)
 
-			key_, _, _ := kv.IndexInexact(i)
-			for _, x := range []slice{
-				{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
-				{&util.Range{Start: nil, Limit: key_}, 0, i},
-			} {
-				It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", x.start, x.limit), func(done Done) {
-					TestIter(x.r, kv.Slice(x.start, x.limit))
-					done <- true
-				}, 3.0)
-			}
-		})
+	RandomIndex(rnd, kv.Len(), Min(kv.Len(), 50), func(i int) {
+		type slice struct {
+			r            *util.Range
+			start, limit int
+		}
 
-		RandomRange(rnd, kv.Len(), kv.Len(), func(start, limit int) {
-			It(fmt.Sprintf("Should iterates and seeks correctly of a slice %d .. %d", start, limit), func(done Done) {
-				r := kv.Range(start, limit)
-				TestIter(&r, kv.Slice(start, limit))
+		key_, _, _ := kv.IndexInexact(i)
+		for _, x := range []slice{
+			{&util.Range{Start: key_, Limit: nil}, i, kv.Len()},
+			{&util.Range{Start: nil, Limit: key_}, 0, i},
+		} {
+			It(fmt.Sprintf("Should iterate and seek correctly on a slice %d .. %d", x.start, x.limit), func(done Done) {
+				TestIter(x.r, kv.Slice(x.start, x.limit))
 				done <- true
 			}, 3.0)
-		})
-	}
+		}
+	})
+
+	RandomRange(rnd, kv.Len(), Min(kv.Len(), 50), func(start, limit int) {
+		It(fmt.Sprintf("Should iterate and seek correctly on a slice %d .. %d", start, limit), func(done Done) {
+			r := kv.Range(start, limit)
+			TestIter(&r, kv.Slice(start, limit))
+			done <- true
+		}, 3.0)
+	})
 }
 
-func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) {
+func AllKeyValueTesting(rnd *rand.Rand, body, setup func(KeyValue) DB, teardown func(DB)) {
 	Test := func(kv *KeyValue) func() {
 		return func() {
-			db := body(*kv)
-			KeyValueTesting(rnd, db, *kv)
+			var p DB
+			if setup != nil {
+				Defer("setup", func() {
+					p = setup(*kv)
+				})
+			}
+			if teardown != nil {
+				Defer("teardown", func() {
+					teardown(p)
+				})
+			}
+			if body != nil {
+				p = body(*kv)
+			}
+			KeyValueTesting(rnd, *kv, p, func(KeyValue) DB {
+				return p
+			}, nil)
 		}
 	}
 
@@ -133,4 +183,5 @@ func AllKeyValueTesting(rnd *rand.Rand, body func(kv KeyValue) DB) {
 	Describe("with big value", Test(KeyValue_BigValue()))
 	Describe("with special key", Test(KeyValue_SpecialKey()))
 	Describe("with multiple key/value", Test(KeyValue_MultipleKeyValue()))
+	Describe("with generated key/value", Test(KeyValue_Generate(nil, 120, 1, 50, 10, 120)))
 }
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
index 0f8d77a73d7484467203067e6eee3a3ecc3f4aaa..59c496d54c62e90afe4cd057d81f412b10edef6b 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/storage.go
@@ -397,6 +397,7 @@ func (s *Storage) logI(format string, args ...interface{}) {
 
 func (s *Storage) Log(str string) {
 	s.log(1, "Log: "+str)
+	s.Storage.Log(str)
 }
 
 func (s *Storage) Lock() (r util.Releaser, err error) {
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
index 38fe25d523f07e5c89b9e8fb1c639ed61171a4d5..97c5294b1b5dd0c16162cd49fb55ad27274cb89a 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil/util.go
@@ -155,3 +155,17 @@ func RandomRange(rnd *rand.Rand, n, round int, fn func(start, limit int)) {
 	}
 	return
 }
+
+func Max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
+
+func Min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
index c1402fda342a806f4400072af004f5c3623b01df..25bf2b29f99310e6af6ab1532ccfc9cdf1621e87 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/testutil_test.go
@@ -34,6 +34,10 @@ func (t *testingDB) TestGet(key []byte) (value []byte, err error) {
 	return t.Get(key, t.ro)
 }
 
+func (t *testingDB) TestHas(key []byte) (ret bool, err error) {
+	return t.Has(key, t.ro)
+}
+
 func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator {
 	return t.NewIterator(slice, t.ro)
 }
@@ -48,6 +52,7 @@ func (t *testingDB) TestClose() {
 func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB {
 	stor := testutil.NewStorage()
 	db, err := Open(stor, o)
+	// FIXME: This may be called from outside It, which may cause a panic.
 	Expect(err).NotTo(HaveOccurred())
 	return &testingDB{
 		DB:   db,
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
index a43d2e460f0e3ab11032f50f7117cba76b2fc252..1a5bf71a3254044964beba9aee5362e5da1ed763 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util.go
@@ -14,10 +14,10 @@ import (
 )
 
 func shorten(str string) string {
-	if len(str) <= 4 {
+	if len(str) <= 8 {
 		return str
 	}
-	return str[:1] + ".." + str[len(str)-1:]
+	return str[:3] + ".." + str[len(str)-3:]
 }
 
 var bunits = [...]string{"", "Ki", "Mi", "Gi"}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b8453d759817fe01e9e87a91c93499b2e6d4ee8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,238 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+type buffer struct {
+	b    []byte
+	miss int
+}
+
+// BufferPool is a pool of recyclable byte buffers, bucketed by size.
+type BufferPool struct {
+	pool      [6]chan []byte
+	size      [5]uint32
+	sizeMiss  [5]uint32
+	sizeHalf  [5]uint32
+	baseline  [4]int
+	baseline0 int
+
+	mu     sync.RWMutex
+	closed bool
+	closeC chan struct{}
+
+	get     uint32
+	put     uint32
+	half    uint32
+	less    uint32
+	equal   uint32
+	greater uint32
+	miss    uint32
+}
+
+func (p *BufferPool) poolNum(n int) int {
+	if n <= p.baseline0 && n > p.baseline0/2 {
+		return 0
+	}
+	for i, x := range p.baseline {
+		if n <= x {
+			return i + 1
+		}
+	}
+	return len(p.baseline) + 1
+}
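+
+// As a worked example of poolNum: with baseline0 = 4096 the buckets are
+// (2048,4096] -> pool 0, (0,1024] -> 1, (1024,2048] -> 2, (4096,8192] -> 3,
+// (8192,16384] -> 4 and anything larger -> 5.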
+
+// Get returns a buffer with length n.
+func (p *BufferPool) Get(n int) []byte {
+	if p == nil {
+		return make([]byte, n)
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return make([]byte, n)
+	}
+
+	atomic.AddUint32(&p.get, 1)
+
+	poolNum := p.poolNum(n)
+	pool := p.pool[poolNum]
+	if poolNum == 0 {
+		// Fast path.
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				if cap(b)-n >= n {
+					atomic.AddUint32(&p.half, 1)
+					select {
+					case pool <- b:
+					default:
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				atomic.AddUint32(&p.greater, 1)
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		return make([]byte, n, p.baseline0)
+	} else {
+		sizePtr := &p.size[poolNum-1]
+
+		select {
+		case b := <-pool:
+			switch {
+			case cap(b) > n:
+				if cap(b)-n >= n {
+					atomic.AddUint32(&p.half, 1)
+					sizeHalfPtr := &p.sizeHalf[poolNum-1]
+					if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
+						atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+						atomic.StoreUint32(sizeHalfPtr, 0)
+					} else {
+						select {
+						case pool <- b:
+						default:
+						}
+					}
+					return make([]byte, n)
+				} else {
+					atomic.AddUint32(&p.less, 1)
+					return b[:n]
+				}
+			case cap(b) == n:
+				atomic.AddUint32(&p.equal, 1)
+				return b[:n]
+			default:
+				atomic.AddUint32(&p.greater, 1)
+				if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+					select {
+					case pool <- b:
+					default:
+					}
+				}
+			}
+		default:
+			atomic.AddUint32(&p.miss, 1)
+		}
+
+		if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+			if size == 0 {
+				atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+			} else {
+				sizeMissPtr := &p.sizeMiss[poolNum-1]
+				if atomic.AddUint32(sizeMissPtr, 1) == 20 {
+					atomic.StoreUint32(sizePtr, uint32(n))
+					atomic.StoreUint32(sizeMissPtr, 0)
+				}
+			}
+			return make([]byte, n)
+		} else {
+			return make([]byte, n, size)
+		}
+	}
+}
+
+// Put returns the given buffer to the pool.
+func (p *BufferPool) Put(b []byte) {
+	if p == nil {
+		return
+	}
+
+	p.mu.RLock()
+	defer p.mu.RUnlock()
+
+	if p.closed {
+		return
+	}
+
+	atomic.AddUint32(&p.put, 1)
+
+	pool := p.pool[p.poolNum(cap(b))]
+	select {
+	case pool <- b:
+	default:
+	}
+}
+
+func (p *BufferPool) Close() {
+	if p == nil {
+		return
+	}
+
+	p.mu.Lock()
+	if !p.closed {
+		p.closed = true
+		p.closeC <- struct{}{}
+	}
+	p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string {
+	if p == nil {
+		return "<nil>"
+	}
+
+	return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+		p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+func (p *BufferPool) drain() {
+	ticker := time.NewTicker(2 * time.Second)
+	for {
+		select {
+		case <-ticker.C:
+			for _, ch := range p.pool {
+				select {
+				case <-ch:
+				default:
+				}
+			}
+		case <-p.closeC:
+			close(p.closeC)
+			for _, ch := range p.pool {
+				close(ch)
+			}
+			return
+		}
+	}
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+func NewBufferPool(baseline int) *BufferPool {
+	if baseline <= 0 {
+		panic("baseline can't be <= 0")
+	}
+	p := &BufferPool{
+		baseline0: baseline,
+		baseline:  [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+		closeC:    make(chan struct{}, 1),
+	}
+	for i, n := range []int{2, 2, 4, 4, 2, 1} {
+		p.pool[i] = make(chan []byte, n)
+	}
+	go p.drain()
+	return p
+}
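+
+// Illustrative sketch: a pool with a 4 KiB baseline (a typical table block
+// size); Put recycles buffers for later Gets and Close stops the drain
+// goroutine:
+//
+//	pool := NewBufferPool(4096)
+//	buf := pool.Get(1024)
+//	// ... use buf ...
+//	pool.Put(buf)
+//	pool.Close()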
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f7fdd41fe4edda0c6083fac96fa16d71ab80d59
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool.go
@@ -0,0 +1,21 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build go1.3
+
+package util
+
+import (
+	"sync"
+)
+
+type Pool struct {
+	sync.Pool
+}
+
+func NewPool(cap int) *Pool {
+	return &Pool{}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
new file mode 100644
index 0000000000000000000000000000000000000000..27b8d03be9477703757e0043fe70c28a05c27756
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/pool_legacy.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build !go1.3
+
+package util
+
+type Pool struct {
+	pool chan interface{}
+}
+
+func (p *Pool) Get() interface{} {
+	select {
+	case x := <-p.pool:
+		return x
+	default:
+		return nil
+	}
+}
+
+func (p *Pool) Put(x interface{}) {
+	select {
+	case p.pool <- x:
+	default:
+	}
+}
+
+func NewPool(cap int) *Pool {
+	return &Pool{pool: make(chan interface{}, cap)}
+}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
index da0583123a0cb961307a1b3e5a930511aacbb4ea..85159583d2c36581bb6967e7ee610467b2154e30 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -14,3 +14,19 @@ type Range struct {
 	// Limit of the key range, not include in the range.
 	Limit []byte
 }
+
+// BytesPrefix returns a key range that satisfies the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+	var limit []byte
+	for i := len(prefix) - 1; i >= 0; i-- {
+		c := prefix[i]
+		if c < 0xff {
+			limit = make([]byte, i+1)
+			copy(limit, prefix)
+			limit[i] = c + 1
+			break
+		}
+	}
+	return &Range{prefix, limit}
+}
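+
+// Illustrative sketch (assumes a DB opened with the default bytes comparer):
+// iterating every key that starts with "user-":
+//
+//	iter := db.NewIterator(util.BytesPrefix([]byte("user-")), nil)
+//	for iter.Next() {
+//		// Process iter.Key() / iter.Value().
+//	}
+//	iter.Release()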
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
index 229c7d41f65d32962c9e76c66d086090595ada5a..f35976865bff172970e4cafc293814d40542e859 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -12,7 +12,8 @@ import (
 )
 
 var (
-	ErrNotFound = errors.New("leveldb: not found")
+	ErrReleased    = errors.New("leveldb: resource already released")
+	ErrHasReleaser = errors.New("leveldb: releaser already defined")
 )
 
 // Releaser is the interface that wraps the basic Release method.
@@ -27,23 +28,46 @@ type ReleaseSetter interface {
 	// SetReleaser associates the given releaser to the resources. The
 	// releaser will be called once coresponding resources released.
 	// Calling SetReleaser with nil will clear the releaser.
+	//
+	// This will panic if a releaser is already present or the corresponding
+	// resource is already released. The releaser should be cleared first
+	// before assigning a new one.
 	SetReleaser(releaser Releaser)
 }
 
 // BasicReleaser provides basic implementation of Releaser and ReleaseSetter.
 type BasicReleaser struct {
 	releaser Releaser
+	released bool
+}
+
+// Released reports whether the Release method has already been called.
+func (r *BasicReleaser) Released() bool {
+	return r.released
 }
 
 // Release implements Releaser.Release.
 func (r *BasicReleaser) Release() {
-	if r.releaser != nil {
-		r.releaser.Release()
-		r.releaser = nil
+	if !r.released {
+		if r.releaser != nil {
+			r.releaser.Release()
+			r.releaser = nil
+		}
+		r.released = true
 	}
 }
 
 // SetReleaser implements ReleaseSetter.SetReleaser.
 func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+	if r.released {
+		panic(ErrReleased)
+	}
+	if r.releaser != nil && releaser != nil {
+		panic(ErrHasReleaser)
+	}
 	r.releaser = releaser
 }
+
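+// NoopReleaser is a Releaser that does nothing.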
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
diff --git a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
index 4c54d648014ffbb71dfaae06954057d863306fdc..88a52f53e78d687f349af6ad2592cdfc9ef7f232 100644
--- a/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
+++ b/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
@@ -7,7 +7,6 @@
 package leveldb
 
 import (
-	"errors"
 	"sync/atomic"
 	"unsafe"
 
@@ -16,19 +15,6 @@ import (
 	"github.com/syndtr/goleveldb/leveldb/util"
 )
 
-var levelMaxSize [kNumLevels]float64
-
-func init() {
-	// Precompute max size of each level
-	for level := range levelMaxSize {
-		res := float64(10 * 1048576)
-		for n := level; n > 1; n-- {
-			res *= 10
-		}
-		levelMaxSize[level] = res
-	}
-}
-
 type tSet struct {
 	level int
 	table *tFile
@@ -37,21 +23,26 @@ type tSet struct {
 type version struct {
 	s *session
 
-	tables [kNumLevels]tFiles
+	tables []tFiles
 
 	// Level that should be compacted next and its compaction score.
-	// Score < 1 means compaction is not strictly needed.  These fields
-	// are initialized by ComputeCompaction()
+	// Score < 1 means compaction is not strictly needed. These fields
+	// are initialized by computeCompaction()
 	cLevel int
 	cScore float64
 
 	cSeek unsafe.Pointer
 
-	ref  int
+	ref int
+	// Succeeding version.
 	next *version
 }
 
-func (v *version) release_NB() {
+func newVersion(s *session) *version {
+	return &version{s: s, tables: make([]tFiles, s.o.GetNumLevel())}
+}
+
+func (v *version) releaseNB() {
 	v.ref--
 	if v.ref > 0 {
 		return
@@ -60,8 +51,6 @@ func (v *version) release_NB() {
 		panic("negative version ref")
 	}
 
-	s := v.s
-
 	tables := make(map[uint64]bool)
 	for _, tt := range v.next.tables {
 		for _, t := range tt {
@@ -74,145 +63,184 @@ func (v *version) release_NB() {
 		for _, t := range tt {
 			num := t.file.Num()
 			if _, ok := tables[num]; !ok {
-				s.tops.remove(t)
+				v.s.tops.remove(t)
 			}
 		}
 	}
 
-	v.next.release_NB()
+	v.next.releaseNB()
 	v.next = nil
 }
 
 func (v *version) release() {
 	v.s.vmu.Lock()
-	v.release_NB()
+	v.releaseNB()
 	v.s.vmu.Unlock()
 }
 
-func (v *version) get(key iKey, ro *opt.ReadOptions) (value []byte, cstate bool, err error) {
-	s := v.s
-
-	ukey := key.ukey()
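+// walkOverlapping walks tables that may contain ikey, level by level. It
+// calls f for each candidate table and lf (if not nil) after each level;
+// returning false from either stops the walk.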
+func (v *version) walkOverlapping(ikey iKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+	ukey := ikey.ukey()
 
-	var tset *tSet
-	tseek := true
-
-	// We can search level-by-level since entries never hop across
-	// levels. Therefore we are guaranteed that if we find data
-	// in an smaller level, later levels are irrelevant.
-	for level, ts := range v.tables {
-		if len(ts) == 0 {
+	// Walk tables level-by-level.
+	for level, tables := range v.tables {
+		if len(tables) == 0 {
 			continue
 		}
 
 		if level == 0 {
 			// Level-0 files may overlap each other. Find all files that
-			// overlap user_key and process them in order from newest to
-			var tmp tFiles
-			for _, t := range ts {
-				if s.icmp.uCompare(ukey, t.min.ukey()) >= 0 &&
-					s.icmp.uCompare(ukey, t.max.ukey()) <= 0 {
-					tmp = append(tmp, t)
+			// overlap ukey.
+			for _, t := range tables {
+				if t.overlaps(v.s.icmp, ukey, ukey) {
+					if !f(level, t) {
+						return
+					}
 				}
 			}
-
-			if len(tmp) == 0 {
-				continue
-			}
-
-			tmp.sortByNum()
-			ts = tmp
 		} else {
-			i := ts.searchMax(key, s.icmp)
-			if i >= len(ts) || s.icmp.uCompare(ukey, ts[i].min.ukey()) < 0 {
-				continue
+			if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+				t := tables[i]
+				if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+					if !f(level, t) {
+						return
+					}
+				}
 			}
+		}
 
-			ts = ts[i : i+1]
+		if lf != nil && !lf(level) {
+			return
 		}
+	}
+}
 
-		var l0found bool
-		var l0seq uint64
-		var l0type vType
-		var l0value []byte
-		for _, t := range ts {
-			if tseek {
-				if tset == nil {
-					tset = &tSet{level, t}
-				} else if tset.table.incrSeek() <= 0 {
-					cstate = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
-					tseek = false
-				}
-			}
+func (v *version) get(ikey iKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+	ukey := ikey.ukey()
 
-			var _rkey, rval []byte
-			_rkey, rval, err = s.tops.get(t, key, ro)
-			if err == ErrNotFound {
-				continue
-			} else if err != nil {
-				return
+	var (
+		tset  *tSet
+		tseek bool
+
+		// Level-0.
+		zfound bool
+		zseq   uint64
+		zkt    kType
+		zval   []byte
+	)
+
+	err = ErrNotFound
+
+	// Since entries never hop across levels, finding a key/value
+	// in a smaller level makes later levels irrelevant.
+	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+		if !tseek {
+			if tset == nil {
+				tset = &tSet{level, t}
+			} else {
+				tseek = true
 			}
+		}
 
-			rkey := iKey(_rkey)
-			if seq, t, ok := rkey.parseNum(); ok {
-				if s.icmp.uCompare(ukey, rkey.ukey()) == 0 {
-					if level == 0 {
-						if seq >= l0seq {
-							l0found = true
-							l0seq = seq
-							l0type = t
-							l0value = rval
-						}
-					} else {
-						switch t {
-						case tVal:
-							value = rval
-						case tDel:
-							err = ErrNotFound
-						default:
-							panic("invalid type")
-						}
-						return
+		var (
+			fikey, fval []byte
+			ferr        error
+		)
+		if noValue {
+			fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+		} else {
+			fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+		}
+		switch ferr {
+		case nil:
+		case ErrNotFound:
+			return true
+		default:
+			err = ferr
+			return false
+		}
+
+		if fukey, fseq, fkt, fkerr := parseIkey(fikey); fkerr == nil {
+			if v.s.icmp.uCompare(ukey, fukey) == 0 {
+				if level == 0 {
+					if fseq >= zseq {
+						zfound = true
+						zseq = fseq
+						zkt = fkt
+						zval = fval
 					}
+				} else {
+					switch fkt {
+					case ktVal:
+						value = fval
+						err = nil
+					case ktDel:
+					default:
+						panic("leveldb: invalid iKey type")
+					}
+					return false
 				}
-			} else {
-				err = errors.New("leveldb: internal key corrupted")
-				return
 			}
+		} else {
+			err = fkerr
+			return false
 		}
-		if level == 0 && l0found {
-			switch l0type {
-			case tVal:
-				value = l0value
-			case tDel:
-				err = ErrNotFound
+
+		return true
+	}, func(level int) bool {
+		if zfound {
+			switch zkt {
+			case ktVal:
+				value = zval
+				err = nil
+			case ktDel:
 			default:
-				panic("invalid type")
+				panic("leveldb: invalid iKey type")
 			}
-			return
+			return false
 		}
+
+		return true
+	})
+
+	if tseek && tset.table.consumeSeek() <= 0 {
+		tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
 	}
 
-	err = ErrNotFound
 	return
 }
 
-func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
-	s := v.s
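+// sampleSeek charges a seek to the first table overlapping ikey (when more
+// than one table overlaps) and reports whether this scheduled a
+// seek-triggered compaction.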
+func (v *version) sampleSeek(ikey iKey) (tcomp bool) {
+	var tset *tSet
 
+	v.walkOverlapping(ikey, func(level int, t *tFile) bool {
+		if tset == nil {
+			tset = &tSet{level, t}
+			return true
+		} else {
+			if tset.table.consumeSeek() <= 0 {
+				tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+			}
+			return false
+		}
+	}, nil)
+
+	return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
 	// Merge all level zero files together since they may overlap
 	for _, t := range v.tables[0] {
-		it := s.tops.newIterator(t, slice, ro)
+		it := v.s.tops.newIterator(t, slice, ro)
 		its = append(its, it)
 	}
 
-	strict := s.o.GetStrict(opt.StrictIterator) || ro.GetStrict(opt.StrictIterator)
-	for _, tt := range v.tables[1:] {
-		if len(tt) == 0 {
+	strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+	for _, tables := range v.tables[1:] {
+		if len(tables) == 0 {
 			continue
 		}
 
-		it := iterator.NewIndexedIterator(tt.newIndexIterator(s.tops, s.icmp, slice, ro), strict, true)
+		it := iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict)
 		its = append(its, it)
 	}
 
@@ -220,7 +248,7 @@ func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []it
 }
 
 func (v *version) newStaging() *versionStaging {
-	return &versionStaging{base: v}
+	return &versionStaging{base: v, tables: make([]tablesScratch, v.s.o.GetNumLevel())}
 }
 
 // Spawn a new version based on this version.
@@ -242,25 +270,25 @@ func (v *version) tLen(level int) int {
 	return len(v.tables[level])
 }
 
-func (v *version) offsetOf(key iKey) (n uint64, err error) {
-	for level, tt := range v.tables {
-		for _, t := range tt {
-			if v.s.icmp.Compare(t.max, key) <= 0 {
-				// Entire file is before "key", so just add the file size
+func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
+	for level, tables := range v.tables {
+		for _, t := range tables {
+			if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+				// Entire file is before "ikey", so just add the file size
 				n += t.size
-			} else if v.s.icmp.Compare(t.min, key) > 0 {
-				// Entire file is after "key", so ignore
+			} else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+				// Entire file is after "ikey", so ignore
 				if level > 0 {
 					// Files other than level 0 are sorted by meta->min, so
 					// no further files in this level will contain data for
-					// "key".
+					// "ikey".
 					break
 				}
 			} else {
-				// "key" falls in the range for this table.  Add the
-				// approximate offset of "key" within the table.
+				// "ikey" falls in the range for this table. Add the
+				// approximate offset of "ikey" within the table.
 				var nn uint64
-				nn, err = v.s.tops.offsetOf(t, key)
+				nn, err = v.s.tops.offsetOf(t, ikey)
 				if err != nil {
 					return 0, err
 				}
@@ -272,15 +300,16 @@ func (v *version) offsetOf(key iKey) (n uint64, err error) {
 	return
 }
 
-func (v *version) pickLevel(min, max []byte) (level int) {
-	if !v.tables[0].isOverlaps(min, max, false, v.s.icmp) {
-		var r tFiles
-		for ; level < kMaxMemCompactLevel; level++ {
-			if v.tables[level+1].isOverlaps(min, max, true, v.s.icmp) {
+func (v *version) pickLevel(umin, umax []byte) (level int) {
+	if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
+		var overlaps tFiles
+		maxLevel := v.s.o.GetMaxMemCompationLevel()
+		for ; level < maxLevel; level++ {
+			if v.tables[level+1].overlaps(v.s.icmp, umin, umax, false) {
 				break
 			}
-			v.tables[level+2].getOverlaps(min, max, &r, true, v.s.icmp.ucmp)
-			if r.size() > kMaxGrandParentOverlapBytes {
+			overlaps = v.tables[level+2].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+			if overlaps.size() > uint64(v.s.o.GetCompactionGPOverlaps(level)) {
 				break
 			}
 		}
@@ -294,7 +323,7 @@ func (v *version) computeCompaction() {
 	var bestLevel int = -1
 	var bestScore float64 = -1
 
-	for level, ff := range v.tables {
+	for level, tables := range v.tables {
 		var score float64
 		if level == 0 {
 			// We treat level-0 specially by bounding the number of files
@@ -308,9 +337,9 @@ func (v *version) computeCompaction() {
 			// file size is small (perhaps because of a small write-buffer
 			// setting, or very high compression ratios, or lots of
 			// overwrites/deletions).
-			score = float64(len(ff)) / kL0_CompactionTrigger
+			score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
 		} else {
-			score = float64(ff.size()) / levelMaxSize[level]
+			score = float64(tables.size()) / float64(v.s.o.GetCompactionTotalSize(level))
 		}
 
 		if score > bestScore {
@@ -327,66 +356,62 @@ func (v *version) needCompaction() bool {
 	return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
 }
 
+type tablesScratch struct {
+	added   map[uint64]atRecord
+	deleted map[uint64]struct{}
+}
+
 type versionStaging struct {
 	base   *version
-	tables [kNumLevels]struct {
-		added   map[uint64]ntRecord
-		deleted map[uint64]struct{}
-	}
+	tables []tablesScratch
 }
 
 func (p *versionStaging) commit(r *sessionRecord) {
-	btt := p.base.tables
-
-	// deleted tables
-	for _, tr := range r.deletedTables {
-		tm := &(p.tables[tr.level])
+	// Deleted tables.
+	for _, r := range r.deletedTables {
+		tm := &(p.tables[r.level])
 
-		bt := btt[tr.level]
-		if len(bt) > 0 {
+		if len(p.base.tables[r.level]) > 0 {
 			if tm.deleted == nil {
 				tm.deleted = make(map[uint64]struct{})
 			}
-			tm.deleted[tr.num] = struct{}{}
+			tm.deleted[r.num] = struct{}{}
 		}
 
 		if tm.added != nil {
-			delete(tm.added, tr.num)
+			delete(tm.added, r.num)
 		}
 	}
 
-	// new tables
-	for _, tr := range r.addedTables {
-		tm := &(p.tables[tr.level])
+	// New tables.
+	for _, r := range r.addedTables {
+		tm := &(p.tables[r.level])
 
 		if tm.added == nil {
-			tm.added = make(map[uint64]ntRecord)
+			tm.added = make(map[uint64]atRecord)
 		}
-		tm.added[tr.num] = tr
+		tm.added[r.num] = r
 
 		if tm.deleted != nil {
-			delete(tm.deleted, tr.num)
+			delete(tm.deleted, r.num)
 		}
 	}
 }
 
 func (p *versionStaging) finish() *version {
-	s := p.base.s
-	btt := p.base.tables
-
-	// build new version
-	nv := &version{s: s}
+	// Build new version.
+	nv := newVersion(p.base.s)
 	for level, tm := range p.tables {
-		bt := btt[level]
+		btables := p.base.tables[level]
 
-		n := len(bt) + len(tm.added) - len(tm.deleted)
+		n := len(btables) + len(tm.added) - len(tm.deleted)
 		if n < 0 {
 			n = 0
 		}
 		nt := make(tFiles, 0, n)
 
-		// base tables
-		for _, t := range bt {
+		// Base tables.
+		for _, t := range btables {
 			if _, ok := tm.deleted[t.file.Num()]; ok {
 				continue
 			}
@@ -396,17 +421,21 @@ func (p *versionStaging) finish() *version {
 			nt = append(nt, t)
 		}
 
-		// new tables
-		for _, tr := range tm.added {
-			nt = append(nt, tr.makeFile(s))
+		// New tables.
+		for _, r := range tm.added {
+			nt = append(nt, p.base.s.tableFileFromRecord(r))
 		}
 
-		// sort tables
-		nt.sortByKey(s.icmp)
+		// Sort tables.
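+		// Level-0 tables may have overlapping key ranges, so they are
+		// ordered by file number; higher levels are disjoint and sorted by key.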
+		if level == 0 {
+			nt.sortByNum()
+		} else {
+			nt.sortByKey(p.base.s.icmp)
+		}
 		nv.tables[level] = nt
 	}
 
-	// compute compaction score for new version
+	// Compute compaction score for new version.
 	nv.computeCompaction()
 
 	return nv
@@ -421,7 +450,7 @@ func (vr *versionReleaser) Release() {
 	v := vr.v
 	v.s.vmu.Lock()
 	if !vr.once {
-		v.release_NB()
+		v.releaseNB()
 		vr.once = true
 	}
 	v.s.vmu.Unlock()
diff --git a/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
new file mode 100644
index 0000000000000000000000000000000000000000..552a17bfb0500fe0617753c9ca7df0263ec363db
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/decode.go
@@ -0,0 +1,292 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n == 0 {
+		return 0, 0, ErrCorrupt
+	}
+	if uint64(int(v)) != v {
+		return 0, 0, errors.New("snappy: decoded block is too large")
+	}
+	return int(v), n, nil
+}
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+// It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if len(dst) < dLen {
+		dst = make([]byte, dLen)
+	}
+
+	var d, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
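+			// Literal tag: the upper 6 bits hold (length - 1); the escape
+			// values 60-63 mean the length is instead stored in the next
+			// 1-4 bytes, little-endian.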
+			x := uint(src[s] >> 2)
+			switch {
+			case x < 60:
+				s += 1
+			case x == 60:
+				s += 2
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-1])
+			case x == 61:
+				s += 3
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-2]) | uint(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if s > len(src) {
+					return nil, ErrCorrupt
+				}
+				x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
+			}
+			length = int(x + 1)
+			if length <= 0 {
+				return nil, errors.New("snappy: unsupported literal length")
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return nil, ErrCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
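+			// 2-byte copy: 3 bits of length (copies of 4-11 bytes) and an
+			// 11-bit offset split between the tag byte and the next byte.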
+			s += 2
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(src[s-2])&0xe0<<3 | int(src[s-1])
+
+		case tagCopy2:
+			s += 3
+			if s > len(src) {
+				return nil, ErrCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(src[s-2]) | int(src[s-1])<<8
+
+		case tagCopy4:
+			return nil, errors.New("snappy: unsupported COPY_4 tag")
+		}
+
+		end := d + length
+		if offset > d || end > len(dst) {
+			return nil, ErrCorrupt
+		}
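+		// The copy source may overlap the destination (offset < length),
+		// which is how Snappy encodes runs, so copy byte-by-byte rather
+		// than with copy().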
+		for ; d < end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != dLen {
+		return nil, ErrCorrupt
+	}
+	return dst[:d], nil
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxUncompressedChunkLen),
+		buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4]) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
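+		// Each chunk header is one type byte followed by a 3-byte
+		// little-endian body length (checksum plus payload for data chunks).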
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if !r.readFull(r.decoded[:n]) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)]) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+
+		} else {
+			// Section 4.4. Padding (chunk type 0xfe).
+			// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+			if !r.readFull(r.buf[:chunkLen]) {
+				return 0, r.err
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
similarity index 69%
rename from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
index b2371db11c8f0c15a4be374eed72f96bd42b864c..dda372422d437441e1932607737295eae4bd30fd 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/encode.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/encode.go
@@ -6,6 +6,7 @@ package snappy
 
 import (
 	"encoding/binary"
+	"io"
 )
 
 // We limit how far copy back-references can go, the same as the C++ code.
@@ -172,3 +173,86 @@ func MaxEncodedLen(srcLen int) int {
 	// This last factor dominates the blowup, so the final estimate is:
 	return 32 + srcLen + srcLen/6
 }
+
+// NewWriter returns a new Writer that compresses to w, using the framing
+// format described at
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:   w,
+		enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w           io.Writer
+	err         error
+	enc         []byte
+	buf         [checksumSize + chunkHeaderSize]byte
+	wroteHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	w.wroteHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (n int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if !w.wroteHeader {
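+		// Write the stream identifier chunk (the "sNaPpY" magic) once,
+		// before any data chunks.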
+		copy(w.enc, magicChunk)
+		if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil {
+			w.err = err
+			return n, err
+		}
+		w.wroteHeader = true
+	}
+	for len(p) > 0 {
+		var uncompressed []byte
+		if len(p) > maxUncompressedChunkLen {
+			uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkBody, err := Encode(w.enc, uncompressed)
+		if err != nil {
+			w.err = err
+			return n, err
+		}
+		if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType, chunkBody = chunkTypeUncompressedData, uncompressed
+		}
+
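+		// The 4 in the chunk length accounts for the checksum bytes that
+		// precede the body; the length and checksum are encoded little-endian.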
+		chunkLen := 4 + len(chunkBody)
+		w.buf[0] = chunkType
+		w.buf[1] = uint8(chunkLen >> 0)
+		w.buf[2] = uint8(chunkLen >> 8)
+		w.buf[3] = uint8(chunkLen >> 16)
+		w.buf[4] = uint8(checksum >> 0)
+		w.buf[5] = uint8(checksum >> 8)
+		w.buf[6] = uint8(checksum >> 16)
+		w.buf[7] = uint8(checksum >> 24)
+		if _, err = w.w.Write(w.buf[:]); err != nil {
+			w.err = err
+			return n, err
+		}
+		if _, err = w.w.Write(chunkBody); err != nil {
+			w.err = err
+			return n, err
+		}
+		n += len(uncompressed)
+	}
+	return n, nil
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
similarity index 68%
rename from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
index 2f1b790d0b7170df134cdf3f95786120a1f54dae..043bf3d81a949af84bafdf12a7b1e06f93e75bb9 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy.go
@@ -8,6 +8,10 @@
 // The C++ snappy implementation is at http://code.google.com/p/snappy/
 package snappy
 
+import (
+	"hash/crc32"
+)
+
 /*
 Each encoded block begins with the varint-encoded length of the decoded data,
 followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
@@ -36,3 +40,29 @@ const (
 	tagCopy2   = 0x02
 	tagCopy4   = 0x03
 )
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+	// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536 bytes".
+	maxUncompressedChunkLen = 65536
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
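+// The CRC-32C value is rotated and offset ("masked") so that checksumming
+// data that embeds its own checksum stays well-behaved, per the spec.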
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
similarity index 57%
rename from Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go
rename to Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
index 7ba839244e9bb8b75ec49b2083eb428549190784..0623385b71d22ebe18ae8b5aae94e64d0825580d 100644
--- a/Godeps/_workspace/src/code.google.com/p/snappy-go/snappy/snappy_test.go
+++ b/Godeps/_workspace/src/github.com/syndtr/gosnappy/snappy/snappy_test.go
@@ -18,7 +18,10 @@ import (
 	"testing"
 )
 
-var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+var (
+	download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
+	testdata = flag.String("testdata", "testdata", "Directory containing the test data")
+)
 
 func roundtrip(b, ebuf, dbuf []byte) error {
 	e, err := Encode(ebuf, b)
@@ -55,11 +58,11 @@ func TestSmallCopy(t *testing.T) {
 }
 
 func TestSmallRand(t *testing.T) {
-	rand.Seed(27354294)
+	rng := rand.New(rand.NewSource(27354294))
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i, _ := range b {
-			b[i] = uint8(rand.Uint32())
+		for i := range b {
+			b[i] = uint8(rng.Uint32())
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
 			t.Fatal(err)
@@ -70,7 +73,7 @@ func TestSmallRand(t *testing.T) {
 func TestSmallRegular(t *testing.T) {
 	for n := 1; n < 20000; n += 23 {
 		b := make([]byte, n)
-		for i, _ := range b {
+		for i := range b {
 			b[i] = uint8(i%10 + 'a')
 		}
 		if err := roundtrip(b, nil, nil); err != nil {
@@ -79,6 +82,120 @@ func TestSmallRegular(t *testing.T) {
 	}
 }
 
+func cmp(a, b []byte) error {
+	if len(a) != len(b) {
+		return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
+		}
+	}
+	return nil
+}
+
+func TestFramingFormat(t *testing.T) {
+	// src consists of alternating 1e5-sized sequences of random
+	// (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
+	// because it is larger than maxUncompressedChunkLen (64k).
+	src := make([]byte, 1e6)
+	rng := rand.New(rand.NewSource(1))
+	for i := 0; i < 10; i++ {
+		if i%2 == 0 {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(rng.Intn(256))
+			}
+		} else {
+			for j := 0; j < 1e5; j++ {
+				src[1e5*i+j] = uint8(i)
+			}
+		}
+	}
+
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(src); err != nil {
+		t.Fatalf("Write: encoding: %v", err)
+	}
+	dst, err := ioutil.ReadAll(NewReader(buf))
+	if err != nil {
+		t.Fatalf("ReadAll: decoding: %v", err)
+	}
+	if err := cmp(dst, src); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestReaderReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("All that is gold does not glitter,\n"), 10000)
+	buf := new(bytes.Buffer)
+	if _, err := NewWriter(buf).Write(gold); err != nil {
+		t.Fatalf("Write: %v", err)
+	}
+	encoded, invalid, partial := buf.String(), "invalid", "partial"
+	r := NewReader(nil)
+	for i, s := range []string{encoded, invalid, partial, encoded, partial, invalid, encoded, encoded} {
+		if s == partial {
+			r.Reset(strings.NewReader(encoded))
+			if _, err := r.Read(make([]byte, 101)); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			continue
+		}
+		r.Reset(strings.NewReader(s))
+		got, err := ioutil.ReadAll(r)
+		switch s {
+		case encoded:
+			if err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+			if err := cmp(got, gold); err != nil {
+				t.Errorf("#%d: %v", i, err)
+				continue
+			}
+		case invalid:
+			if err == nil {
+				t.Errorf("#%d: got nil error, want non-nil", i)
+				continue
+			}
+		}
+	}
+}
+
+func TestWriterReset(t *testing.T) {
+	gold := bytes.Repeat([]byte("Not all those who wander are lost;\n"), 10000)
+	var gots, wants [][]byte
+	const n = 20
+	w, failed := NewWriter(nil), false
+	for i := 0; i <= n; i++ {
+		buf := new(bytes.Buffer)
+		w.Reset(buf)
+		want := gold[:len(gold)*i/n]
+		if _, err := w.Write(want); err != nil {
+			t.Errorf("#%d: Write: %v", i, err)
+			failed = true
+			continue
+		}
+		got, err := ioutil.ReadAll(NewReader(buf))
+		if err != nil {
+			t.Errorf("#%d: ReadAll: %v", i, err)
+			failed = true
+			continue
+		}
+		gots = append(gots, got)
+		wants = append(wants, want)
+	}
+	if failed {
+		return
+	}
+	for i := range gots {
+		if err := cmp(gots[i], wants[i]); err != nil {
+			t.Errorf("#%d: %v", i, err)
+		}
+	}
+}
+
 func benchDecode(b *testing.B, src []byte) {
 	encoded, err := Encode(nil, src)
 	if err != nil {
@@ -102,7 +219,7 @@ func benchEncode(b *testing.B, src []byte) {
 	}
 }
 
-func readFile(b *testing.B, filename string) []byte {
+func readFile(b testing.TB, filename string) []byte {
 	src, err := ioutil.ReadFile(filename)
 	if err != nil {
 		b.Fatalf("failed reading %s: %s", filename, err)
@@ -144,7 +261,7 @@ func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
 func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
 
 // testFiles' values are copied directly from
-// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
+// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
 // The label field is unused in snappy-go.
 var testFiles = []struct {
 	label    string
@@ -152,29 +269,36 @@ var testFiles = []struct {
 }{
 	{"html", "html"},
 	{"urls", "urls.10K"},
-	{"jpg", "house.jpg"},
-	{"pdf", "mapreduce-osdi-1.pdf"},
+	{"jpg", "fireworks.jpeg"},
+	{"jpg_200", "fireworks.jpeg"},
+	{"pdf", "paper-100k.pdf"},
 	{"html4", "html_x_4"},
-	{"cp", "cp.html"},
-	{"c", "fields.c"},
-	{"lsp", "grammar.lsp"},
-	{"xls", "kennedy.xls"},
 	{"txt1", "alice29.txt"},
 	{"txt2", "asyoulik.txt"},
 	{"txt3", "lcet10.txt"},
 	{"txt4", "plrabn12.txt"},
-	{"bin", "ptt5"},
-	{"sum", "sum"},
-	{"man", "xargs.1"},
 	{"pb", "geo.protodata"},
 	{"gaviota", "kppkn.gtb"},
 }
 
 // The test data files are present at this canonical URL.
-const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"
+const baseURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
 
 func downloadTestdata(basename string) (errRet error) {
-	filename := filepath.Join("testdata", basename)
+	filename := filepath.Join(*testdata, basename)
+	if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
+		return nil
+	}
+
+	if !*download {
+		return fmt.Errorf("test data not found; run with the -download flag to fetch it")
+	}
+	// Download the official snappy C++ implementation reference test data
+	// files for benchmarking.
+	if err := os.Mkdir(*testdata, 0777); err != nil && !os.IsExist(err) {
+		return fmt.Errorf("failed to create testdata: %s", err)
+	}
+
 	f, err := os.Create(filename)
 	if err != nil {
 		return fmt.Errorf("failed to create %s: %s", filename, err)
@@ -185,36 +309,27 @@ func downloadTestdata(basename string) (errRet error) {
 			os.Remove(filename)
 		}
 	}()
-	resp, err := http.Get(baseURL + basename)
+	url := baseURL + basename
+	resp, err := http.Get(url)
 	if err != nil {
-		return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
+		return fmt.Errorf("failed to download %s: %s", url, err)
 	}
 	defer resp.Body.Close()
+	if s := resp.StatusCode; s != http.StatusOK {
+		return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
+	}
 	_, err = io.Copy(f, resp.Body)
 	if err != nil {
-		return fmt.Errorf("failed to write %s: %s", filename, err)
+		return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
 	}
 	return nil
 }
 
 func benchFile(b *testing.B, n int, decode bool) {
-	filename := filepath.Join("testdata", testFiles[n].filename)
-	if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
-		if !*download {
-			b.Fatal("test data not found; skipping benchmark without the -download flag")
-		}
-		// Download the official snappy C++ implementation reference test data
-		// files for benchmarking.
-		if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
-			b.Fatalf("failed to create testdata: %s", err)
-		}
-		for _, tf := range testFiles {
-			if err := downloadTestdata(tf.filename); err != nil {
-				b.Fatalf("failed to download testdata: %s", err)
-			}
-		}
+	if err := downloadTestdata(testFiles[n].filename); err != nil {
+		b.Fatalf("failed to download testdata: %s", err)
 	}
-	data := readFile(b, filename)
+	data := readFile(b, filepath.Join(*testdata, testFiles[n].filename))
 	if decode {
 		benchDecode(b, data)
 	} else {
@@ -235,12 +350,6 @@ func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
 func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
 func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
 func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
-func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
-func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
-func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
-func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
-func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
 func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
 func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
 func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
@@ -253,9 +362,3 @@ func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
 func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
 func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
 func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
-func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
-func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
-func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
-func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
-func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }