From f91312dbdbb9e04ef578946226e5d8069d5dfd5a Mon Sep 17 00:00:00 2001
From: Kris Shinn <raggamuffin.music@gmail.com>
Date: Mon, 21 Jan 2019 06:38:13 -0800
Subject: [PATCH] GraphQL master FF for review  (#18445)

* Initial work on a graphql API

* Added receipts, and more transaction fields.

* Finish receipts, add logs

* Add transactionCount to block

* Add types  and .

* Update Block type to be compatible with ethql

* Rename nonce to transactionCount in Account, to be compatible with ethql

* Update transaction, receipt and log to match ethql

* Add  query operator, for a range of blocks

* Added ommerCount to Block

* Add transactionAt and ommerAt to Block

* Added sendRawTransaction mutation

* Add Call and EstimateGas to graphQL API

* Refactored to use hexutil.Bytes instead of HexBytes

* Replace BigNum with hexutil.Big

* Refactor call and estimateGas to use ethapi struct type

* Replace ethgraphql.Address with common.Address

* Replace ethgraphql.Hash with common.Hash

* Converted most quantities to Long instead of Int

* Add support for logs

* Fix bug in runFilter

* Restructured Transaction to work primarily with headers, so uncle data is reported properly

* Add gasPrice API

* Add protocolVersion API

* Add syncing API

* Moved schema into its own source file

* Move some single use args types into anonymous structs

* Add doc-comments

* Fixed backend fetching to use context

* Added (very) basic tests

* Add documentation to the graphql schema

* Fix reversion for formatting of big numbers

* Correct spelling error

* s/BigInt/Long/

* Update common/types.go

* Fixes in response to review

* Fix lint error

* Updated calls on private functions

* Fix typo in graphql.go

* Rollback ethapi breaking changes for graphql support
Co-Authored-By: Arachnid <arachnid@notdot.net>
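
For reviewers, an illustrative query against the new endpoint: run geth with
--graphql and POST to the /graphql path (GraphiQL is served on /). Field names
follow the schema added in this patch; the selection below is arbitrary:

    {
        block {
            number
            hash
            miner { address }
        }
        gasPrice
    }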
---
 cmd/geth/config.go                            |    8 +
 cmd/geth/main.go                              |    5 +
 cmd/utils/flags.go                            |   43 +
 common/hexutil/json.go                        |   55 +
 common/types.go                               |   30 +
 graphql/graphql.go                            | 1104 +++++++++++++++++
 graphql/graphiql.go                           |   95 ++
 graphql/graphql_test.go                       |   29 +
 graphql/schema.go                             |  305 +++++
 internal/ethapi/api.go                        |   83 +-
 node/config.go                                |   32 +
 node/defaults.go                              |   10 +-
 rpc/http.go                                   |    4 +-
 .../graph-gophers/graphql-go/Gopkg.lock       |   25 +
 .../graph-gophers/graphql-go/Gopkg.toml       |   10 +
 .../graph-gophers/graphql-go/LICENSE          |   24 +
 .../graph-gophers/graphql-go/README.md        |  100 ++
 .../graph-gophers/graphql-go/errors/errors.go |   41 +
 .../graph-gophers/graphql-go/graphql.go       |  205 +++
 .../github.com/graph-gophers/graphql-go/id.go |   30 +
 .../graphql-go/internal/common/directive.go   |   32 +
 .../graphql-go/internal/common/lexer.go       |  161 +++
 .../graphql-go/internal/common/literals.go    |  206 +++
 .../graphql-go/internal/common/types.go       |   80 ++
 .../graphql-go/internal/common/values.go      |   78 ++
 .../graphql-go/internal/exec/exec.go          |  305 +++++
 .../graphql-go/internal/exec/packer/packer.go |  371 ++++++
 .../internal/exec/resolvable/meta.go          |   58 +
 .../internal/exec/resolvable/resolvable.go    |  331 +++++
 .../internal/exec/selected/selected.go        |  238 ++++
 .../graphql-go/internal/query/query.go        |  234 ++++
 .../graphql-go/internal/schema/meta.go        |  190 +++
 .../graphql-go/internal/schema/schema.go      |  570 +++++++++
 .../internal/validation/suggestion.go         |   71 ++
 .../internal/validation/validation.go         |  909 ++++++++++++++
 .../graph-gophers/graphql-go/introspection.go |  117 ++
 .../graphql-go/introspection/introspection.go |  313 +++++
 .../graph-gophers/graphql-go/log/log.go       |   23 +
 .../graph-gophers/graphql-go/relay/relay.go   |   70 ++
 .../graph-gophers/graphql-go/time.go          |   51 +
 .../graph-gophers/graphql-go/trace/trace.go   |   80 ++
 .../graphql-go/trace/validation_trace.go      |   17 +
 42 files changed, 6704 insertions(+), 39 deletions(-)
 create mode 100644 graphql/graphql.go
 create mode 100644 graphql/graphiql.go
 create mode 100644 graphql/graphql_test.go
 create mode 100644 graphql/schema.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/Gopkg.lock
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/Gopkg.toml
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/LICENSE
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/README.md
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/errors/errors.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/graphql.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/id.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/introspection.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/log/log.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/relay/relay.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/time.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/trace/trace.go
 create mode 100644 vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go

diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index f1e281196..62eeef701 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -30,6 +30,7 @@ import (
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/dashboard"
 	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/graphql"
 	"github.com/ethereum/go-ethereum/node"
 	"github.com/ethereum/go-ethereum/params"
 	whisper "github.com/ethereum/go-ethereum/whisper/whisperv6"
@@ -176,6 +177,13 @@ func makeFullNode(ctx *cli.Context) *node.Node {
 		utils.RegisterShhService(stack, &cfg.Shh)
 	}
 
+	// Configure GraphQL if required
+	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
+		if err := graphql.RegisterGraphQLService(stack, cfg.Node.GraphQLEndpoint(), cfg.Node.GraphQLCors, cfg.Node.GraphQLVirtualHosts, cfg.Node.HTTPTimeouts); err != nil {
+			utils.Fatalf("Failed to register the GraphQL service: %v", err)
+		}
+	}
+
 	// Add the Ethereum Stats daemon if requested.
 	if cfg.Ethstats.URL != "" {
 		utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 97033c692..fb5ec20eb 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -141,6 +141,11 @@ var (
 		utils.RPCEnabledFlag,
 		utils.RPCListenAddrFlag,
 		utils.RPCPortFlag,
+		utils.GraphQLEnabledFlag,
+		utils.GraphQLListenAddrFlag,
+		utils.GraphQLPortFlag,
+		utils.GraphQLCORSDomainFlag,
+		utils.GraphQLVirtualHostsFlag,
 		utils.RPCApiFlag,
 		utils.WSEnabledFlag,
 		utils.WSListenAddrFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 60e45d095..33650685c 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -435,6 +435,30 @@ var (
 		Usage: "HTTP-RPC server listening port",
 		Value: node.DefaultHTTPPort,
 	}
+	GraphQLEnabledFlag = cli.BoolFlag{
+		Name:  "graphql",
+		Usage: "Enable the GraphQL server",
+	}
+	GraphQLListenAddrFlag = cli.StringFlag{
+		Name:  "graphql.addr",
+		Usage: "GraphQL server listening interface",
+		Value: node.DefaultGraphQLHost,
+	}
+	GraphQLPortFlag = cli.IntFlag{
+		Name:  "graphql.port",
+		Usage: "GraphQL server listening port",
+		Value: node.DefaultGraphQLPort,
+	}
+	GraphQLCORSDomainFlag = cli.StringFlag{
+		Name:  "graphql.rpccorsdomain",
+		Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
+		Value: "",
+	}
+	GraphQLVirtualHostsFlag = cli.StringFlag{
+		Name:  "graphql.rpcvhosts",
+		Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+		Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
+	}
 	RPCCORSDomainFlag = cli.StringFlag{
 		Name:  "rpccorsdomain",
 		Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced)",
@@ -796,6 +820,24 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
 	}
 }
 
+// setGraphQL creates the GraphQL listener interface string from the set
+// command line flags, returning empty if the GraphQL endpoint is disabled.
+func setGraphQL(ctx *cli.Context, cfg *node.Config) {
+	if ctx.GlobalBool(GraphQLEnabledFlag.Name) && cfg.GraphQLHost == "" {
+		cfg.GraphQLHost = "127.0.0.1"
+		if ctx.GlobalIsSet(GraphQLListenAddrFlag.Name) {
+			cfg.GraphQLHost = ctx.GlobalString(GraphQLListenAddrFlag.Name)
+		}
+	}
+	cfg.GraphQLPort = ctx.GlobalInt(GraphQLPortFlag.Name)
+	if ctx.GlobalIsSet(GraphQLCORSDomainFlag.Name) {
+		cfg.GraphQLCors = splitAndTrim(ctx.GlobalString(GraphQLCORSDomainFlag.Name))
+	}
+	if ctx.GlobalIsSet(GraphQLVirtualHostsFlag.Name) {
+		cfg.GraphQLVirtualHosts = splitAndTrim(ctx.GlobalString(GraphQLVirtualHostsFlag.Name))
+	}
+}
+
 // setWS creates the WebSocket RPC listener interface string from the set
 // command line flags, returning empty if the HTTP endpoint is disabled.
 func setWS(ctx *cli.Context, cfg *node.Config) {
@@ -978,6 +1020,7 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
 	SetP2PConfig(ctx, &cfg.P2P)
 	setIPC(ctx, cfg)
 	setHTTP(ctx, cfg)
+	setGraphQL(ctx, cfg)
 	setWS(ctx, cfg)
 	setNodeUserIdent(ctx, cfg)
 
diff --git a/common/hexutil/json.go b/common/hexutil/json.go
index fbc21241c..777b08eca 100644
--- a/common/hexutil/json.go
+++ b/common/hexutil/json.go
@@ -72,6 +72,25 @@ func (b Bytes) String() string {
 	return Encode(b)
 }
 
+// ImplementsGraphQLType returns true if Bytes implements the specified GraphQL type.
+func (b Bytes) ImplementsGraphQLType(name string) bool { return name == "Bytes" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		data, err := Decode(input)
+		if err != nil {
+			return err
+		}
+		*b = data
+	default:
+		err = fmt.Errorf("Unexpected type for Bytes: %v", input)
+	}
+	return err
+}
+
 // UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
 // determines the required input length. This function is commonly used to implement the
 // UnmarshalJSON method for fixed-size types.
@@ -187,6 +206,25 @@ func (b *Big) String() string {
 	return EncodeBig(b.ToInt())
 }
 
+// ImplementsGraphQLType returns true if Big implements the provided GraphQL type.
+func (b Big) ImplementsGraphQLType(name string) bool { return name == "BigInt" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (b *Big) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		return b.UnmarshalText([]byte(input))
+	case int32:
+		var num big.Int
+		num.SetInt64(int64(input))
+		*b = Big(num)
+	default:
+		err = fmt.Errorf("Unexpected type for BigInt: %v", input)
+	}
+	return err
+}
+
 // Uint64 marshals/unmarshals as a JSON string with 0x prefix.
 // The zero value marshals as "0x0".
 type Uint64 uint64
@@ -234,6 +272,23 @@ func (b Uint64) String() string {
 	return EncodeUint64(uint64(b))
 }
 
+// ImplementsGraphQLType returns true if Uint64 implements the provided GraphQL type.
+func (b Uint64) ImplementsGraphQLType(name string) bool { return name == "Long" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		return b.UnmarshalText([]byte(input))
+	case int32:
+		*b = Uint64(input)
+	default:
+		err = fmt.Errorf("Unexpected type for Long: %v", input)
+	}
+	return err
+}
+
 // Uint marshals/unmarshals as a JSON string with 0x prefix.
 // The zero value marshals as "0x0".
 type Uint uint
diff --git a/common/types.go b/common/types.go
index 0f4892d28..48043788f 100644
--- a/common/types.go
+++ b/common/types.go
@@ -141,6 +141,21 @@ func (h Hash) Value() (driver.Value, error) {
 	return h[:], nil
 }
 
+// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
+func (_ Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (h *Hash) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		*h = HexToHash(input)
+	default:
+		err = fmt.Errorf("Unexpected type for Bytes32: %v", input)
+	}
+	return err
+}
+
 // UnprefixedHash allows marshaling a Hash without 0x prefix.
 type UnprefixedHash Hash
 
@@ -268,6 +283,21 @@ func (a Address) Value() (driver.Value, error) {
 	return a[:], nil
 }
 
+// ImplementsGraphQLType returns true if Address implements the specified GraphQL type.
+func (a Address) ImplementsGraphQLType(name string) bool { return name == "Address" }
+
+// UnmarshalGraphQL unmarshals the provided GraphQL query data.
+func (a *Address) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		*a = HexToAddress(input)
+	default:
+		err = fmt.Errorf("Unexpected type for Address: %v", input)
+	}
+	return err
+}
+
 // UnprefixedAddress allows marshaling an Address without 0x prefix.
 type UnprefixedAddress Address
 
diff --git a/graphql/graphql.go b/graphql/graphql.go
new file mode 100644
index 000000000..1eca78956
--- /dev/null
+++ b/graphql/graphql.go
@@ -0,0 +1,1104 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package graphql provides a GraphQL interface to Ethereum node data.
+package graphql
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/ethereum/go-ethereum"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/core/vm"
+	"github.com/ethereum/go-ethereum/eth"
+	"github.com/ethereum/go-ethereum/eth/filters"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/node"
+	"github.com/ethereum/go-ethereum/p2p"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/rpc"
+	graphqlgo "github.com/graph-gophers/graphql-go"
+	"github.com/graph-gophers/graphql-go/relay"
+)
+
+// Account represents an Ethereum account at a particular block.
+type Account struct {
+	backend     *eth.EthAPIBackend
+	address     common.Address
+	blockNumber rpc.BlockNumber
+}
+
+// getState fetches the StateDB object for an account.
+func (a *Account) getState(ctx context.Context) (*state.StateDB, error) {
+	state, _, err := a.backend.StateAndHeaderByNumber(ctx, a.blockNumber)
+	return state, err
+}
+
+func (a *Account) Address(ctx context.Context) (common.Address, error) {
+	return a.address, nil
+}
+
+func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
+	state, err := a.getState(ctx)
+	if err != nil {
+		return hexutil.Big{}, err
+	}
+
+	return hexutil.Big(*state.GetBalance(a.address)), nil
+}
+
+func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
+	state, err := a.getState(ctx)
+	if err != nil {
+		return 0, err
+	}
+
+	return hexutil.Uint64(state.GetNonce(a.address)), nil
+}
+
+func (a *Account) Code(ctx context.Context) (hexutil.Bytes, error) {
+	state, err := a.getState(ctx)
+	if err != nil {
+		return hexutil.Bytes{}, err
+	}
+
+	return hexutil.Bytes(state.GetCode(a.address)), nil
+}
+
+func (a *Account) Storage(ctx context.Context, args struct{ Slot common.Hash }) (common.Hash, error) {
+	state, err := a.getState(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+
+	return state.GetState(a.address, args.Slot), nil
+}
+
+// Log represents an individual log message. All arguments are mandatory.
+type Log struct {
+	backend     *eth.EthAPIBackend
+	transaction *Transaction
+	log         *types.Log
+}
+
+func (l *Log) Transaction(ctx context.Context) *Transaction {
+	return l.transaction
+}
+
+func (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {
+	return &Account{
+		backend:     l.backend,
+		address:     l.log.Address,
+		blockNumber: args.Number(),
+	}
+}
+
+func (l *Log) Index(ctx context.Context) int32 {
+	return int32(l.log.Index)
+}
+
+func (l *Log) Topics(ctx context.Context) []common.Hash {
+	return l.log.Topics
+}
+
+func (l *Log) Data(ctx context.Context) hexutil.Bytes {
+	return hexutil.Bytes(l.log.Data)
+}
+
+// Transaction represents an Ethereum transaction.
+// backend and hash are mandatory; all others will be fetched when required.
+type Transaction struct {
+	backend *eth.EthAPIBackend
+	hash    common.Hash
+	tx      *types.Transaction
+	block   *Block
+	index   uint64
+}
+
+// resolve returns the internal transaction object, fetching it if needed.
+func (t *Transaction) resolve(ctx context.Context) (*types.Transaction, error) {
+	if t.tx == nil {
+		tx, blockHash, _, index := rawdb.ReadTransaction(t.backend.ChainDb(), t.hash)
+		if tx != nil {
+			t.tx = tx
+			t.block = &Block{
+				backend: t.backend,
+				hash:    blockHash,
+			}
+			t.index = index
+		} else {
+			t.tx = t.backend.GetPoolTransaction(t.hash)
+		}
+	}
+	return t.tx, nil
+}
+
+func (t *Transaction) Hash(ctx context.Context) common.Hash {
+	return t.hash
+}
+
+func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return hexutil.Bytes{}, err
+	}
+	return hexutil.Bytes(tx.Data()), nil
+}
+
+func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return 0, err
+	}
+	return hexutil.Uint64(tx.Gas()), nil
+}
+
+func (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return hexutil.Big{}, err
+	}
+	return hexutil.Big(*tx.GasPrice()), nil
+}
+
+func (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return hexutil.Big{}, err
+	}
+	return hexutil.Big(*tx.Value()), nil
+}
+
+func (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return 0, err
+	}
+	return hexutil.Uint64(tx.Nonce()), nil
+}
+
+func (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return nil, err
+	}
+
+	to := tx.To()
+	if to == nil {
+		return nil, nil
+	}
+
+	return &Account{
+		backend:     t.backend,
+		address:     *to,
+		blockNumber: args.Number(),
+	}, nil
+}
+
+func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, error) {
+	tx, err := t.resolve(ctx)
+	if err != nil || tx == nil {
+		return nil, err
+	}
+
+	var signer types.Signer = types.FrontierSigner{}
+	if tx.Protected() {
+		signer = types.NewEIP155Signer(tx.ChainId())
+	}
+	from, _ := types.Sender(signer, tx)
+
+	return &Account{
+		backend:     t.backend,
+		address:     from,
+		blockNumber: args.Number(),
+	}, nil
+}
+
+func (t *Transaction) Block(ctx context.Context) (*Block, error) {
+	if _, err := t.resolve(ctx); err != nil {
+		return nil, err
+	}
+	return t.block, nil
+}
+
+func (t *Transaction) Index(ctx context.Context) (*int32, error) {
+	if _, err := t.resolve(ctx); err != nil {
+		return nil, err
+	}
+	if t.block == nil {
+		return nil, nil
+	}
+	index := int32(t.index)
+	return &index, nil
+}
+
+// getReceipt returns the receipt associated with this transaction, if any.
+func (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) {
+	if _, err := t.resolve(ctx); err != nil {
+		return nil, err
+	}
+
+	if t.block == nil {
+		return nil, nil
+	}
+
+	receipts, err := t.block.resolveReceipts(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return receipts[t.index], nil
+}
+
+func (t *Transaction) Status(ctx context.Context) (*hexutil.Uint64, error) {
+	receipt, err := t.getReceipt(ctx)
+	if err != nil || receipt == nil {
+		return nil, err
+	}
+
+	ret := hexutil.Uint64(receipt.Status)
+	return &ret, nil
+}
+
+func (t *Transaction) GasUsed(ctx context.Context) (*hexutil.Uint64, error) {
+	receipt, err := t.getReceipt(ctx)
+	if err != nil || receipt == nil {
+		return nil, err
+	}
+
+	ret := hexutil.Uint64(receipt.GasUsed)
+	return &ret, nil
+}
+
+func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*hexutil.Uint64, error) {
+	receipt, err := t.getReceipt(ctx)
+	if err != nil || receipt == nil {
+		return nil, err
+	}
+
+	ret := hexutil.Uint64(receipt.CumulativeGasUsed)
+	return &ret, nil
+}
+
+func (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) {
+	receipt, err := t.getReceipt(ctx)
+	if err != nil || receipt == nil || receipt.ContractAddress == (common.Address{}) {
+		return nil, err
+	}
+
+	return &Account{
+		backend:     t.backend,
+		address:     receipt.ContractAddress,
+		blockNumber: args.Number(),
+	}, nil
+}
+
+func (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) {
+	receipt, err := t.getReceipt(ctx)
+	if err != nil || receipt == nil {
+		return nil, err
+	}
+
+	ret := make([]*Log, 0, len(receipt.Logs))
+	for _, log := range receipt.Logs {
+		ret = append(ret, &Log{
+			backend:     t.backend,
+			transaction: t,
+			log:         log,
+		})
+	}
+	return &ret, nil
+}
+
+// Block represents an Ethereum block.
+// backend, and either num or hash, are mandatory. All other fields are lazily fetched
+// when required.
+type Block struct {
+	backend  *eth.EthAPIBackend
+	num      *rpc.BlockNumber
+	hash     common.Hash
+	header   *types.Header
+	block    *types.Block
+	receipts []*types.Receipt
+}
+
+// resolve returns the internal Block object representing this block, fetching
+// it if necessary.
+func (b *Block) resolve(ctx context.Context) (*types.Block, error) {
+	if b.block != nil {
+		return b.block, nil
+	}
+
+	var err error
+	if b.hash != (common.Hash{}) {
+		b.block, err = b.backend.GetBlock(ctx, b.hash)
+	} else {
+		b.block, err = b.backend.BlockByNumber(ctx, *b.num)
+	}
+	if b.block != nil {
+		b.header = b.block.Header()
+	}
+	return b.block, err
+}
+
+// resolveHeader returns the internal Header object for this block, fetching it
+// if necessary. Call this function instead of `resolve` unless you need the
+// additional data (transactions and uncles).
+func (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {
+	if b.header == nil {
+		if _, err := b.resolve(ctx); err != nil {
+			return nil, err
+		}
+	}
+	return b.header, nil
+}
+
+// resolveReceipts returns the list of receipts for this block, fetching them
+// if necessary.
+func (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) {
+	if b.receipts == nil {
+		hash := b.hash
+		if hash == (common.Hash{}) {
+			header, err := b.resolveHeader(ctx)
+			if err != nil {
+				return nil, err
+			}
+			hash = header.Hash()
+		}
+
+		receipts, err := b.backend.GetReceipts(ctx, hash)
+		if err != nil {
+			return nil, err
+		}
+		b.receipts = []*types.Receipt(receipts)
+	}
+	return b.receipts, nil
+}
+
+func (b *Block) Number(ctx context.Context) (hexutil.Uint64, error) {
+	if b.num == nil || *b.num == rpc.LatestBlockNumber {
+		header, err := b.resolveHeader(ctx)
+		if err != nil {
+			return 0, err
+		}
+		num := rpc.BlockNumber(header.Number.Uint64())
+		b.num = &num
+	}
+	return hexutil.Uint64(*b.num), nil
+}
+
+func (b *Block) Hash(ctx context.Context) (common.Hash, error) {
+	if b.hash == (common.Hash{}) {
+		header, err := b.resolveHeader(ctx)
+		if err != nil {
+			return common.Hash{}, err
+		}
+		b.hash = header.Hash()
+	}
+	return b.hash, nil
+}
+
+func (b *Block) GasLimit(ctx context.Context) (hexutil.Uint64, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return hexutil.Uint64(header.GasLimit), nil
+}
+
+func (b *Block) GasUsed(ctx context.Context) (hexutil.Uint64, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return hexutil.Uint64(header.GasUsed), nil
+}
+
+func (b *Block) Parent(ctx context.Context) (*Block, error) {
+	// If the block hasn't been fetched, and we'll need it, fetch it.
+	if b.num == nil && b.hash != (common.Hash{}) && b.header == nil {
+		if _, err := b.resolve(ctx); err != nil {
+			return nil, err
+		}
+	}
+
+	if b.header != nil && b.header.Number.Uint64() > 0 {
+		num := rpc.BlockNumber(b.header.Number.Uint64() - 1)
+		return &Block{
+			backend: b.backend,
+			num:     &num,
+			hash:    b.header.ParentHash,
+		}, nil
+	} else if b.num != nil && *b.num != 0 {
+		num := *b.num - 1
+		return &Block{
+			backend: b.backend,
+			num:     &num,
+		}, nil
+	}
+	return nil, nil
+}
+
+func (b *Block) Difficulty(ctx context.Context) (hexutil.Big, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return hexutil.Big{}, err
+	}
+	return hexutil.Big(*header.Difficulty), nil
+}
+
+func (b *Block) Timestamp(ctx context.Context) (hexutil.Big, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return hexutil.Big{}, err
+	}
+	return hexutil.Big(*header.Time), nil
+}
+
+func (b *Block) Nonce(ctx context.Context) (hexutil.Bytes, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return hexutil.Bytes{}, err
+	}
+	return hexutil.Bytes(header.Nonce[:]), nil
+}
+
+func (b *Block) MixHash(ctx context.Context) (common.Hash, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return header.MixDigest, nil
+}
+
+func (b *Block) TransactionsRoot(ctx context.Context) (common.Hash, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return header.TxHash, nil
+}
+
+func (b *Block) StateRoot(ctx context.Context) (common.Hash, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return header.Root, nil
+}
+
+func (b *Block) ReceiptsRoot(ctx context.Context) (common.Hash, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return header.ReceiptHash, nil
+}
+
+func (b *Block) OmmerHash(ctx context.Context) (common.Hash, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return header.UncleHash, nil
+}
+
+func (b *Block) OmmerCount(ctx context.Context) (*int32, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+	count := int32(len(block.Uncles()))
+	return &count, err
+}
+
+func (b *Block) Ommers(ctx context.Context) (*[]*Block, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+
+	ret := make([]*Block, 0, len(block.Uncles()))
+	for _, uncle := range block.Uncles() {
+		blockNumber := rpc.BlockNumber(uncle.Number.Uint64())
+		ret = append(ret, &Block{
+			backend: b.backend,
+			num:     &blockNumber,
+			hash:    uncle.Hash(),
+			header:  uncle,
+		})
+	}
+	return &ret, nil
+}
+
+func (b *Block) ExtraData(ctx context.Context) (hexutil.Bytes, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return hexutil.Bytes{}, err
+	}
+	return hexutil.Bytes(header.Extra), nil
+}
+
+func (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) {
+	header, err := b.resolveHeader(ctx)
+	if err != nil {
+		return hexutil.Bytes{}, err
+	}
+	return hexutil.Bytes(header.Bloom.Bytes()), nil
+}
+
+func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {
+	h := b.hash
+	if h == (common.Hash{}) {
+		header, err := b.resolveHeader(ctx)
+		if err != nil {
+			return hexutil.Big{}, err
+		}
+		h = header.Hash()
+	}
+
+	return hexutil.Big(*b.backend.GetTd(h)), nil
+}
+
+// BlockNumberArgs encapsulates arguments to accessors that specify a block number.
+type BlockNumberArgs struct {
+	Block *hexutil.Uint64
+}
+
+// Number returns the provided block number, or rpc.LatestBlockNumber if none
+// was provided.
+func (a BlockNumberArgs) Number() rpc.BlockNumber {
+	if a.Block != nil {
+		return rpc.BlockNumber(*a.Block)
+	}
+	return rpc.LatestBlockNumber
+}
+
+func (b *Block) Miner(ctx context.Context, args BlockNumberArgs) (*Account, error) {
+	block, err := b.resolve(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Account{
+		backend:     b.backend,
+		address:     block.Coinbase(),
+		blockNumber: args.Number(),
+	}, nil
+}
+
+func (b *Block) TransactionCount(ctx context.Context) (*int32, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+	count := int32(len(block.Transactions()))
+	return &count, err
+}
+
+func (b *Block) Transactions(ctx context.Context) (*[]*Transaction, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+
+	ret := make([]*Transaction, 0, len(block.Transactions()))
+	for i, tx := range block.Transactions() {
+		ret = append(ret, &Transaction{
+			backend: b.backend,
+			hash:    tx.Hash(),
+			tx:      tx,
+			block:   b,
+			index:   uint64(i),
+		})
+	}
+	return &ret, nil
+}
+
+func (b *Block) TransactionAt(ctx context.Context, args struct{ Index int32 }) (*Transaction, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+
+	txes := block.Transactions()
+	if args.Index < 0 || int(args.Index) >= len(txes) {
+		return nil, nil
+	}
+
+	tx := txes[args.Index]
+	return &Transaction{
+		backend: b.backend,
+		hash:    tx.Hash(),
+		tx:      tx,
+		block:   b,
+		index:   uint64(args.Index),
+	}, nil
+}
+
+func (b *Block) OmmerAt(ctx context.Context, args struct{ Index int32 }) (*Block, error) {
+	block, err := b.resolve(ctx)
+	if err != nil || block == nil {
+		return nil, err
+	}
+
+	uncles := block.Uncles()
+	if args.Index < 0 || int(args.Index) >= len(uncles) {
+		return nil, nil
+	}
+
+	uncle := uncles[args.Index]
+	blockNumber := rpc.BlockNumber(uncle.Number.Uint64())
+	return &Block{
+		backend: b.backend,
+		num:     &blockNumber,
+		hash:    uncle.Hash(),
+		header:  uncle,
+	}, nil
+}
+
+// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside
+// a block.
+type BlockFilterCriteria struct {
+	Addresses *[]common.Address // restricts matches to events created by specific contracts
+
+	// The Topic list restricts matches to particular event topics. Each event has a list
+	// of topics. Topics matches a prefix of that list. An empty element slice matches any
+	// topic. Non-empty elements represent an alternative that matches any of the
+	// contained topics.
+	//
+	// Examples:
+	// {} or nil          matches any topic list
+	// {{A}}              matches topic A in first position
+	// {{}, {B}}          matches any topic in first position, B in second position
+	// {{A}, {B}}         matches topic A in first position, B in second position
+	// {{A, B}, {C, D}}   matches topic (A OR B) in first position, (C OR D) in second position
+	Topics *[][]common.Hash
+}
+
+// runFilter accepts a filter and executes it, returning all its results as
+// `Log` objects.
+func runFilter(ctx context.Context, be *eth.EthAPIBackend, filter *filters.Filter) ([]*Log, error) {
+	logs, err := filter.Logs(ctx)
+	if err != nil || logs == nil {
+		return nil, err
+	}
+
+	ret := make([]*Log, 0, len(logs))
+	for _, log := range logs {
+		ret = append(ret, &Log{
+			backend:     be,
+			transaction: &Transaction{backend: be, hash: log.TxHash},
+			log:         log,
+		})
+	}
+	return ret, nil
+}
+
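+// Logs returns log entries generated in this block that match the given
+// filter. An illustrative query, using placeholder topic and block values:
+//
+//   { block(number: 123) { logs(filter: {topics: [["0x..."]]}) { index data } } }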
+func (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteria }) ([]*Log, error) {
+	var addresses []common.Address
+	if args.Filter.Addresses != nil {
+		addresses = *args.Filter.Addresses
+	}
+
+	var topics [][]common.Hash
+	if args.Filter.Topics != nil {
+		topics = *args.Filter.Topics
+	}
+
+	hash := b.hash
+	if hash == (common.Hash{}) {
+		block, err := b.resolve(ctx)
+		if err != nil {
+			return nil, err
+		}
+		hash = block.Hash()
+	}
+
+	// Construct the range filter
+	filter := filters.NewBlockFilter(b.backend, hash, addresses, topics)
+
+	// Run the filter and return all the logs
+	return runFilter(ctx, b.backend, filter)
+}
+
+// Resolver is the top-level object in the GraphQL hierarchy.
+type Resolver struct {
+	backend *eth.EthAPIBackend
+}
+
+func (r *Resolver) Block(ctx context.Context, args struct {
+	Number *hexutil.Uint64
+	Hash   *common.Hash
+}) (*Block, error) {
+	var block *Block
+	if args.Number != nil {
+		num := rpc.BlockNumber(uint64(*args.Number))
+		block = &Block{
+			backend: r.backend,
+			num:     &num,
+		}
+	} else if args.Hash != nil {
+		block = &Block{
+			backend: r.backend,
+			hash:    *args.Hash,
+		}
+	} else {
+		num := rpc.LatestBlockNumber
+		block = &Block{
+			backend: r.backend,
+			num:     &num,
+		}
+	}
+
+	// Resolve the block; if it doesn't exist, return nil.
+	b, err := block.resolve(ctx)
+	if err != nil {
+		return nil, err
+	} else if b == nil {
+		return nil, nil
+	}
+	return block, nil
+}
+
+func (r *Resolver) Blocks(ctx context.Context, args struct {
+	From hexutil.Uint64
+	To   *hexutil.Uint64
+}) ([]*Block, error) {
+	from := rpc.BlockNumber(args.From)
+
+	var to rpc.BlockNumber
+	if args.To != nil {
+		to = rpc.BlockNumber(*args.To)
+	} else {
+		to = rpc.BlockNumber(r.backend.CurrentBlock().Number().Int64())
+	}
+
+	if to < from {
+		return []*Block{}, nil
+	}
+
+	ret := make([]*Block, 0, to-from+1)
+	for i := from; i <= to; i++ {
+		num := i
+		ret = append(ret, &Block{
+			backend: r.backend,
+			num:     &num,
+		})
+	}
+	return ret, nil
+}
+
+func (r *Resolver) Account(ctx context.Context, args struct {
+	Address     common.Address
+	BlockNumber *hexutil.Uint64
+}) *Account {
+	blockNumber := rpc.LatestBlockNumber
+	if args.BlockNumber != nil {
+		blockNumber = rpc.BlockNumber(*args.BlockNumber)
+	}
+
+	return &Account{
+		backend:     r.backend,
+		address:     args.Address,
+		blockNumber: blockNumber,
+	}
+}
+
+func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Hash }) (*Transaction, error) {
+	tx := &Transaction{
+		backend: r.backend,
+		hash:    args.Hash,
+	}
+
+	// Resolve the transaction; if it doesn't exist, return nil.
+	t, err := tx.resolve(ctx)
+	if err != nil {
+		return nil, err
+	} else if t == nil {
+		return nil, nil
+	}
+	return tx, nil
+}
+
+func (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hexutil.Bytes }) (common.Hash, error) {
+	tx := new(types.Transaction)
+	if err := rlp.DecodeBytes(args.Data, tx); err != nil {
+		return common.Hash{}, err
+	}
+	hash, err := ethapi.SubmitTransaction(ctx, r.backend, tx)
+	return hash, err
+}
+
+// CallData encapsulates arguments to `call` or `estimateGas`.
+// All arguments are optional.
+type CallData struct {
+	From     *common.Address // The Ethereum address the call is from.
+	To       *common.Address // The Ethereum address the call is to.
+	Gas      *hexutil.Uint64 // The amount of gas provided for the call.
+	GasPrice *hexutil.Big    // The price of each unit of gas, in wei.
+	Value    *hexutil.Big    // The value sent along with the call.
+	Data     *hexutil.Bytes  // Any data sent with the call.
+}
+
+// CallResult encapsulates the result of an invocation of the `call` accessor.
+type CallResult struct {
+	data    hexutil.Bytes  // The return data from the call
+	gasUsed hexutil.Uint64 // The amount of gas used
+	status  hexutil.Uint64 // The return status of the call - 0 for failure or 1 for success.
+}
+
+func (c *CallResult) Data() hexutil.Bytes {
+	return c.data
+}
+
+func (c *CallResult) GasUsed() hexutil.Uint64 {
+	return c.gasUsed
+}
+
+func (c *CallResult) Status() hexutil.Uint64 {
+	return c.status
+}
+
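+// Call executes a read-only message call against the requested block
+// (defaulting to the latest) and returns its output, gas used and status.
+// An illustrative query, using placeholder addresses and call data:
+//
+//   { call(data: {to: "0x...", data: "0x..."}) { data gasUsed status } }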
+func (r *Resolver) Call(ctx context.Context, args struct {
+	Data        ethapi.CallArgs
+	BlockNumber *hexutil.Uint64
+}) (*CallResult, error) {
+	blockNumber := rpc.LatestBlockNumber
+	if args.BlockNumber != nil {
+		blockNumber = rpc.BlockNumber(*args.BlockNumber)
+	}
+
+	result, gas, failed, err := ethapi.DoCall(ctx, r.backend, args.Data, blockNumber, vm.Config{}, 5*time.Second)
+	status := hexutil.Uint64(1)
+	if failed {
+		status = 0
+	}
+	return &CallResult{
+		data:    hexutil.Bytes(result),
+		gasUsed: hexutil.Uint64(gas),
+		status:  status,
+	}, err
+}
+
+func (r *Resolver) EstimateGas(ctx context.Context, args struct {
+	Data        ethapi.CallArgs
+	BlockNumber *hexutil.Uint64
+}) (hexutil.Uint64, error) {
+	blockNumber := rpc.LatestBlockNumber
+	if args.BlockNumber != nil {
+		blockNumber = rpc.BlockNumber(*args.BlockNumber)
+	}
+
+	gas, err := ethapi.DoEstimateGas(ctx, r.backend, args.Data, blockNumber)
+	return gas, err
+}
+
+// FilterCriteria encapsulates the arguments to `logs` on the root resolver object.
+type FilterCriteria struct {
+	FromBlock *hexutil.Uint64   // beginning of the queried range, nil means genesis block
+	ToBlock   *hexutil.Uint64   // end of the range, nil means latest block
+	Addresses *[]common.Address // restricts matches to events created by specific contracts
+
+	// The Topic list restricts matches to particular event topics. Each event has a list
+	// of topics. Topics matches a prefix of that list. An empty element slice matches any
+	// topic. Non-empty elements represent an alternative that matches any of the
+	// contained topics.
+	//
+	// Examples:
+	// {} or nil          matches any topic list
+	// {{A}}              matches topic A in first position
+	// {{}, {B}}          matches any topic in first position, B in second position
+	// {{A}, {B}}         matches topic A in first position, B in second position
+	// {{A, B}, {C, D}}   matches topic (A OR B) in first position, (C OR D) in second position
+	Topics *[][]common.Hash
+}
+
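+// Logs returns log entries matching the given filter across the requested
+// block range. An illustrative query, using placeholder block and address values:
+//
+//   { logs(filter: {fromBlock: "0x0", addresses: ["0x..."]}) { index topics data } }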
+func (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria }) ([]*Log, error) {
+	// Convert the RPC block numbers into internal representations
+	begin := rpc.LatestBlockNumber.Int64()
+	if args.Filter.FromBlock != nil {
+		begin = int64(*args.Filter.FromBlock)
+	}
+	end := rpc.LatestBlockNumber.Int64()
+	if args.Filter.ToBlock != nil {
+		end = int64(*args.Filter.ToBlock)
+	}
+
+	var addresses []common.Address
+	if args.Filter.Addresses != nil {
+		addresses = *args.Filter.Addresses
+	}
+
+	var topics [][]common.Hash
+	if args.Filter.Topics != nil {
+		topics = *args.Filter.Topics
+	}
+
+	// Construct the range filter
+	filter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)
+
+	return runFilter(ctx, r.backend, filter)
+}
+
+func (r *Resolver) GasPrice(ctx context.Context) (hexutil.Big, error) {
+	price, err := r.backend.SuggestPrice(ctx)
+	return hexutil.Big(*price), err
+}
+
+func (r *Resolver) ProtocolVersion(ctx context.Context) (int32, error) {
+	return int32(r.backend.ProtocolVersion()), nil
+}
+
+// SyncState represents the synchronisation status returned from the `syncing` accessor.
+type SyncState struct {
+	progress ethereum.SyncProgress
+}
+
+func (s *SyncState) StartingBlock() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.StartingBlock)
+}
+
+func (s *SyncState) CurrentBlock() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.CurrentBlock)
+}
+
+func (s *SyncState) HighestBlock() hexutil.Uint64 {
+	return hexutil.Uint64(s.progress.HighestBlock)
+}
+
+func (s *SyncState) PulledStates() *hexutil.Uint64 {
+	ret := hexutil.Uint64(s.progress.PulledStates)
+	return &ret
+}
+
+func (s *SyncState) KnownStates() *hexutil.Uint64 {
+	ret := hexutil.Uint64(s.progress.KnownStates)
+	return &ret
+}
+
+// Syncing returns nil if the node is currently not syncing with the network. It can be up to date or it has not
+// yet received the latest block headers from its peers. In case it is synchronizing:
+// - startingBlock: block number this node started to synchronise from
+// - currentBlock:  block number this node is currently importing
+// - highestBlock:  block number of the highest block header this node has received from peers
+// - pulledStates:  number of state entries processed until now
+// - knownStates:   number of known state entries that still need to be pulled
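+//
+// An illustrative query, selecting an arbitrary subset of these fields:
+//
+//   { syncing { startingBlock currentBlock highestBlock } }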
+func (r *Resolver) Syncing() (*SyncState, error) {
+	progress := r.backend.Downloader().Progress()
+
+	// Return not syncing if the synchronisation already completed
+	if progress.CurrentBlock >= progress.HighestBlock {
+		return nil, nil
+	}
+	// Otherwise gather the block sync stats
+	return &SyncState{progress}, nil
+}
+
+// NewHandler returns a new `http.Handler` that will answer GraphQL queries.
+// It additionally exports an interactive query browser on the / endpoint.
+func NewHandler(be *eth.EthAPIBackend) (http.Handler, error) {
+	q := Resolver{be}
+
+	s, err := graphqlgo.ParseSchema(schema, &q)
+	if err != nil {
+		return nil, err
+	}
+	h := &relay.Handler{Schema: s}
+
+	mux := http.NewServeMux()
+	mux.Handle("/", GraphiQL{})
+	mux.Handle("/graphql", h)
+	mux.Handle("/graphql/", h)
+	return mux, nil
+}
+
+// Service encapsulates a GraphQL service.
+type Service struct {
+	endpoint string             // The host:port endpoint for this service.
+	cors     []string           // Allowed CORS domains
+	vhosts   []string           // Recognised vhosts
+	timeouts rpc.HTTPTimeouts   // Timeout settings for HTTP requests.
+	backend  *eth.EthAPIBackend // The backend that queries will operate on.
+	handler  http.Handler       // The `http.Handler` used to answer queries.
+	listener net.Listener       // The listening socket.
+}
+
+// Protocols returns the list of protocols exported by this service.
+func (s *Service) Protocols() []p2p.Protocol { return nil }
+
+// APIs returns the list of APIs exported by this service.
+func (s *Service) APIs() []rpc.API { return nil }
+
+// Start is called after all services have been constructed and the networking
+// layer was also initialized to spawn any goroutines required by the service.
+func (s *Service) Start(server *p2p.Server) error {
+	var err error
+	s.handler, err = NewHandler(s.backend)
+	if err != nil {
+		return err
+	}
+
+	if s.listener, err = net.Listen("tcp", s.endpoint); err != nil {
+		return err
+	}
+
+	go rpc.NewHTTPServer(s.cors, s.vhosts, s.timeouts, s.handler).Serve(s.listener)
+	log.Info("GraphQL endpoint opened", "url", fmt.Sprintf("http://%s", s.endpoint))
+	return nil
+}
+
+// Stop terminates all goroutines belonging to the service, blocking until they
+// are all terminated.
+func (s *Service) Stop() error {
+	if s.listener != nil {
+		s.listener.Close()
+		s.listener = nil
+		log.Info("GraphQL endpoint closed", "url", fmt.Sprintf("http://%s", s.endpoint))
+	}
+	return nil
+}
+
+// NewService constructs a new service instance.
+func NewService(backend *eth.EthAPIBackend, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) (*Service, error) {
+	return &Service{
+		endpoint: endpoint,
+		cors:     cors,
+		vhosts:   vhosts,
+		timeouts: timeouts,
+		backend:  backend,
+	}, nil
+}
+
+// RegisterGraphQLService is a utility function to construct a new service and register it against a node.
+func RegisterGraphQLService(stack *node.Node, endpoint string, cors, vhosts []string, timeouts rpc.HTTPTimeouts) error {
+	return stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
+		var ethereum *eth.Ethereum
+		if err := ctx.Service(&ethereum); err != nil {
+			return nil, err
+		}
+		return NewService(ethereum.APIBackend, endpoint, cors, vhosts, timeouts)
+	})
+}
diff --git a/graphql/graphiql.go b/graphql/graphiql.go
new file mode 100644
index 000000000..6d9dda3e8
--- /dev/null
+++ b/graphql/graphiql.go
@@ -0,0 +1,95 @@
+// The MIT License (MIT)
+//
+// Copyright (c) 2016 Muhammed Thanish
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package graphql
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+// GraphiQL is an in-browser IDE for exploring GraphQL APIs.
+// This handler returns GraphiQL when requested.
+//
+// For more information, see https://github.com/graphql/graphiql.
+type GraphiQL struct{}
+
+func respond(w http.ResponseWriter, body []byte, code int) {
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.Header().Set("X-Content-Type-Options", "nosniff")
+	w.WriteHeader(code)
+	_, _ = w.Write(body)
+}
+
+func errorJSON(msg string) []byte {
+	buf := bytes.Buffer{}
+	fmt.Fprintf(&buf, `{"error": "%s"}`, msg)
+	return buf.Bytes()
+}
+
+func (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		respond(w, errorJSON("only GET requests are supported"), http.StatusMethodNotAllowed)
+		return
+	}
+
+	w.Write(graphiql)
+}
+
+var graphiql = []byte(`
+<!DOCTYPE html>
+<html>
+	<head>
+		<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.11.11/graphiql.css"/>
+		<script src="https://cdnjs.cloudflare.com/ajax/libs/fetch/2.0.3/fetch.min.js"></script>
+		<script src="https://cdnjs.cloudflare.com/ajax/libs/react/16.2.0/umd/react.production.min.js"></script>
+		<script src="https://cdnjs.cloudflare.com/ajax/libs/react-dom/16.2.0/umd/react-dom.production.min.js"></script>
+		<script src="https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.11.11/graphiql.min.js"></script>
+	</head>
+	<body style="width: 100%; height: 100%; margin: 0; overflow: hidden;">
+		<div id="graphiql" style="height: 100vh;">Loading...</div>
+		<script>
+			function fetchGQL(params) {
+				return fetch("/graphql", {
+					method: "post",
+					body: JSON.stringify(params),
+					credentials: "include",
+				}).then(function (resp) {
+					return resp.text();
+				}).then(function (body) {
+					try {
+						return JSON.parse(body);
+					} catch (error) {
+						return body;
+					}
+				});
+			}
+
+			ReactDOM.render(
+				React.createElement(GraphiQL, {fetcher: fetchGQL}),
+				document.getElementById("graphiql")
+			)
+		</script>
+	</body>
+</html>
+`)
diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go
new file mode 100644
index 000000000..d63418398
--- /dev/null
+++ b/graphql/graphql_test.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package graphql
+
+import (
+	"testing"
+)
+
+func TestBuildSchema(t *testing.T) {
+	// Make sure the schema can be parsed and matched up to the object model.
+	_, err := NewHandler(nil)
+	if err != nil {
+		t.Errorf("Could not construct GraphQL handler: %v", err)
+	}
+}
diff --git a/graphql/schema.go b/graphql/schema.go
new file mode 100644
index 000000000..c1ba87d2d
--- /dev/null
+++ b/graphql/schema.go
@@ -0,0 +1,305 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package graphql
+
+const schema string = `
+    # Bytes32 is a 32 byte binary string, represented as 0x-prefixed hexadecimal.
+    scalar Bytes32
+    # Address is a 20 byte Ethereum address, represented as 0x-prefixed hexadecimal.
+    scalar Address
+    # Bytes is an arbitrary length binary string, represented as 0x-prefixed hexadecimal.
+    scalar Bytes
+    # BigInt is a large integer. Input is accepted as either a JSON number or as a string.
+    # Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all
+    # 0x-prefixed hexadecimal.
+    scalar BigInt
+    # Long is a 64 bit unsigned integer.
+    scalar Long
+
+    schema {
+        query: Query
+        mutation: Mutation
+    }
+
+    # Account is an Ethereum account at a particular block.
+    type Account {
+        # Address is the address owning the account.
+        address: Address!
+        # Balance is the balance of the account, in wei.
+        balance: BigInt!
+        # TransactionCount is the number of transactions sent from this account,
+        # or in the case of a contract, the number of contracts created. Otherwise
+        # known as the nonce.
+        transactionCount: Long!
+        # Code contains the smart contract code for this account, if the account
+        # is a (non-self-destructed) contract.
+        code: Bytes!
+        # Storage provides access to the storage of a contract account, indexed
+        # by its 32 byte slot identifier.
+        storage(slot: Bytes32!): Bytes32!
+    }
+
+    # Log is an Ethereum event log.
+    type Log {
+        # Index is the index of this log in the block.
+        index: Int!
+        # Account is the account which generated this log - this will always
+        # be a contract account.
+        account(block: Long): Account!
+        # Topics is a list of 0-4 indexed topics for the log.
+        topics: [Bytes32!]!
+        # Data is unindexed data for this log.
+        data: Bytes!
+        # Transaction is the transaction that generated this log entry.
+        transaction: Transaction!
+    }
+
+    # Transaction is an Ethereum transaction.
+    type Transaction {
+        # Hash is the hash of this transaction.
+        hash: Bytes32!
+        # Nonce is the nonce of the account this transaction was generated with.
+        nonce: Long!
+        # Index is the index of this transaction in the parent block. This will
+        # be null if the transaction has not yet been mined.
+        index: Int
+        # From is the account that sent this transaction - this will always be
+        # an externally owned account.
+        from(block: Long): Account!
+        # To is the account the transaction was sent to. This is null for
+        # contract-creating transactions.
+        to(block: Long): Account
+        # Value is the value, in wei, sent along with this transaction.
+        value: BigInt!
+        # GasPrice is the price offered to miners for gas, in wei per unit.
+        gasPrice: BigInt!
+        # Gas is the maximum amount of gas this transaction can consume.
+        gas: Long!
+        # InputData is the data supplied to the target of the transaction.
+        inputData: Bytes!
+        # Block is the block this transaction was mined in. This will be null if
+        # the transaction has not yet been mined.
+        block: Block
+
+        # Status is the return status of the transaction. This will be 1 if the
+        # transaction succeeded, or 0 if it failed (due to a revert, or due to
+        # running out of gas). If the transaction has not yet been mined, this
+        # field will be null.
+        status: Long
+        # GasUsed is the amount of gas that was used processing this transaction.
+        # If the transaction has not yet been mined, this field will be null.
+        gasUsed: Long
+        # CumulativeGasUsed is the total gas used in the block up to and including
+        # this transaction. If the transaction has not yet been mined, this field
+        # will be null.
+        cumulativeGasUsed: Long
+        # CreatedContract is the account that was created by a contract creation
+        # transaction. If the transaction was not a contract creation transaction,
+        # or it has not yet been mined, this field will be null.
+        createdContract(block: Long): Account
+        # Logs is a list of log entries emitted by this transaction. If the
+        # transaction has not yet been mined, this field will be null.
+        logs: [Log!]
+    }
+
+    # BlockFilterCriteria encapsulates log filter criteria for a filter applied
+    # to a single block.
+    input BlockFilterCriteria {
+        # Addresses is a list of addresses that are of interest. If this list is
+        # empty, results will not be filtered by address.
+        addresses: [Address!]
+        # Topics list restricts matches to particular event topics. Each event has a list
+        # of topics. Topics matches a prefix of that list. An empty element array matches any
+        # topic. Non-empty elements represent an alternative that matches any of the
+        # contained topics.
+        #
+        # Examples:
+        #  - [] or nil          matches any topic list
+        #  - [[A]]              matches topic A in first position
+        #  - [[], [B]]          matches any topic in first position, B in second position
+        #  - [[A], [B]]         matches topic A in first position, B in second position
+        #  - [[A, B], [C, D]]   matches topic (A OR B) in first position, (C OR D) in second position
+        topics: [[Bytes32!]!]
+    }
+
+    # Block is an Ethereum block.
+    type Block {
+        # Number is the number of this block, starting at 0 for the genesis block.
+        number: Long!
+        # Hash is the block hash of this block.
+        hash: Bytes32!
+        # Parent is the parent block of this block.
+        parent: Block
+        # Nonce is the block nonce, an 8 byte sequence determined by the miner.
+        nonce: Bytes!
+        # TransactionsRoot is the keccak256 hash of the root of the trie of transactions in this block.
+        transactionsRoot: Bytes32!
+        # TransactionCount is the number of transactions in this block. If
+        # transactions are not available for this block, this field will be null.
+        transactionCount: Int
+        # StateRoot is the keccak256 hash of the state trie after this block was processed.
+        stateRoot: Bytes32!
+        # ReceiptsRoot is the keccak256 hash of the trie of transaction receipts in this block.
+        receiptsRoot: Bytes32!
+        # Miner is the account that mined this block.
+        miner(block: Long): Account!
+        # ExtraData is an arbitrary data field supplied by the miner.
+        extraData: Bytes!
+        # GasLimit is the maximum amount of gas that was available to transactions in this block.
+        gasLimit: Long!
+        # GasUsed is the amount of gas that was used executing transactions in this block.
+        gasUsed: Long!
+        # Timestamp is the unix timestamp at which this block was mined.
+        timestamp: BigInt!
+        # LogsBloom is a bloom filter that can be used to check if a block may
+        # contain log entries matching a filter.
+        logsBloom: Bytes!
+        # MixHash is the hash that was used as an input to the PoW process.
+        mixHash: Bytes32!
+        # Difficulty is a measure of the difficulty of mining this block.
+        difficulty: BigInt!
+        # TotalDifficulty is the sum of all difficulty values up to and including
+        # this block.
+        totalDifficulty: BigInt!
+        # OmmerCount is the number of ommers (AKA uncles) associated with this
+        # block. If ommers are unavailable, this field will be null.
+        ommerCount: Int
+        # Ommers is a list of ommer (AKA uncle) blocks associated with this block.
+        # If ommers are unavailable, this field will be null. Depending on your
+        # node, the transactions, transactionAt, transactionCount, ommers,
+        # ommerCount and ommerAt fields may not be available on any ommer blocks.
+        ommers: [Block]
+        # OmmerAt returns the ommer (AKA uncle) at the specified index. If ommers
+        # are unavailable, or the index is out of bounds, this field will be null.
+        ommerAt(index: Int!): Block
+        # OmmerHash is the keccak256 hash of all the ommers (AKA uncles)
+        # associated with this block.
+        ommerHash: Bytes32!
+        # Transactions is a list of transactions associated with this block. If
+        # transactions are unavailable for this block, this field will be null.
+        transactions: [Transaction!]
+        # TransactionAt returns the transaction at the specified index. If
+        # transactions are unavailable for this block, or if the index is out of
+        # bounds, this field will be null.
+        transactionAt(index: Int!): Transaction
+        # Logs returns a filtered set of logs from this block.
+        logs(filter: BlockFilterCriteria!): [Log!]!
+    }
+
+    # CallData represents the data associated with a local contract call.
+    # All fields are optional.
+    input CallData {
+        # From is the address making the call.
+        from: Address
+        # To is the address the call is sent to.
+        to: Address
+        # Gas is the amount of gas sent with the call.
+        gas: Long
+        # GasPrice is the price, in wei, offered for each unit of gas.
+        gasPrice: BigInt
+        # Value is the value, in wei, sent along with the call.
+        value: BigInt
+        # Data is the data sent to the callee.
+        data: Bytes
+    }
+
+    # CallResult is the result of a local call operation.
+    type CallResult {
+        # Data is the return data of the called contract.
+        data: Bytes!
+        # GasUsed is the amount of gas used by the call, after any refunds.
+        gasUsed: Long!
+        # Status is the result of the call - 1 for success or 0 for failure.
+        status: Long!
+    }
+
+    # FilterCriteria encapsulates log filter criteria for searching log entries.
+    input FilterCriteria {
+        # FromBlock is the block at which to start searching, inclusive. Defaults
+        # to the latest block if not supplied.
+        fromBlock: Long
+        # ToBlock is the block at which to stop searching, inclusive. Defaults
+        # to the latest block if not supplied.
+        toBlock: Long
+        # Addresses is a list of addresses that are of interest. If this list is
+        # empty, results will not be filtered by address.
+        addresses: [Address!]
+        # Topics list restricts matches to particular event topics. Each event has a list
+        # of topics. Topics matches a prefix of that list. An empty element array matches any
+        # topic. Non-empty elements represent an alternative that matches any of the
+        # contained topics.
+        #
+        # Examples:
+        #  - [] or nil          matches any topic list
+        #  - [[A]]              matches topic A in first position
+        #  - [[], [B]]          matches any topic in first position, B in second position
+        #  - [[A], [B]]         matches topic A in first position, B in second position
+        #  - [[A, B], [C, D]]   matches topic (A OR B) in first position, (C OR D) in second position
+        topics: [[Bytes32!]!]
+    }
+
+    # SyncState contains the current synchronisation state of the client.
+    type SyncState {
+        # StartingBlock is the block number at which synchronisation started.
+        startingBlock: Long!
+        # CurrentBlock is the point at which synchronisation has presently reached.
+        currentBlock: Long!
+        # HighestBlock is the latest known block number.
+        highestBlock: Long!
+        # PulledStates is the number of state entries fetched so far, or null
+        # if this is not known or not relevant.
+        pulledStates: Long
+        # KnownStates is the number of states the node knows of so far, or null
+        # if this is not known or not relevant.
+        knownStates: Long
+    }
+
+    type Query {
+        # Account fetches an Ethereum account at the specified block number.
+        # If blockNumber is not provided, it defaults to the most recent block.
+        account(address: Address!, blockNumber: Long): Account!
+        # Block fetches an Ethereum block by number or by hash. If neither is
+        # supplied, the most recent known block is returned.
+        block(number: Long, hash: Bytes32): Block
+        # Blocks returns all the blocks between two numbers, inclusive. If
+        # to is not supplied, it defaults to the most recent known block.
+        blocks(from: Long!, to: Long): [Block!]!
+        # Transaction returns a transaction specified by its hash.
+        transaction(hash: Bytes32!): Transaction
+        # Call executes a local call operation. If blockNumber is not specified,
+        # it defaults to the most recent known block.
+        call(data: CallData!, blockNumber: Long): CallResult
+        # EstimateGas estimates the amount of gas that will be required for
+        # successful execution of a transaction. If blockNumber is not specified,
+        # it defaults to the most recent known block.
+        estimateGas(data: CallData!, blockNumber: Long): Long!
+        # Logs returns log entries matching the provided filter.
+        logs(filter: FilterCriteria!): [Log!]!
+        # GasPrice returns the node's estimate of a gas price sufficient to
+        # ensure a transaction is mined in a timely fashion.
+        gasPrice: BigInt!
+        # ProtocolVersion returns the current wire protocol version number.
+        protocolVersion: Int!
+        # Syncing returns information on the current synchronisation state.
+        syncing: SyncState
+    }
+
+    type Mutation {
+        # SendRawTransaction sends an RLP-encoded transaction to the network.
+        sendRawTransaction(data: Bytes!): Bytes32!
+    }
+`
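
For reference, the Query and Mutation types above can be exercised over plain HTTP once the GraphQL endpoint is enabled. The sketch below is illustrative only and not part of the patch; it assumes the handler is mounted at /graphql and accepts the usual {"query": ...} JSON body, and it uses the default port 8547 introduced in node/defaults.go further down.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request the latest block's number, hash and gas used, matching the Block
	// type defined in the schema above.
	payload, _ := json.Marshal(map[string]string{
		"query": `{ block { number hash gasUsed } }`,
	})

	// The /graphql path and port 8547 are assumptions, not taken from this diff hunk.
	resp, err := http.Post("http://localhost:8547/graphql", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["data"], out["errors"])
}
```
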
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 73b629bd9..26dc1e8a0 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -378,7 +378,7 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
 		log.Warn("Failed transaction send attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err)
 		return common.Hash{}, err
 	}
-	return submitTransaction(ctx, s.b, signed)
+	return SubmitTransaction(ctx, s.b, signed)
 }
 
 // SignTransaction will create a transaction from the given arguments and
@@ -675,41 +675,54 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A
 
 // CallArgs represents the arguments for a call.
 type CallArgs struct {
-	From     common.Address  `json:"from"`
+	From     *common.Address `json:"from"`
 	To       *common.Address `json:"to"`
-	Gas      hexutil.Uint64  `json:"gas"`
-	GasPrice hexutil.Big     `json:"gasPrice"`
-	Value    hexutil.Big     `json:"value"`
-	Data     hexutil.Bytes   `json:"data"`
+	Gas      *hexutil.Uint64 `json:"gas"`
+	GasPrice *hexutil.Big    `json:"gasPrice"`
+	Value    *hexutil.Big    `json:"value"`
+	Data     *hexutil.Bytes  `json:"data"`
 }
 
-func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber, timeout time.Duration) ([]byte, uint64, bool, error) {
+func DoCall(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber, vmCfg vm.Config, timeout time.Duration) ([]byte, uint64, bool, error) {
 	defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now())
 
-	state, header, err := s.b.StateAndHeaderByNumber(ctx, blockNr)
+	state, header, err := b.StateAndHeaderByNumber(ctx, blockNr)
 	if state == nil || err != nil {
 		return nil, 0, false, err
 	}
 	// Set sender address or use a default if none specified
-	addr := args.From
-	if addr == (common.Address{}) {
-		if wallets := s.b.AccountManager().Wallets(); len(wallets) > 0 {
+	var addr common.Address
+	if args.From == nil {
+		if wallets := b.AccountManager().Wallets(); len(wallets) > 0 {
 			if accounts := wallets[0].Accounts(); len(accounts) > 0 {
 				addr = accounts[0].Address
 			}
 		}
+	} else {
+		addr = *args.From
 	}
 	// Set default gas & gas price if none were set
-	gas, gasPrice := uint64(args.Gas), args.GasPrice.ToInt()
-	if gas == 0 {
-		gas = math.MaxUint64 / 2
+	gas := uint64(math.MaxUint64 / 2)
+	if args.Gas != nil {
+		gas = uint64(*args.Gas)
 	}
-	if gasPrice.Sign() == 0 {
-		gasPrice = new(big.Int).SetUint64(defaultGasPrice)
+	gasPrice := new(big.Int).SetUint64(defaultGasPrice)
+	if args.GasPrice != nil {
+		gasPrice = args.GasPrice.ToInt()
+	}
+
+	value := new(big.Int)
+	if args.Value != nil {
+		value = args.Value.ToInt()
+	}
+
+	var data []byte
+	if args.Data != nil {
+		data = []byte(*args.Data)
 	}
 
 	// Create new call message
-	msg := types.NewMessage(addr, args.To, 0, args.Value.ToInt(), gas, gasPrice, args.Data, false)
+	msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, false)
 
 	// Setup context so it may be cancelled the call has completed
 	// or, in case of unmetered gas, setup a context with a timeout.
@@ -724,7 +737,7 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
 	defer cancel()
 
 	// Get a new instance of the EVM.
-	evm, vmError, err := s.b.GetEVM(ctx, msg, state, header)
+	evm, vmError, err := b.GetEVM(ctx, msg, state, header)
 	if err != nil {
 		return nil, 0, false, err
 	}
@@ -748,24 +761,22 @@ func (s *PublicBlockChainAPI) doCall(ctx context.Context, args CallArgs, blockNr
 // Call executes the given transaction on the state for the given block number.
 // It doesn't make and changes in the state/blockchain and is useful to execute and retrieve values.
 func (s *PublicBlockChainAPI) Call(ctx context.Context, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Bytes, error) {
-	result, _, _, err := s.doCall(ctx, args, blockNr, 5*time.Second)
+	result, _, _, err := DoCall(ctx, s.b, args, blockNr, vm.Config{}, 5*time.Second)
 	return (hexutil.Bytes)(result), err
 }
 
-// EstimateGas returns an estimate of the amount of gas needed to execute the
-// given transaction against the current pending block.
-func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (hexutil.Uint64, error) {
+func DoEstimateGas(ctx context.Context, b Backend, args CallArgs, blockNr rpc.BlockNumber) (hexutil.Uint64, error) {
 	// Binary search the gas requirement, as it may be higher than the amount used
 	var (
 		lo  uint64 = params.TxGas - 1
 		hi  uint64
 		cap uint64
 	)
-	if uint64(args.Gas) >= params.TxGas {
-		hi = uint64(args.Gas)
+	if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
+		hi = uint64(*args.Gas)
 	} else {
-		// Retrieve the current pending block to act as the gas ceiling
-		block, err := s.b.BlockByNumber(ctx, rpc.PendingBlockNumber)
+		// Retrieve the block to act as the gas ceiling
+		block, err := b.BlockByNumber(ctx, blockNr)
 		if err != nil {
 			return 0, err
 		}
@@ -775,9 +786,9 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
 
 	// Create a helper to check if a gas allowance results in an executable transaction
 	executable := func(gas uint64) bool {
-		args.Gas = hexutil.Uint64(gas)
+		args.Gas = (*hexutil.Uint64)(&gas)
 
-		_, _, failed, err := s.doCall(ctx, args, rpc.PendingBlockNumber, 0)
+		_, _, failed, err := DoCall(ctx, b, args, rpc.PendingBlockNumber, vm.Config{}, 0)
 		if err != nil || failed {
 			return false
 		}
@@ -801,6 +812,12 @@ func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (h
 	return hexutil.Uint64(hi), nil
 }
 
+// EstimateGas returns an estimate of the amount of gas needed to execute the
+// given transaction against the current pending block.
+func (s *PublicBlockChainAPI) EstimateGas(ctx context.Context, args CallArgs) (hexutil.Uint64, error) {
+	return DoEstimateGas(ctx, s.b, args, rpc.PendingBlockNumber)
+}
+
 // ExecutionResult groups all structured logs emitted by the EVM
 // while replaying a transaction in debug mode as well as transaction
 // execution status, the amount of gas used and the return value
@@ -825,7 +842,7 @@ type StructLogRes struct {
 	Storage *map[string]string `json:"storage,omitempty"`
 }
 
-// formatLogs formats EVM returned structured logs for json output
+// FormatLogs formats EVM returned structured logs for json output
 func FormatLogs(logs []vm.StructLog) []StructLogRes {
 	formatted := make([]StructLogRes, len(logs))
 	for index, trace := range logs {
@@ -1256,8 +1273,8 @@ func (args *SendTxArgs) toTransaction() *types.Transaction {
 	return types.NewTransaction(uint64(*args.Nonce), *args.To, (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), input)
 }
 
-// submitTransaction is a helper function that submits tx to txPool and logs a message.
-func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) {
+// SubmitTransaction is a helper function that submits tx to txPool and logs a message.
+func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (common.Hash, error) {
 	if err := b.SendTx(ctx, tx); err != nil {
 		return common.Hash{}, err
 	}
@@ -1309,7 +1326,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen
 	if err != nil {
 		return common.Hash{}, err
 	}
-	return submitTransaction(ctx, s.b, signed)
+	return SubmitTransaction(ctx, s.b, signed)
 }
 
 // SendRawTransaction will add the signed transaction to the transaction pool.
@@ -1319,7 +1336,7 @@ func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encod
 	if err := rlp.DecodeBytes(encodedTx, tx); err != nil {
 		return common.Hash{}, err
 	}
-	return submitTransaction(ctx, s.b, tx)
+	return SubmitTransaction(ctx, s.b, tx)
 }
 
 // Sign calculates an ECDSA signature for:
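
The changes above turn the private doCall/submitTransaction helpers into the exported DoCall, DoEstimateGas and SubmitTransaction functions that take an explicit Backend, and make every CallArgs field a pointer so an unset field can be told apart from a zero value. A rough sketch, under those assumptions, of how another package inside go-ethereum (such as the new graphql resolvers) might reuse DoCall; the callLatest helper is hypothetical:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/vm"
	// internal/ethapi is only importable from within the go-ethereum module.
	"github.com/ethereum/go-ethereum/internal/ethapi"
	"github.com/ethereum/go-ethereum/rpc"
)

// callLatest performs a read-only call against the latest block. Fields left
// nil in CallArgs (From, Gas, GasPrice, Value) are filled in with defaults by DoCall.
func callLatest(ctx context.Context, b ethapi.Backend, to common.Address, input []byte) ([]byte, error) {
	data := hexutil.Bytes(input)
	args := ethapi.CallArgs{
		To:   &to,
		Data: &data,
	}
	ret, _, failed, err := ethapi.DoCall(ctx, b, args, rpc.LatestBlockNumber, vm.Config{}, 5*time.Second)
	if err != nil {
		return nil, err
	}
	if failed {
		return nil, fmt.Errorf("call failed (reverted or out of gas)")
	}
	return ret, nil
}
```
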
diff --git a/node/config.go b/node/config.go
index 7b32a5908..99f325840 100644
--- a/node/config.go
+++ b/node/config.go
@@ -102,6 +102,29 @@ type Config struct {
 	// for ephemeral nodes).
 	HTTPPort int `toml:",omitempty"`
 
+	// GraphQLHost is the host interface on which to start the GraphQL server. If this
+	// field is empty, no GraphQL API endpoint will be started.
+	GraphQLHost string `toml:",omitempty"`
+
+	// GraphQLPort is the TCP port number on which to start the GraphQL server. The
+	// default zero value is valid and will pick a port number randomly (useful
+	// for ephemeral nodes).
+	GraphQLPort int `toml:",omitempty"`
+
+	// GraphQLCors is the Cross-Origin Resource Sharing header to send to requesting
+	// clients. Please be aware that CORS is a browser-enforced security mechanism;
+	// it's fully useless for custom HTTP clients.
+	GraphQLCors []string `toml:",omitempty"`
+
+	// GraphQLVirtualHosts is the list of virtual hostnames which are allowed on incoming requests.
+	// This is by default {'localhost'}. Using this prevents attacks like
+	// DNS rebinding, which bypasses SOP by simply masquerading as being within the same
+	// origin. These attacks do not utilize CORS, since they are not cross-domain.
+	// By explicitly checking the Host-header, the server will not allow requests
+	// made against the server with a malicious host domain.
+	// Requests using IP addresses directly are not affected.
+	GraphQLVirtualHosts []string `toml:",omitempty"`
+
 	// HTTPCors is the Cross-Origin Resource Sharing header to send to requesting
 	// clients. Please be aware that CORS is a browser enforced security, it's fully
 	// useless for custom HTTP clients.
@@ -213,6 +236,15 @@ func (c *Config) HTTPEndpoint() string {
 	return fmt.Sprintf("%s:%d", c.HTTPHost, c.HTTPPort)
 }
 
+// GraphQLEndpoint resolves a GraphQL endpoint based on the configured host interface
+// and port parameters.
+func (c *Config) GraphQLEndpoint() string {
+	if c.GraphQLHost == "" {
+		return ""
+	}
+	return fmt.Sprintf("%s:%d", c.GraphQLHost, c.GraphQLPort)
+}
+
 // DefaultHTTPEndpoint returns the HTTP endpoint used by default.
 func DefaultHTTPEndpoint() string {
 	config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort}
diff --git a/node/defaults.go b/node/defaults.go
index c1376dba0..cea4997cb 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -28,10 +28,12 @@ import (
 )
 
 const (
-	DefaultHTTPHost = "localhost" // Default host interface for the HTTP RPC server
-	DefaultHTTPPort = 8545        // Default TCP port for the HTTP RPC server
-	DefaultWSHost   = "localhost" // Default host interface for the websocket RPC server
-	DefaultWSPort   = 8546        // Default TCP port for the websocket RPC server
+	DefaultHTTPHost    = "localhost" // Default host interface for the HTTP RPC server
+	DefaultHTTPPort    = 8545        // Default TCP port for the HTTP RPC server
+	DefaultWSHost      = "localhost" // Default host interface for the websocket RPC server
+	DefaultWSPort      = 8546        // Default TCP port for the websocket RPC server
+	DefaultGraphQLHost = "localhost" // Default host interface for the GraphQL server
+	DefaultGraphQLPort = 8547        // Default TCP port for the GraphQL server
 )
 
 // DefaultConfig contains reasonable default settings.
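
Taken together with the Config additions above, these defaults mean a node that enables GraphQL without overrides listens on localhost:8547. A small sketch, not part of the patch, of how the fields compose:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
)

func main() {
	cfg := &node.Config{
		GraphQLHost:         node.DefaultGraphQLHost, // "localhost"
		GraphQLPort:         node.DefaultGraphQLPort, // 8547
		GraphQLCors:         []string{"*"},
		GraphQLVirtualHosts: []string{"localhost"},
	}
	fmt.Println(cfg.GraphQLEndpoint()) // localhost:8547

	// An empty host disables the GraphQL endpoint entirely.
	cfg.GraphQLHost = ""
	fmt.Println(cfg.GraphQLEndpoint() == "") // true
}
```
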
diff --git a/rpc/http.go b/rpc/http.go
index 674166fb3..59dd5eebc 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -198,7 +198,7 @@ func (t *httpReadWriteNopCloser) Close() error {
 // NewHTTPServer creates a new HTTP RPC server around an API provider.
 //
 // Deprecated: Server implements http.Handler
-func NewHTTPServer(cors []string, vhosts []string, timeouts HTTPTimeouts, srv *Server) *http.Server {
+func NewHTTPServer(cors []string, vhosts []string, timeouts HTTPTimeouts, srv http.Handler) *http.Server {
 	// Wrap the CORS-handler within a host-handler
 	handler := newCorsHandler(srv, cors)
 	handler = newVHostHandler(vhosts, handler)
@@ -284,7 +284,7 @@ func validateRequest(r *http.Request) (int, error) {
 	return http.StatusUnsupportedMediaType, err
 }
 
-func newCorsHandler(srv *Server, allowedOrigins []string) http.Handler {
+func newCorsHandler(srv http.Handler, allowedOrigins []string) http.Handler {
 	// disable CORS support if user has not specified a custom CORS configuration
 	if len(allowedOrigins) == 0 {
 		return srv
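
Relaxing NewHTTPServer and newCorsHandler from *Server to http.Handler lets the same CORS, virtual-host and timeout wrapping front handlers other than the JSON-RPC server, which is what the GraphQL service relies on. A sketch of what the new signature permits; the handler below is a trivial stand-in, not the actual GraphQL handler from this patch:

```go
package main

import (
	"log"
	"net/http"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Any http.Handler can now be wrapped; here a trivial stand-in.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write([]byte(`{"data":{}}`))
	})

	srv := rpc.NewHTTPServer(
		[]string{"*"},           // allowed CORS origins
		[]string{"localhost"},   // allowed virtual hosts
		rpc.DefaultHTTPTimeouts, // package default read/write/idle timeouts
		h,
	)
	srv.Addr = "localhost:8547"
	log.Fatal(srv.ListenAndServe())
}
```
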
diff --git a/vendor/github.com/graph-gophers/graphql-go/Gopkg.lock b/vendor/github.com/graph-gophers/graphql-go/Gopkg.lock
new file mode 100644
index 000000000..4574275c5
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/Gopkg.lock
@@ -0,0 +1,25 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/opentracing/opentracing-go"
+  packages = [
+    ".",
+    "ext",
+    "log"
+  ]
+  revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
+  version = "v1.0.2"
+
+[[projects]]
+  branch = "master"
+  name = "golang.org/x/net"
+  packages = ["context"]
+  revision = "f5dfe339be1d06f81b22525fe34671ee7d2c8904"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "f417062128566756a9360b1c13ada79bdeeb6bab1f53ee9147a3328d95c1653f"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/graph-gophers/graphql-go/Gopkg.toml b/vendor/github.com/graph-gophers/graphql-go/Gopkg.toml
new file mode 100644
index 000000000..62b936799
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/Gopkg.toml
@@ -0,0 +1,10 @@
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+
+[[constraint]]
+  name = "github.com/opentracing/opentracing-go"
+  version = "1.0.2"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/graph-gophers/graphql-go/LICENSE b/vendor/github.com/graph-gophers/graphql-go/LICENSE
new file mode 100644
index 000000000..3907cecac
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2016 Richard Musiol. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/graph-gophers/graphql-go/README.md b/vendor/github.com/graph-gophers/graphql-go/README.md
new file mode 100644
index 000000000..ef4b4639b
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/README.md
@@ -0,0 +1,100 @@
+# graphql-go [![Sourcegraph](https://sourcegraph.com/github.com/graph-gophers/graphql-go/-/badge.svg)](https://sourcegraph.com/github.com/graph-gophers/graphql-go?badge) [![Build Status](https://semaphoreci.com/api/v1/graph-gophers/graphql-go/branches/master/badge.svg)](https://semaphoreci.com/graph-gophers/graphql-go) [![GoDoc](https://godoc.org/github.com/graph-gophers/graphql-go?status.svg)](https://godoc.org/github.com/graph-gophers/graphql-go)
+
+<p align="center"><img src="docs/img/logo.png" width="300"></p>
+
+The goal of this project is to provide full support of the [GraphQL draft specification](https://facebook.github.io/graphql/draft) with a set of idiomatic, easy to use Go packages.
+
+While still under heavy development (`internal` APIs are almost certainly subject to change), this library is
+safe for production use.
+
+## Features
+
+- minimal API
+- support for `context.Context`
+- support for the `OpenTracing` standard
+- schema type-checking against resolvers
+- resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct).
+- handles panics in resolvers
+- parallel execution of resolvers
+
+## Roadmap
+
+We're trying out the GitHub Project feature to manage `graphql-go`'s [development roadmap](https://github.com/graph-gophers/graphql-go/projects/1).
+Feedback is welcome and appreciated.
+
+## (Some) Documentation
+
+### Basic Sample
+
+```go
+package main
+
+import (
+        "log"
+        "net/http"
+
+        graphql "github.com/graph-gophers/graphql-go"
+        "github.com/graph-gophers/graphql-go/relay"
+)
+
+type query struct{}
+
+func (_ *query) Hello() string { return "Hello, world!" }
+
+func main() {
+        s := `
+                schema {
+                        query: Query
+                }
+                type Query {
+                        hello: String!
+                }
+        `
+        schema := graphql.MustParseSchema(s, &query{})
+        http.Handle("/query", &relay.Handler{Schema: schema})
+        log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```
+
+To test:
+```sh
+$ curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query
+```
+
+### Resolvers
+
+A resolver must have one method for each field of the GraphQL type it resolves. The method name has to be [exported](https://golang.org/ref/spec#Exported_identifiers) and match the field's name in a non-case-sensitive way.
+
+The method has up to two arguments:
+
+- Optional `context.Context` argument.
+- Mandatory `*struct { ... }` argument if the corresponding GraphQL field has arguments. The names of the struct fields have to be [exported](https://golang.org/ref/spec#Exported_identifiers) and have to match the names of the GraphQL arguments in a non-case-sensitive way.
+
+The method has up to two results:
+
+- The GraphQL field's value as determined by the resolver.
+- Optional `error` result.
+
+Example for a simple resolver method:
+
+```go
+func (r *helloWorldResolver) Hello() string {
+	return "Hello world!"
+}
+```
+
+The following signature is also allowed:
+
+```go
+func (r *helloWorldResolver) Hello(ctx context.Context) (string, error) {
+	return "Hello world!", nil
+}
+```
+
+### Community Examples
+
+[tonyghita/graphql-go-example](https://github.com/tonyghita/graphql-go-example) - A more "productionized" version of the Star Wars API example given in this repository.
+
+[deltaskelta/graphql-go-pets-example](https://github.com/deltaskelta/graphql-go-pets-example) - graphql-go resolving against a sqlite database
+
+[OscarYuen/go-graphql-starter](https://github.com/OscarYuen/go-graphql-starter) - a starter application integrated with dataloader, psql and basic authentication
diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
new file mode 100644
index 000000000..fdfa62024
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go
@@ -0,0 +1,41 @@
+package errors
+
+import (
+	"fmt"
+)
+
+type QueryError struct {
+	Message       string        `json:"message"`
+	Locations     []Location    `json:"locations,omitempty"`
+	Path          []interface{} `json:"path,omitempty"`
+	Rule          string        `json:"-"`
+	ResolverError error         `json:"-"`
+}
+
+type Location struct {
+	Line   int `json:"line"`
+	Column int `json:"column"`
+}
+
+func (a Location) Before(b Location) bool {
+	return a.Line < b.Line || (a.Line == b.Line && a.Column < b.Column)
+}
+
+func Errorf(format string, a ...interface{}) *QueryError {
+	return &QueryError{
+		Message: fmt.Sprintf(format, a...),
+	}
+}
+
+func (err *QueryError) Error() string {
+	if err == nil {
+		return "<nil>"
+	}
+	str := fmt.Sprintf("graphql: %s", err.Message)
+	for _, loc := range err.Locations {
+		str += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column)
+	}
+	return str
+}
+
+var _ error = &QueryError{}
diff --git a/vendor/github.com/graph-gophers/graphql-go/graphql.go b/vendor/github.com/graph-gophers/graphql-go/graphql.go
new file mode 100644
index 000000000..06ffd4599
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/graphql.go
@@ -0,0 +1,205 @@
+package graphql
+
+import (
+	"context"
+	"fmt"
+
+	"encoding/json"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/exec"
+	"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+	"github.com/graph-gophers/graphql-go/internal/exec/selected"
+	"github.com/graph-gophers/graphql-go/internal/query"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+	"github.com/graph-gophers/graphql-go/internal/validation"
+	"github.com/graph-gophers/graphql-go/introspection"
+	"github.com/graph-gophers/graphql-go/log"
+	"github.com/graph-gophers/graphql-go/trace"
+)
+
+// ParseSchema parses a GraphQL schema and attaches the given root resolver. It returns an error if
+// the Go type signature of the resolvers does not match the schema. If nil is passed as the
+// resolver, then the schema can not be executed, but it may be inspected (e.g. with ToJSON).
+func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) (*Schema, error) {
+	s := &Schema{
+		schema:           schema.New(),
+		maxParallelism:   10,
+		tracer:           trace.OpenTracingTracer{},
+		validationTracer: trace.NoopValidationTracer{},
+		logger:           &log.DefaultLogger{},
+	}
+	for _, opt := range opts {
+		opt(s)
+	}
+
+	if err := s.schema.Parse(schemaString); err != nil {
+		return nil, err
+	}
+
+	if resolver != nil {
+		r, err := resolvable.ApplyResolver(s.schema, resolver)
+		if err != nil {
+			return nil, err
+		}
+		s.res = r
+	}
+
+	return s, nil
+}
+
+// MustParseSchema calls ParseSchema and panics on error.
+func MustParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) *Schema {
+	s, err := ParseSchema(schemaString, resolver, opts...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// Schema represents a GraphQL schema with an optional resolver.
+type Schema struct {
+	schema *schema.Schema
+	res    *resolvable.Schema
+
+	maxDepth         int
+	maxParallelism   int
+	tracer           trace.Tracer
+	validationTracer trace.ValidationTracer
+	logger           log.Logger
+}
+
+// SchemaOpt is an option to pass to ParseSchema or MustParseSchema.
+type SchemaOpt func(*Schema)
+
+// MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking.
+func MaxDepth(n int) SchemaOpt {
+	return func(s *Schema) {
+		s.maxDepth = n
+	}
+}
+
+// MaxParallelism specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10.
+func MaxParallelism(n int) SchemaOpt {
+	return func(s *Schema) {
+		s.maxParallelism = n
+	}
+}
+
+// Tracer is used to trace queries and fields. It defaults to trace.OpenTracingTracer.
+func Tracer(tracer trace.Tracer) SchemaOpt {
+	return func(s *Schema) {
+		s.tracer = tracer
+	}
+}
+
+// ValidationTracer is used to trace validation errors. It defaults to trace.NoopValidationTracer.
+func ValidationTracer(tracer trace.ValidationTracer) SchemaOpt {
+	return func(s *Schema) {
+		s.validationTracer = tracer
+	}
+}
+
+// Logger is used to log panics during query execution. It defaults to exec.DefaultLogger.
+func Logger(logger log.Logger) SchemaOpt {
+	return func(s *Schema) {
+		s.logger = logger
+	}
+}
+
+// Response represents a typical response of a GraphQL server. It may be encoded to JSON directly or
+// it may be further processed to a custom response type, for example to include custom error data.
+// Errors are intentionally serialized first based on the advice in https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107
+type Response struct {
+	Errors     []*errors.QueryError   `json:"errors,omitempty"`
+	Data       json.RawMessage        `json:"data,omitempty"`
+	Extensions map[string]interface{} `json:"extensions,omitempty"`
+}
+
+// Validate validates the given query with the schema.
+func (s *Schema) Validate(queryString string) []*errors.QueryError {
+	doc, qErr := query.Parse(queryString)
+	if qErr != nil {
+		return []*errors.QueryError{qErr}
+	}
+
+	return validation.Validate(s.schema, doc, s.maxDepth)
+}
+
+// Exec executes the given query with the schema's resolver. It panics if the schema was created
+// without a resolver. If the context gets cancelled, no further resolvers will be called and
+// the context error will be returned as soon as possible (not immediately).
+func (s *Schema) Exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) *Response {
+	if s.res == nil {
+		panic("schema created without resolver, can not exec")
+	}
+	return s.exec(ctx, queryString, operationName, variables, s.res)
+}
+
+func (s *Schema) exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) *Response {
+	doc, qErr := query.Parse(queryString)
+	if qErr != nil {
+		return &Response{Errors: []*errors.QueryError{qErr}}
+	}
+
+	validationFinish := s.validationTracer.TraceValidation()
+	errs := validation.Validate(s.schema, doc, s.maxDepth)
+	validationFinish(errs)
+	if len(errs) != 0 {
+		return &Response{Errors: errs}
+	}
+
+	op, err := getOperation(doc, operationName)
+	if err != nil {
+		return &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}}
+	}
+
+	r := &exec.Request{
+		Request: selected.Request{
+			Doc:    doc,
+			Vars:   variables,
+			Schema: s.schema,
+		},
+		Limiter: make(chan struct{}, s.maxParallelism),
+		Tracer:  s.tracer,
+		Logger:  s.logger,
+	}
+	varTypes := make(map[string]*introspection.Type)
+	for _, v := range op.Vars {
+		t, err := common.ResolveType(v.Type, s.schema.Resolve)
+		if err != nil {
+			return &Response{Errors: []*errors.QueryError{err}}
+		}
+		varTypes[v.Name.Name] = introspection.WrapType(t)
+	}
+	traceCtx, finish := s.tracer.TraceQuery(ctx, queryString, operationName, variables, varTypes)
+	data, errs := r.Execute(traceCtx, res, op)
+	finish(errs)
+
+	return &Response{
+		Data:   data,
+		Errors: errs,
+	}
+}
+
+func getOperation(document *query.Document, operationName string) (*query.Operation, error) {
+	if len(document.Operations) == 0 {
+		return nil, fmt.Errorf("no operations in query document")
+	}
+
+	if operationName == "" {
+		if len(document.Operations) > 1 {
+			return nil, fmt.Errorf("more than one operation in query document and no operation name given")
+		}
+		for _, op := range document.Operations {
+			return op, nil // return the one and only operation
+		}
+	}
+
+	op := document.Operations.Get(operationName)
+	if op == nil {
+		return nil, fmt.Errorf("no operation with name %q", operationName)
+	}
+	return op, nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/id.go b/vendor/github.com/graph-gophers/graphql-go/id.go
new file mode 100644
index 000000000..52771c413
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/id.go
@@ -0,0 +1,30 @@
+package graphql
+
+import (
+	"errors"
+	"strconv"
+)
+
+// ID represents GraphQL's "ID" scalar type. A custom type may be used instead.
+type ID string
+
+func (ID) ImplementsGraphQLType(name string) bool {
+	return name == "ID"
+}
+
+func (id *ID) UnmarshalGraphQL(input interface{}) error {
+	var err error
+	switch input := input.(type) {
+	case string:
+		*id = ID(input)
+	case int32:
+		*id = ID(strconv.Itoa(int(input)))
+	default:
+		err = errors.New("wrong type")
+	}
+	return err
+}
+
+func (id ID) MarshalJSON() ([]byte, error) {
+	return strconv.AppendQuote(nil, string(id)), nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
new file mode 100644
index 000000000..62dca47f8
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go
@@ -0,0 +1,32 @@
+package common
+
+type Directive struct {
+	Name Ident
+	Args ArgumentList
+}
+
+func ParseDirectives(l *Lexer) DirectiveList {
+	var directives DirectiveList
+	for l.Peek() == '@' {
+		l.ConsumeToken('@')
+		d := &Directive{}
+		d.Name = l.ConsumeIdentWithLoc()
+		d.Name.Loc.Column--
+		if l.Peek() == '(' {
+			d.Args = ParseArguments(l)
+		}
+		directives = append(directives, d)
+	}
+	return directives
+}
+
+type DirectiveList []*Directive
+
+func (l DirectiveList) Get(name string) *Directive {
+	for _, d := range l {
+		if d.Name.Name == name {
+			return d
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
new file mode 100644
index 000000000..a38fcbaf7
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go
@@ -0,0 +1,161 @@
+package common
+
+import (
+	"fmt"
+	"strings"
+	"text/scanner"
+
+	"github.com/graph-gophers/graphql-go/errors"
+)
+
+type syntaxError string
+
+type Lexer struct {
+	sc          *scanner.Scanner
+	next        rune
+	descComment string
+}
+
+type Ident struct {
+	Name string
+	Loc  errors.Location
+}
+
+func NewLexer(s string) *Lexer {
+	sc := &scanner.Scanner{
+		Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings,
+	}
+	sc.Init(strings.NewReader(s))
+
+	return &Lexer{sc: sc}
+}
+
+func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) {
+	defer func() {
+		if err := recover(); err != nil {
+			if err, ok := err.(syntaxError); ok {
+				errRes = errors.Errorf("syntax error: %s", err)
+				errRes.Locations = []errors.Location{l.Location()}
+				return
+			}
+			panic(err)
+		}
+	}()
+
+	f()
+	return
+}
+
+func (l *Lexer) Peek() rune {
+	return l.next
+}
+
+// Consume whitespace and tokens equivalent to whitespace (e.g. commas and comments).
+//
+// Consumed comment characters will build the description for the next type or field encountered.
+// The description is available from `DescComment()`, and will be reset every time `Consume()` is
+// executed.
+func (l *Lexer) Consume() {
+	l.descComment = ""
+	for {
+		l.next = l.sc.Scan()
+
+		if l.next == ',' {
+			// Similar to white space and line terminators, commas (',') are used to improve the
+			// legibility of source text and separate lexical tokens but are otherwise syntactically and
+			// semantically insignificant within GraphQL documents.
+			//
+			// http://facebook.github.io/graphql/draft/#sec-Insignificant-Commas
+			continue
+		}
+
+		if l.next == '#' {
+			// GraphQL source documents may contain single-line comments, starting with the '#' marker.
+			//
+			// A comment can contain any Unicode code point except `LineTerminator` so a comment always
+			// consists of all code points starting with the '#' character up to but not including the
+			// line terminator.
+
+			l.consumeComment()
+			continue
+		}
+
+		break
+	}
+}
+
+func (l *Lexer) ConsumeIdent() string {
+	name := l.sc.TokenText()
+	l.ConsumeToken(scanner.Ident)
+	return name
+}
+
+func (l *Lexer) ConsumeIdentWithLoc() Ident {
+	loc := l.Location()
+	name := l.sc.TokenText()
+	l.ConsumeToken(scanner.Ident)
+	return Ident{name, loc}
+}
+
+func (l *Lexer) ConsumeKeyword(keyword string) {
+	if l.next != scanner.Ident || l.sc.TokenText() != keyword {
+		l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword))
+	}
+	l.Consume()
+}
+
+func (l *Lexer) ConsumeLiteral() *BasicLit {
+	lit := &BasicLit{Type: l.next, Text: l.sc.TokenText()}
+	l.Consume()
+	return lit
+}
+
+func (l *Lexer) ConsumeToken(expected rune) {
+	if l.next != expected {
+		l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected)))
+	}
+	l.Consume()
+}
+
+func (l *Lexer) DescComment() string {
+	return l.descComment
+}
+
+func (l *Lexer) SyntaxError(message string) {
+	panic(syntaxError(message))
+}
+
+func (l *Lexer) Location() errors.Location {
+	return errors.Location{
+		Line:   l.sc.Line,
+		Column: l.sc.Column,
+	}
+}
+
+// consumeComment consumes all characters from `#` to the first encountered line terminator.
+// The characters are appended to `l.descComment`.
+func (l *Lexer) consumeComment() {
+	if l.next != '#' {
+		return
+	}
+
+	// TODO: count and trim whitespace so we can dedent any following lines.
+	if l.sc.Peek() == ' ' {
+		l.sc.Next()
+	}
+
+	if l.descComment != "" {
+		// TODO: use a bytes.Buffer or strings.Builder instead of this.
+		l.descComment += "\n"
+	}
+
+	for {
+		next := l.sc.Next()
+		if next == '\r' || next == '\n' || next == scanner.EOF {
+			break
+		}
+
+		// TODO: use a bytes.Buffer or strings.Builder instead of this.
+		l.descComment += string(next)
+	}
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
new file mode 100644
index 000000000..e7bbe2638
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go
@@ -0,0 +1,206 @@
+package common
+
+import (
+	"strconv"
+	"strings"
+	"text/scanner"
+
+	"github.com/graph-gophers/graphql-go/errors"
+)
+
+type Literal interface {
+	Value(vars map[string]interface{}) interface{}
+	String() string
+	Location() errors.Location
+}
+
+type BasicLit struct {
+	Type rune
+	Text string
+	Loc  errors.Location
+}
+
+func (lit *BasicLit) Value(vars map[string]interface{}) interface{} {
+	switch lit.Type {
+	case scanner.Int:
+		value, err := strconv.ParseInt(lit.Text, 10, 32)
+		if err != nil {
+			panic(err)
+		}
+		return int32(value)
+
+	case scanner.Float:
+		value, err := strconv.ParseFloat(lit.Text, 64)
+		if err != nil {
+			panic(err)
+		}
+		return value
+
+	case scanner.String:
+		value, err := strconv.Unquote(lit.Text)
+		if err != nil {
+			panic(err)
+		}
+		return value
+
+	case scanner.Ident:
+		switch lit.Text {
+		case "true":
+			return true
+		case "false":
+			return false
+		default:
+			return lit.Text
+		}
+
+	default:
+		panic("invalid literal")
+	}
+}
+
+func (lit *BasicLit) String() string {
+	return lit.Text
+}
+
+func (lit *BasicLit) Location() errors.Location {
+	return lit.Loc
+}
+
+type ListLit struct {
+	Entries []Literal
+	Loc     errors.Location
+}
+
+func (lit *ListLit) Value(vars map[string]interface{}) interface{} {
+	entries := make([]interface{}, len(lit.Entries))
+	for i, entry := range lit.Entries {
+		entries[i] = entry.Value(vars)
+	}
+	return entries
+}
+
+func (lit *ListLit) String() string {
+	entries := make([]string, len(lit.Entries))
+	for i, entry := range lit.Entries {
+		entries[i] = entry.String()
+	}
+	return "[" + strings.Join(entries, ", ") + "]"
+}
+
+func (lit *ListLit) Location() errors.Location {
+	return lit.Loc
+}
+
+type ObjectLit struct {
+	Fields []*ObjectLitField
+	Loc    errors.Location
+}
+
+type ObjectLitField struct {
+	Name  Ident
+	Value Literal
+}
+
+func (lit *ObjectLit) Value(vars map[string]interface{}) interface{} {
+	fields := make(map[string]interface{}, len(lit.Fields))
+	for _, f := range lit.Fields {
+		fields[f.Name.Name] = f.Value.Value(vars)
+	}
+	return fields
+}
+
+func (lit *ObjectLit) String() string {
+	entries := make([]string, 0, len(lit.Fields))
+	for _, f := range lit.Fields {
+		entries = append(entries, f.Name.Name+": "+f.Value.String())
+	}
+	return "{" + strings.Join(entries, ", ") + "}"
+}
+
+func (lit *ObjectLit) Location() errors.Location {
+	return lit.Loc
+}
+
+type NullLit struct {
+	Loc errors.Location
+}
+
+func (lit *NullLit) Value(vars map[string]interface{}) interface{} {
+	return nil
+}
+
+func (lit *NullLit) String() string {
+	return "null"
+}
+
+func (lit *NullLit) Location() errors.Location {
+	return lit.Loc
+}
+
+type Variable struct {
+	Name string
+	Loc  errors.Location
+}
+
+func (v Variable) Value(vars map[string]interface{}) interface{} {
+	return vars[v.Name]
+}
+
+func (v Variable) String() string {
+	return "$" + v.Name
+}
+
+func (v *Variable) Location() errors.Location {
+	return v.Loc
+}
+
+func ParseLiteral(l *Lexer, constOnly bool) Literal {
+	loc := l.Location()
+	switch l.Peek() {
+	case '$':
+		if constOnly {
+			l.SyntaxError("variable not allowed")
+			panic("unreachable")
+		}
+		l.ConsumeToken('$')
+		return &Variable{l.ConsumeIdent(), loc}
+
+	case scanner.Int, scanner.Float, scanner.String, scanner.Ident:
+		lit := l.ConsumeLiteral()
+		if lit.Type == scanner.Ident && lit.Text == "null" {
+			return &NullLit{loc}
+		}
+		lit.Loc = loc
+		return lit
+	case '-':
+		l.ConsumeToken('-')
+		lit := l.ConsumeLiteral()
+		lit.Text = "-" + lit.Text
+		lit.Loc = loc
+		return lit
+	case '[':
+		l.ConsumeToken('[')
+		var list []Literal
+		for l.Peek() != ']' {
+			list = append(list, ParseLiteral(l, constOnly))
+		}
+		l.ConsumeToken(']')
+		return &ListLit{list, loc}
+
+	case '{':
+		l.ConsumeToken('{')
+		var fields []*ObjectLitField
+		for l.Peek() != '}' {
+			name := l.ConsumeIdentWithLoc()
+			l.ConsumeToken(':')
+			value := ParseLiteral(l, constOnly)
+			fields = append(fields, &ObjectLitField{name, value})
+		}
+		l.ConsumeToken('}')
+		return &ObjectLit{fields, loc}
+
+	default:
+		l.SyntaxError("invalid value")
+		panic("unreachable")
+	}
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
new file mode 100644
index 000000000..a20ca3090
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go
@@ -0,0 +1,80 @@
+package common
+
+import (
+	"github.com/graph-gophers/graphql-go/errors"
+)
+
+type Type interface {
+	Kind() string
+	String() string
+}
+
+type List struct {
+	OfType Type
+}
+
+type NonNull struct {
+	OfType Type
+}
+
+type TypeName struct {
+	Ident
+}
+
+func (*List) Kind() string     { return "LIST" }
+func (*NonNull) Kind() string  { return "NON_NULL" }
+func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") }
+
+func (t *List) String() string    { return "[" + t.OfType.String() + "]" }
+func (t *NonNull) String() string { return t.OfType.String() + "!" }
+func (*TypeName) String() string  { panic("TypeName needs to be resolved to actual type") }
+
+func ParseType(l *Lexer) Type {
+	t := parseNullType(l)
+	if l.Peek() == '!' {
+		l.ConsumeToken('!')
+		return &NonNull{OfType: t}
+	}
+	return t
+}
+
+func parseNullType(l *Lexer) Type {
+	if l.Peek() == '[' {
+		l.ConsumeToken('[')
+		ofType := ParseType(l)
+		l.ConsumeToken(']')
+		return &List{OfType: ofType}
+	}
+
+	return &TypeName{Ident: l.ConsumeIdentWithLoc()}
+}
+
+type Resolver func(name string) Type
+
+func ResolveType(t Type, resolver Resolver) (Type, *errors.QueryError) {
+	switch t := t.(type) {
+	case *List:
+		ofType, err := ResolveType(t.OfType, resolver)
+		if err != nil {
+			return nil, err
+		}
+		return &List{OfType: ofType}, nil
+	case *NonNull:
+		ofType, err := ResolveType(t.OfType, resolver)
+		if err != nil {
+			return nil, err
+		}
+		return &NonNull{OfType: ofType}, nil
+	case *TypeName:
+		refT := resolver(t.Name)
+		if refT == nil {
+			err := errors.Errorf("Unknown type %q.", t.Name)
+			err.Rule = "KnownTypeNames"
+			err.Locations = []errors.Location{t.Loc}
+			return nil, err
+		}
+		return refT, nil
+	default:
+		return t, nil
+	}
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
new file mode 100644
index 000000000..fcd456abf
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go
@@ -0,0 +1,78 @@
+package common
+
+import (
+	"github.com/graph-gophers/graphql-go/errors"
+)
+
+// http://facebook.github.io/graphql/draft/#InputValueDefinition
+type InputValue struct {
+	Name    Ident
+	Type    Type
+	Default Literal
+	Desc    string
+	Loc     errors.Location
+	TypeLoc errors.Location
+}
+
+type InputValueList []*InputValue
+
+func (l InputValueList) Get(name string) *InputValue {
+	for _, v := range l {
+		if v.Name.Name == name {
+			return v
+		}
+	}
+	return nil
+}
+
+func ParseInputValue(l *Lexer) *InputValue {
+	p := &InputValue{}
+	p.Loc = l.Location()
+	p.Desc = l.DescComment()
+	p.Name = l.ConsumeIdentWithLoc()
+	l.ConsumeToken(':')
+	p.TypeLoc = l.Location()
+	p.Type = ParseType(l)
+	if l.Peek() == '=' {
+		l.ConsumeToken('=')
+		p.Default = ParseLiteral(l, true)
+	}
+	return p
+}
+
+type Argument struct {
+	Name  Ident
+	Value Literal
+}
+
+type ArgumentList []Argument
+
+func (l ArgumentList) Get(name string) (Literal, bool) {
+	for _, arg := range l {
+		if arg.Name.Name == name {
+			return arg.Value, true
+		}
+	}
+	return nil, false
+}
+
+func (l ArgumentList) MustGet(name string) Literal {
+	value, ok := l.Get(name)
+	if !ok {
+		panic("argument not found")
+	}
+	return value
+}
+
+func ParseArguments(l *Lexer) ArgumentList {
+	var args ArgumentList
+	l.ConsumeToken('(')
+	for l.Peek() != ')' {
+		name := l.ConsumeIdentWithLoc()
+		l.ConsumeToken(':')
+		value := ParseLiteral(l, false)
+		args = append(args, Argument{Name: name, Value: value})
+	}
+	l.ConsumeToken(')')
+	return args
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
new file mode 100644
index 000000000..e6cca7448
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go
@@ -0,0 +1,305 @@
+package exec
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"reflect"
+	"sync"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+	"github.com/graph-gophers/graphql-go/internal/exec/selected"
+	"github.com/graph-gophers/graphql-go/internal/query"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+	"github.com/graph-gophers/graphql-go/log"
+	"github.com/graph-gophers/graphql-go/trace"
+)
+
+type Request struct {
+	selected.Request
+	Limiter chan struct{}
+	Tracer  trace.Tracer
+	Logger  log.Logger
+}
+
+func (r *Request) handlePanic(ctx context.Context) {
+	if value := recover(); value != nil {
+		r.Logger.LogPanic(ctx, value)
+		r.AddError(makePanicError(value))
+	}
+}
+
+func makePanicError(value interface{}) *errors.QueryError {
+	return errors.Errorf("graphql: panic occurred: %v", value)
+}
+
+func (r *Request) Execute(ctx context.Context, s *resolvable.Schema, op *query.Operation) ([]byte, []*errors.QueryError) {
+	var out bytes.Buffer
+	func() {
+		defer r.handlePanic(ctx)
+		sels := selected.ApplyOperation(&r.Request, s, op)
+		r.execSelections(ctx, sels, nil, s.Resolver, &out, op.Type == query.Mutation)
+	}()
+
+	if err := ctx.Err(); err != nil {
+		return nil, []*errors.QueryError{errors.Errorf("%s", err)}
+	}
+
+	return out.Bytes(), r.Errs
+}
+
+type fieldToExec struct {
+	field    *selected.SchemaField
+	sels     []selected.Selection
+	resolver reflect.Value
+	out      *bytes.Buffer
+}
+
+func (r *Request) execSelections(ctx context.Context, sels []selected.Selection, path *pathSegment, resolver reflect.Value, out *bytes.Buffer, serially bool) {
+	async := !serially && selected.HasAsyncSel(sels)
+
+	var fields []*fieldToExec
+	collectFieldsToResolve(sels, resolver, &fields, make(map[string]*fieldToExec))
+
+	if async {
+		var wg sync.WaitGroup
+		wg.Add(len(fields))
+		for _, f := range fields {
+			go func(f *fieldToExec) {
+				defer wg.Done()
+				defer r.handlePanic(ctx)
+				f.out = new(bytes.Buffer)
+				execFieldSelection(ctx, r, f, &pathSegment{path, f.field.Alias}, true)
+			}(f)
+		}
+		wg.Wait()
+	}
+
+	out.WriteByte('{')
+	for i, f := range fields {
+		if i > 0 {
+			out.WriteByte(',')
+		}
+		out.WriteByte('"')
+		out.WriteString(f.field.Alias)
+		out.WriteByte('"')
+		out.WriteByte(':')
+		if async {
+			out.Write(f.out.Bytes())
+			continue
+		}
+		f.out = out
+		execFieldSelection(ctx, r, f, &pathSegment{path, f.field.Alias}, false)
+	}
+	out.WriteByte('}')
+}
+
+func collectFieldsToResolve(sels []selected.Selection, resolver reflect.Value, fields *[]*fieldToExec, fieldByAlias map[string]*fieldToExec) {
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *selected.SchemaField:
+			field, ok := fieldByAlias[sel.Alias]
+			if !ok { // validation already checked for conflict (TODO)
+				field = &fieldToExec{field: sel, resolver: resolver}
+				fieldByAlias[sel.Alias] = field
+				*fields = append(*fields, field)
+			}
+			field.sels = append(field.sels, sel.Sels...)
+
+		case *selected.TypenameField:
+			sf := &selected.SchemaField{
+				Field:       resolvable.MetaFieldTypename,
+				Alias:       sel.Alias,
+				FixedResult: reflect.ValueOf(typeOf(sel, resolver)),
+			}
+			*fields = append(*fields, &fieldToExec{field: sf, resolver: resolver})
+
+		case *selected.TypeAssertion:
+			out := resolver.Method(sel.MethodIndex).Call(nil)
+			if !out[1].Bool() {
+				continue
+			}
+			collectFieldsToResolve(sel.Sels, out[0], fields, fieldByAlias)
+
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+func typeOf(tf *selected.TypenameField, resolver reflect.Value) string {
+	if len(tf.TypeAssertions) == 0 {
+		return tf.Name
+	}
+	for name, a := range tf.TypeAssertions {
+		out := resolver.Method(a.MethodIndex).Call(nil)
+		if out[1].Bool() {
+			return name
+		}
+	}
+	return ""
+}
+
+func execFieldSelection(ctx context.Context, r *Request, f *fieldToExec, path *pathSegment, applyLimiter bool) {
+	if applyLimiter {
+		r.Limiter <- struct{}{}
+	}
+
+	var result reflect.Value
+	var err *errors.QueryError
+
+	traceCtx, finish := r.Tracer.TraceField(ctx, f.field.TraceLabel, f.field.TypeName, f.field.Name, !f.field.Async, f.field.Args)
+	defer func() {
+		finish(err)
+	}()
+
+	err = func() (err *errors.QueryError) {
+		defer func() {
+			if panicValue := recover(); panicValue != nil {
+				r.Logger.LogPanic(ctx, panicValue)
+				err = makePanicError(panicValue)
+				err.Path = path.toSlice()
+			}
+		}()
+
+		if f.field.FixedResult.IsValid() {
+			result = f.field.FixedResult
+			return nil
+		}
+
+		if err := traceCtx.Err(); err != nil {
+			return errors.Errorf("%s", err) // don't execute any more resolvers if context got cancelled
+		}
+
+		var in []reflect.Value
+		if f.field.HasContext {
+			in = append(in, reflect.ValueOf(traceCtx))
+		}
+		if f.field.ArgsPacker != nil {
+			in = append(in, f.field.PackedArgs)
+		}
+		callOut := f.resolver.Method(f.field.MethodIndex).Call(in)
+		result = callOut[0]
+		if f.field.HasError && !callOut[1].IsNil() {
+			resolverErr := callOut[1].Interface().(error)
+			err := errors.Errorf("%s", resolverErr)
+			err.Path = path.toSlice()
+			err.ResolverError = resolverErr
+			return err
+		}
+		return nil
+	}()
+
+	if applyLimiter {
+		<-r.Limiter
+	}
+
+	if err != nil {
+		r.AddError(err)
+		f.out.WriteString("null") // TODO handle non-nil
+		return
+	}
+
+	r.execSelectionSet(traceCtx, f.sels, f.field.Type, path, result, f.out)
+}
+
+func (r *Request) execSelectionSet(ctx context.Context, sels []selected.Selection, typ common.Type, path *pathSegment, resolver reflect.Value, out *bytes.Buffer) {
+	t, nonNull := unwrapNonNull(typ)
+	switch t := t.(type) {
+	case *schema.Object, *schema.Interface, *schema.Union:
+		// a reflect.Value of a nil interface will show up as an Invalid value
+		if resolver.Kind() == reflect.Invalid || ((resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface) && resolver.IsNil()) {
+			if nonNull {
+				panic(errors.Errorf("got nil for non-null %q", t))
+			}
+			out.WriteString("null")
+			return
+		}
+
+		r.execSelections(ctx, sels, path, resolver, out, false)
+		return
+	}
+
+	if !nonNull {
+		if resolver.IsNil() {
+			out.WriteString("null")
+			return
+		}
+		resolver = resolver.Elem()
+	}
+
+	switch t := t.(type) {
+	case *common.List:
+		l := resolver.Len()
+
+		if selected.HasAsyncSel(sels) {
+			var wg sync.WaitGroup
+			wg.Add(l)
+			entryouts := make([]bytes.Buffer, l)
+			for i := 0; i < l; i++ {
+				go func(i int) {
+					defer wg.Done()
+					defer r.handlePanic(ctx)
+					r.execSelectionSet(ctx, sels, t.OfType, &pathSegment{path, i}, resolver.Index(i), &entryouts[i])
+				}(i)
+			}
+			wg.Wait()
+
+			out.WriteByte('[')
+			for i, entryout := range entryouts {
+				if i > 0 {
+					out.WriteByte(',')
+				}
+				out.Write(entryout.Bytes())
+			}
+			out.WriteByte(']')
+			return
+		}
+
+		out.WriteByte('[')
+		for i := 0; i < l; i++ {
+			if i > 0 {
+				out.WriteByte(',')
+			}
+			r.execSelectionSet(ctx, sels, t.OfType, &pathSegment{path, i}, resolver.Index(i), out)
+		}
+		out.WriteByte(']')
+
+	case *schema.Scalar:
+		v := resolver.Interface()
+		data, err := json.Marshal(v)
+		if err != nil {
+			panic(errors.Errorf("could not marshal %v: %s", v, err))
+		}
+		out.Write(data)
+
+	case *schema.Enum:
+		out.WriteByte('"')
+		out.WriteString(resolver.String())
+		out.WriteByte('"')
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func unwrapNonNull(t common.Type) (common.Type, bool) {
+	if nn, ok := t.(*common.NonNull); ok {
+		return nn.OfType, true
+	}
+	return t, false
+}
+
+type pathSegment struct {
+	parent *pathSegment
+	value  interface{}
+}
+
+func (p *pathSegment) toSlice() []interface{} {
+	if p == nil {
+		return nil
+	}
+	return append(p.parent.toSlice(), p.value)
+}
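
The `pathSegment` list above is how the executor remembers where in the response tree an error occurred: each nested field or list index pushes a segment, and `toSlice` unrolls the parent links into the error path attached to the reported error. A minimal, self-contained sketch of that unrolling (the types are copied here because the originals are unexported; the field names in the example path are hypothetical):

```go
package main

import "fmt"

// pathSegment mirrors the linked-list path used by the executor above.
type pathSegment struct {
	parent *pathSegment
	value  interface{}
}

// toSlice walks the parent links recursively, so the root segment ends up first.
func (p *pathSegment) toSlice() []interface{} {
	if p == nil {
		return nil
	}
	return append(p.parent.toSlice(), p.value)
}

func main() {
	// Simulates the path built while resolving blocks[2].hash.
	root := &pathSegment{nil, "blocks"}
	idx := &pathSegment{root, 2}
	leaf := &pathSegment{idx, "hash"}
	fmt.Println(leaf.toSlice()) // [blocks 2 hash]
}
```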
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
new file mode 100644
index 000000000..22706bcd1
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go
@@ -0,0 +1,371 @@
+package packer
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+)
+
+type packer interface {
+	Pack(value interface{}) (reflect.Value, error)
+}
+
+type Builder struct {
+	packerMap     map[typePair]*packerMapEntry
+	structPackers []*StructPacker
+}
+
+type typePair struct {
+	graphQLType  common.Type
+	resolverType reflect.Type
+}
+
+type packerMapEntry struct {
+	packer  packer
+	targets []*packer
+}
+
+func NewBuilder() *Builder {
+	return &Builder{
+		packerMap: make(map[typePair]*packerMapEntry),
+	}
+}
+
+func (b *Builder) Finish() error {
+	for _, entry := range b.packerMap {
+		for _, target := range entry.targets {
+			*target = entry.packer
+		}
+	}
+
+	for _, p := range b.structPackers {
+		p.defaultStruct = reflect.New(p.structType).Elem()
+		for _, f := range p.fields {
+			if defaultVal := f.field.Default; defaultVal != nil {
+				v, err := f.fieldPacker.Pack(defaultVal.Value(nil))
+				if err != nil {
+					return err
+				}
+				p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (b *Builder) assignPacker(target *packer, schemaType common.Type, reflectType reflect.Type) error {
+	k := typePair{schemaType, reflectType}
+	ref, ok := b.packerMap[k]
+	if !ok {
+		ref = &packerMapEntry{}
+		b.packerMap[k] = ref
+		var err error
+		ref.packer, err = b.makePacker(schemaType, reflectType)
+		if err != nil {
+			return err
+		}
+	}
+	ref.targets = append(ref.targets, target)
+	return nil
+}
+
+func (b *Builder) makePacker(schemaType common.Type, reflectType reflect.Type) (packer, error) {
+	t, nonNull := unwrapNonNull(schemaType)
+	if !nonNull {
+		if reflectType.Kind() != reflect.Ptr {
+			return nil, fmt.Errorf("%s is not a pointer", reflectType)
+		}
+		elemType := reflectType.Elem()
+		addPtr := true
+		if _, ok := t.(*schema.InputObject); ok {
+			elemType = reflectType // keep pointer for input objects
+			addPtr = false
+		}
+		elem, err := b.makeNonNullPacker(t, elemType)
+		if err != nil {
+			return nil, err
+		}
+		return &nullPacker{
+			elemPacker: elem,
+			valueType:  reflectType,
+			addPtr:     addPtr,
+		}, nil
+	}
+
+	return b.makeNonNullPacker(t, reflectType)
+}
+
+func (b *Builder) makeNonNullPacker(schemaType common.Type, reflectType reflect.Type) (packer, error) {
+	if u, ok := reflect.New(reflectType).Interface().(Unmarshaler); ok {
+		if !u.ImplementsGraphQLType(schemaType.String()) {
+			return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType)
+		}
+		return &unmarshalerPacker{
+			ValueType: reflectType,
+		}, nil
+	}
+
+	switch t := schemaType.(type) {
+	case *schema.Scalar:
+		return &ValuePacker{
+			ValueType: reflectType,
+		}, nil
+
+	case *schema.Enum:
+		if reflectType.Kind() != reflect.String {
+			return nil, fmt.Errorf("wrong type, expected %s", reflect.String)
+		}
+		return &ValuePacker{
+			ValueType: reflectType,
+		}, nil
+
+	case *schema.InputObject:
+		e, err := b.MakeStructPacker(t.Values, reflectType)
+		if err != nil {
+			return nil, err
+		}
+		return e, nil
+
+	case *common.List:
+		if reflectType.Kind() != reflect.Slice {
+			return nil, fmt.Errorf("expected slice, got %s", reflectType)
+		}
+		p := &listPacker{
+			sliceType: reflectType,
+		}
+		if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil {
+			return nil, err
+		}
+		return p, nil
+
+	case *schema.Object, *schema.Interface, *schema.Union:
+		return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind())
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func (b *Builder) MakeStructPacker(values common.InputValueList, typ reflect.Type) (*StructPacker, error) {
+	structType := typ
+	usePtr := false
+	if typ.Kind() == reflect.Ptr {
+		structType = typ.Elem()
+		usePtr = true
+	}
+	if structType.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("expected struct or pointer to struct, got %s", typ)
+	}
+
+	var fields []*structPackerField
+	for _, v := range values {
+		fe := &structPackerField{field: v}
+		fx := func(n string) bool {
+			return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name))
+		}
+
+		sf, ok := structType.FieldByNameFunc(fx)
+		if !ok {
+			return nil, fmt.Errorf("missing argument %q", v.Name)
+		}
+		if sf.PkgPath != "" {
+			return nil, fmt.Errorf("field %q must be exported", sf.Name)
+		}
+		fe.fieldIndex = sf.Index
+
+		ft := v.Type
+		if v.Default != nil {
+			ft, _ = unwrapNonNull(ft)
+			ft = &common.NonNull{OfType: ft}
+		}
+
+		if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil {
+			return nil, fmt.Errorf("field %q: %s", sf.Name, err)
+		}
+
+		fields = append(fields, fe)
+	}
+
+	p := &StructPacker{
+		structType: structType,
+		usePtr:     usePtr,
+		fields:     fields,
+	}
+	b.structPackers = append(b.structPackers, p)
+	return p, nil
+}
+
+type StructPacker struct {
+	structType    reflect.Type
+	usePtr        bool
+	defaultStruct reflect.Value
+	fields        []*structPackerField
+}
+
+type structPackerField struct {
+	field       *common.InputValue
+	fieldIndex  []int
+	fieldPacker packer
+}
+
+func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) {
+	if value == nil {
+		return reflect.Value{}, errors.Errorf("got null for non-null")
+	}
+
+	values := value.(map[string]interface{})
+	v := reflect.New(p.structType)
+	v.Elem().Set(p.defaultStruct)
+	for _, f := range p.fields {
+		if value, ok := values[f.field.Name.Name]; ok {
+			packed, err := f.fieldPacker.Pack(value)
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			v.Elem().FieldByIndex(f.fieldIndex).Set(packed)
+		}
+	}
+	if !p.usePtr {
+		return v.Elem(), nil
+	}
+	return v, nil
+}
+
+type listPacker struct {
+	sliceType reflect.Type
+	elem      packer
+}
+
+func (e *listPacker) Pack(value interface{}) (reflect.Value, error) {
+	list, ok := value.([]interface{})
+	if !ok {
+		list = []interface{}{value}
+	}
+
+	v := reflect.MakeSlice(e.sliceType, len(list), len(list))
+	for i := range list {
+		packed, err := e.elem.Pack(list[i])
+		if err != nil {
+			return reflect.Value{}, err
+		}
+		v.Index(i).Set(packed)
+	}
+	return v, nil
+}
+
+type nullPacker struct {
+	elemPacker packer
+	valueType  reflect.Type
+	addPtr     bool
+}
+
+func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) {
+	if value == nil {
+		return reflect.Zero(p.valueType), nil
+	}
+
+	v, err := p.elemPacker.Pack(value)
+	if err != nil {
+		return reflect.Value{}, err
+	}
+
+	if p.addPtr {
+		ptr := reflect.New(p.valueType.Elem())
+		ptr.Elem().Set(v)
+		return ptr, nil
+	}
+
+	return v, nil
+}
+
+type ValuePacker struct {
+	ValueType reflect.Type
+}
+
+func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) {
+	if value == nil {
+		return reflect.Value{}, errors.Errorf("got null for non-null")
+	}
+
+	coerced, err := unmarshalInput(p.ValueType, value)
+	if err != nil {
+		return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err)
+	}
+	return reflect.ValueOf(coerced), nil
+}
+
+type unmarshalerPacker struct {
+	ValueType reflect.Type
+}
+
+func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) {
+	if value == nil {
+		return reflect.Value{}, errors.Errorf("got null for non-null")
+	}
+
+	v := reflect.New(p.ValueType)
+	if err := v.Interface().(Unmarshaler).UnmarshalGraphQL(value); err != nil {
+		return reflect.Value{}, err
+	}
+	return v.Elem(), nil
+}
+
+type Unmarshaler interface {
+	ImplementsGraphQLType(name string) bool
+	UnmarshalGraphQL(input interface{}) error
+}
+
+func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
+	if reflect.TypeOf(input) == typ {
+		return input, nil
+	}
+
+	switch typ.Kind() {
+	case reflect.Int32:
+		switch input := input.(type) {
+		case int:
+			if input < math.MinInt32 || input > math.MaxInt32 {
+				return nil, fmt.Errorf("not a 32-bit integer")
+			}
+			return int32(input), nil
+		case float64:
+			coerced := int32(input)
+			if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input {
+				return nil, fmt.Errorf("not a 32-bit integer")
+			}
+			return coerced, nil
+		}
+
+	case reflect.Float64:
+		switch input := input.(type) {
+		case int32:
+			return float64(input), nil
+		case int:
+			return float64(input), nil
+		}
+
+	case reflect.String:
+		if reflect.TypeOf(input).ConvertibleTo(typ) {
+			return reflect.ValueOf(input).Convert(typ).Interface(), nil
+		}
+	}
+
+	return nil, fmt.Errorf("incompatible type")
+}
+
+func unwrapNonNull(t common.Type) (common.Type, bool) {
+	if nn, ok := t.(*common.NonNull); ok {
+		return nn.OfType, true
+	}
+	return t, false
+}
+
+func stripUnderscore(s string) string {
+	return strings.Replace(s, "_", "", -1)
+}
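
Custom scalars on the input side go through the `Unmarshaler` interface defined above: `makeNonNullPacker` checks for it before falling back to `ValuePacker`/`unmarshalInput`. A hedged sketch of a resolver-side type that would satisfy it; the `Long` scalar name and the encodings it accepts are illustrative assumptions, not something this package defines:

```go
package resolvers

import (
	"fmt"
	"strconv"
)

// Long is an example 64-bit scalar; the name and accepted encodings are
// assumptions for illustration, not types defined by graphql-go itself.
type Long int64

// ImplementsGraphQLType tells the packer which schema scalar this type backs.
func (Long) ImplementsGraphQLType(name string) bool { return name == "Long" }

// UnmarshalGraphQL coerces a raw query or variable value into a Long.
func (l *Long) UnmarshalGraphQL(input interface{}) error {
	switch v := input.(type) {
	case int32:
		*l = Long(v)
		return nil
	case float64:
		*l = Long(v)
		return nil
	case string:
		n, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return err
		}
		*l = Long(n)
		return nil
	default:
		return fmt.Errorf("wrong type for Long: %T", input)
	}
}
```

The multiple cases are a guess at the representations a value may arrive in (query literals versus JSON-decoded variables); a real implementation would adjust them to its scalar's wire format.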
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
new file mode 100644
index 000000000..826c82348
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go
@@ -0,0 +1,58 @@
+package resolvable
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+	"github.com/graph-gophers/graphql-go/introspection"
+)
+
+var MetaSchema *Object
+var MetaType *Object
+
+func init() {
+	var err error
+	b := newBuilder(schema.Meta)
+
+	metaSchema := schema.Meta.Types["__Schema"].(*schema.Object)
+	MetaSchema, err = b.makeObjectExec(metaSchema.Name, metaSchema.Fields, nil, false, reflect.TypeOf(&introspection.Schema{}))
+	if err != nil {
+		panic(err)
+	}
+
+	metaType := schema.Meta.Types["__Type"].(*schema.Object)
+	MetaType, err = b.makeObjectExec(metaType.Name, metaType.Fields, nil, false, reflect.TypeOf(&introspection.Type{}))
+	if err != nil {
+		panic(err)
+	}
+
+	if err := b.finish(); err != nil {
+		panic(err)
+	}
+}
+
+var MetaFieldTypename = Field{
+	Field: schema.Field{
+		Name: "__typename",
+		Type: &common.NonNull{OfType: schema.Meta.Types["String"]},
+	},
+	TraceLabel: fmt.Sprintf("GraphQL field: __typename"),
+}
+
+var MetaFieldSchema = Field{
+	Field: schema.Field{
+		Name: "__schema",
+		Type: schema.Meta.Types["__Schema"],
+	},
+	TraceLabel: fmt.Sprintf("GraphQL field: __schema"),
+}
+
+var MetaFieldType = Field{
+	Field: schema.Field{
+		Name: "__type",
+		Type: schema.Meta.Types["__Type"],
+	},
+	TraceLabel: fmt.Sprintf("GraphQL field: __type"),
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
new file mode 100644
index 000000000..3e5d9e44d
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go
@@ -0,0 +1,331 @@
+package resolvable
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/exec/packer"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+)
+
+type Schema struct {
+	schema.Schema
+	Query    Resolvable
+	Mutation Resolvable
+	Resolver reflect.Value
+}
+
+type Resolvable interface {
+	isResolvable()
+}
+
+type Object struct {
+	Name           string
+	Fields         map[string]*Field
+	TypeAssertions map[string]*TypeAssertion
+}
+
+type Field struct {
+	schema.Field
+	TypeName    string
+	MethodIndex int
+	HasContext  bool
+	HasError    bool
+	ArgsPacker  *packer.StructPacker
+	ValueExec   Resolvable
+	TraceLabel  string
+}
+
+type TypeAssertion struct {
+	MethodIndex int
+	TypeExec    Resolvable
+}
+
+type List struct {
+	Elem Resolvable
+}
+
+type Scalar struct{}
+
+func (*Object) isResolvable() {}
+func (*List) isResolvable()   {}
+func (*Scalar) isResolvable() {}
+
+func ApplyResolver(s *schema.Schema, resolver interface{}) (*Schema, error) {
+	b := newBuilder(s)
+
+	var query, mutation Resolvable
+
+	if t, ok := s.EntryPoints["query"]; ok {
+		if err := b.assignExec(&query, t, reflect.TypeOf(resolver)); err != nil {
+			return nil, err
+		}
+	}
+
+	if t, ok := s.EntryPoints["mutation"]; ok {
+		if err := b.assignExec(&mutation, t, reflect.TypeOf(resolver)); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := b.finish(); err != nil {
+		return nil, err
+	}
+
+	return &Schema{
+		Schema:   *s,
+		Resolver: reflect.ValueOf(resolver),
+		Query:    query,
+		Mutation: mutation,
+	}, nil
+}
+
+type execBuilder struct {
+	schema        *schema.Schema
+	resMap        map[typePair]*resMapEntry
+	packerBuilder *packer.Builder
+}
+
+type typePair struct {
+	graphQLType  common.Type
+	resolverType reflect.Type
+}
+
+type resMapEntry struct {
+	exec    Resolvable
+	targets []*Resolvable
+}
+
+func newBuilder(s *schema.Schema) *execBuilder {
+	return &execBuilder{
+		schema:        s,
+		resMap:        make(map[typePair]*resMapEntry),
+		packerBuilder: packer.NewBuilder(),
+	}
+}
+
+func (b *execBuilder) finish() error {
+	for _, entry := range b.resMap {
+		for _, target := range entry.targets {
+			*target = entry.exec
+		}
+	}
+
+	return b.packerBuilder.Finish()
+}
+
+func (b *execBuilder) assignExec(target *Resolvable, t common.Type, resolverType reflect.Type) error {
+	k := typePair{t, resolverType}
+	ref, ok := b.resMap[k]
+	if !ok {
+		ref = &resMapEntry{}
+		b.resMap[k] = ref
+		var err error
+		ref.exec, err = b.makeExec(t, resolverType)
+		if err != nil {
+			return err
+		}
+	}
+	ref.targets = append(ref.targets, target)
+	return nil
+}
+
+func (b *execBuilder) makeExec(t common.Type, resolverType reflect.Type) (Resolvable, error) {
+	var nonNull bool
+	t, nonNull = unwrapNonNull(t)
+
+	switch t := t.(type) {
+	case *schema.Object:
+		return b.makeObjectExec(t.Name, t.Fields, nil, nonNull, resolverType)
+
+	case *schema.Interface:
+		return b.makeObjectExec(t.Name, t.Fields, t.PossibleTypes, nonNull, resolverType)
+
+	case *schema.Union:
+		return b.makeObjectExec(t.Name, nil, t.PossibleTypes, nonNull, resolverType)
+	}
+
+	if !nonNull {
+		if resolverType.Kind() != reflect.Ptr {
+			return nil, fmt.Errorf("%s is not a pointer", resolverType)
+		}
+		resolverType = resolverType.Elem()
+	}
+
+	switch t := t.(type) {
+	case *schema.Scalar:
+		return makeScalarExec(t, resolverType)
+
+	case *schema.Enum:
+		return &Scalar{}, nil
+
+	case *common.List:
+		if resolverType.Kind() != reflect.Slice {
+			return nil, fmt.Errorf("%s is not a slice", resolverType)
+		}
+		e := &List{}
+		if err := b.assignExec(&e.Elem, t.OfType, resolverType.Elem()); err != nil {
+			return nil, err
+		}
+		return e, nil
+
+	default:
+		panic("invalid type: " + t.String())
+	}
+}
+
+func makeScalarExec(t *schema.Scalar, resolverType reflect.Type) (Resolvable, error) {
+	implementsType := false
+	switch r := reflect.New(resolverType).Interface().(type) {
+	case *int32:
+		implementsType = (t.Name == "Int")
+	case *float64:
+		implementsType = (t.Name == "Float")
+	case *string:
+		implementsType = (t.Name == "String")
+	case *bool:
+		implementsType = (t.Name == "Boolean")
+	case packer.Unmarshaler:
+		implementsType = r.ImplementsGraphQLType(t.Name)
+	}
+	if !implementsType {
+		return nil, fmt.Errorf("can not use %s as %s", resolverType, t.Name)
+	}
+	return &Scalar{}, nil
+}
+
+func (b *execBuilder) makeObjectExec(typeName string, fields schema.FieldList, possibleTypes []*schema.Object, nonNull bool, resolverType reflect.Type) (*Object, error) {
+	if !nonNull {
+		if resolverType.Kind() != reflect.Ptr && resolverType.Kind() != reflect.Interface {
+			return nil, fmt.Errorf("%s is not a pointer or interface", resolverType)
+		}
+	}
+
+	methodHasReceiver := resolverType.Kind() != reflect.Interface
+
+	Fields := make(map[string]*Field)
+	for _, f := range fields {
+		methodIndex := findMethod(resolverType, f.Name)
+		if methodIndex == -1 {
+			hint := ""
+			if findMethod(reflect.PtrTo(resolverType), f.Name) != -1 {
+				hint = " (hint: the method exists on the pointer type)"
+			}
+			return nil, fmt.Errorf("%s does not resolve %q: missing method for field %q%s", resolverType, typeName, f.Name, hint)
+		}
+
+		m := resolverType.Method(methodIndex)
+		fe, err := b.makeFieldExec(typeName, f, m, methodIndex, methodHasReceiver)
+		if err != nil {
+			return nil, fmt.Errorf("%s\n\treturned by (%s).%s", err, resolverType, m.Name)
+		}
+		Fields[f.Name] = fe
+	}
+
+	typeAssertions := make(map[string]*TypeAssertion)
+	for _, impl := range possibleTypes {
+		methodIndex := findMethod(resolverType, "To"+impl.Name)
+		if methodIndex == -1 {
+			return nil, fmt.Errorf("%s does not resolve %q: missing method %q to convert to %q", resolverType, typeName, "To"+impl.Name, impl.Name)
+		}
+		if resolverType.Method(methodIndex).Type.NumOut() != 2 {
+			return nil, fmt.Errorf("%s does not resolve %q: method %q should return a value and a bool indicating success", resolverType, typeName, "To"+impl.Name)
+		}
+		a := &TypeAssertion{
+			MethodIndex: methodIndex,
+		}
+		if err := b.assignExec(&a.TypeExec, impl, resolverType.Method(methodIndex).Type.Out(0)); err != nil {
+			return nil, err
+		}
+		typeAssertions[impl.Name] = a
+	}
+
+	return &Object{
+		Name:           typeName,
+		Fields:         Fields,
+		TypeAssertions: typeAssertions,
+	}, nil
+}
+
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+func (b *execBuilder) makeFieldExec(typeName string, f *schema.Field, m reflect.Method, methodIndex int, methodHasReceiver bool) (*Field, error) {
+	in := make([]reflect.Type, m.Type.NumIn())
+	for i := range in {
+		in[i] = m.Type.In(i)
+	}
+	if methodHasReceiver {
+		in = in[1:] // first parameter is receiver
+	}
+
+	hasContext := len(in) > 0 && in[0] == contextType
+	if hasContext {
+		in = in[1:]
+	}
+
+	var argsPacker *packer.StructPacker
+	if len(f.Args) > 0 {
+		if len(in) == 0 {
+			return nil, fmt.Errorf("must have parameter for field arguments")
+		}
+		var err error
+		argsPacker, err = b.packerBuilder.MakeStructPacker(f.Args, in[0])
+		if err != nil {
+			return nil, err
+		}
+		in = in[1:]
+	}
+
+	if len(in) > 0 {
+		return nil, fmt.Errorf("too many parameters")
+	}
+
+	if m.Type.NumOut() > 2 {
+		return nil, fmt.Errorf("too many return values")
+	}
+
+	hasError := m.Type.NumOut() == 2
+	if hasError {
+		if m.Type.Out(1) != errorType {
+			return nil, fmt.Errorf(`must have "error" as its second return value`)
+		}
+	}
+
+	fe := &Field{
+		Field:       *f,
+		TypeName:    typeName,
+		MethodIndex: methodIndex,
+		HasContext:  hasContext,
+		ArgsPacker:  argsPacker,
+		HasError:    hasError,
+		TraceLabel:  fmt.Sprintf("GraphQL field: %s.%s", typeName, f.Name),
+	}
+	if err := b.assignExec(&fe.ValueExec, f.Type, m.Type.Out(0)); err != nil {
+		return nil, err
+	}
+	return fe, nil
+}
+
+func findMethod(t reflect.Type, name string) int {
+	for i := 0; i < t.NumMethod(); i++ {
+		if strings.EqualFold(stripUnderscore(name), stripUnderscore(t.Method(i).Name)) {
+			return i
+		}
+	}
+	return -1
+}
+
+func unwrapNonNull(t common.Type) (common.Type, bool) {
+	if nn, ok := t.(*common.NonNull); ok {
+		return nn.OfType, true
+	}
+	return t, false
+}
+
+func stripUnderscore(s string) string {
+	return strings.Replace(s, "_", "", -1)
+}
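
`makeObjectExec` and `makeFieldExec` above define the resolver contract: each schema field is bound via `findMethod` (case-insensitive, underscores ignored), an optional leading `context.Context` and an args struct (when the field has arguments) are accepted, and an optional trailing `error` return is allowed. A hedged sketch of a resolver shape satisfying that contract for a hypothetical `Account { balance(block: Int): String! }` type:

```go
package resolvers

import "context"

// AccountResolver is illustrative only; the Account schema type it assumes
// (with a balance(block: Int) field) is hypothetical.
type AccountResolver struct{ addr string }

// Balance resolves the "balance" field: the method name matches the field
// case-insensitively, the context and args struct parameters are optional,
// and the nullable Int argument must be a pointer.
func (r *AccountResolver) Balance(ctx context.Context, args struct{ Block *int32 }) (string, error) {
	_ = ctx
	_ = args.Block
	return "0x0", nil
}
```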
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
new file mode 100644
index 000000000..aed079b67
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go
@@ -0,0 +1,238 @@
+package selected
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/exec/packer"
+	"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+	"github.com/graph-gophers/graphql-go/internal/query"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+	"github.com/graph-gophers/graphql-go/introspection"
+)
+
+type Request struct {
+	Schema *schema.Schema
+	Doc    *query.Document
+	Vars   map[string]interface{}
+	Mu     sync.Mutex
+	Errs   []*errors.QueryError
+}
+
+func (r *Request) AddError(err *errors.QueryError) {
+	r.Mu.Lock()
+	r.Errs = append(r.Errs, err)
+	r.Mu.Unlock()
+}
+
+func ApplyOperation(r *Request, s *resolvable.Schema, op *query.Operation) []Selection {
+	var obj *resolvable.Object
+	switch op.Type {
+	case query.Query:
+		obj = s.Query.(*resolvable.Object)
+	case query.Mutation:
+		obj = s.Mutation.(*resolvable.Object)
+	}
+	return applySelectionSet(r, obj, op.Selections)
+}
+
+type Selection interface {
+	isSelection()
+}
+
+type SchemaField struct {
+	resolvable.Field
+	Alias       string
+	Args        map[string]interface{}
+	PackedArgs  reflect.Value
+	Sels        []Selection
+	Async       bool
+	FixedResult reflect.Value
+}
+
+type TypeAssertion struct {
+	resolvable.TypeAssertion
+	Sels []Selection
+}
+
+type TypenameField struct {
+	resolvable.Object
+	Alias string
+}
+
+func (*SchemaField) isSelection()   {}
+func (*TypeAssertion) isSelection() {}
+func (*TypenameField) isSelection() {}
+
+func applySelectionSet(r *Request, e *resolvable.Object, sels []query.Selection) (flattenedSels []Selection) {
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *query.Field:
+			field := sel
+			if skipByDirective(r, field.Directives) {
+				continue
+			}
+
+			switch field.Name.Name {
+			case "__typename":
+				flattenedSels = append(flattenedSels, &TypenameField{
+					Object: *e,
+					Alias:  field.Alias.Name,
+				})
+
+			case "__schema":
+				flattenedSels = append(flattenedSels, &SchemaField{
+					Field:       resolvable.MetaFieldSchema,
+					Alias:       field.Alias.Name,
+					Sels:        applySelectionSet(r, resolvable.MetaSchema, field.Selections),
+					Async:       true,
+					FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)),
+				})
+
+			case "__type":
+				p := packer.ValuePacker{ValueType: reflect.TypeOf("")}
+				v, err := p.Pack(field.Arguments.MustGet("name").Value(r.Vars))
+				if err != nil {
+					r.AddError(errors.Errorf("%s", err))
+					return nil
+				}
+
+				t, ok := r.Schema.Types[v.String()]
+				if !ok {
+					return nil
+				}
+
+				flattenedSels = append(flattenedSels, &SchemaField{
+					Field:       resolvable.MetaFieldType,
+					Alias:       field.Alias.Name,
+					Sels:        applySelectionSet(r, resolvable.MetaType, field.Selections),
+					Async:       true,
+					FixedResult: reflect.ValueOf(introspection.WrapType(t)),
+				})
+
+			default:
+				fe := e.Fields[field.Name.Name]
+
+				var args map[string]interface{}
+				var packedArgs reflect.Value
+				if fe.ArgsPacker != nil {
+					args = make(map[string]interface{})
+					for _, arg := range field.Arguments {
+						args[arg.Name.Name] = arg.Value.Value(r.Vars)
+					}
+					var err error
+					packedArgs, err = fe.ArgsPacker.Pack(args)
+					if err != nil {
+						r.AddError(errors.Errorf("%s", err))
+						return
+					}
+				}
+
+				fieldSels := applyField(r, fe.ValueExec, field.Selections)
+				flattenedSels = append(flattenedSels, &SchemaField{
+					Field:      *fe,
+					Alias:      field.Alias.Name,
+					Args:       args,
+					PackedArgs: packedArgs,
+					Sels:       fieldSels,
+					Async:      fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels),
+				})
+			}
+
+		case *query.InlineFragment:
+			frag := sel
+			if skipByDirective(r, frag.Directives) {
+				continue
+			}
+			flattenedSels = append(flattenedSels, applyFragment(r, e, &frag.Fragment)...)
+
+		case *query.FragmentSpread:
+			spread := sel
+			if skipByDirective(r, spread.Directives) {
+				continue
+			}
+			flattenedSels = append(flattenedSels, applyFragment(r, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...)
+
+		default:
+			panic("invalid type")
+		}
+	}
+	return
+}
+
+func applyFragment(r *Request, e *resolvable.Object, frag *query.Fragment) []Selection {
+	if frag.On.Name != "" && frag.On.Name != e.Name {
+		a, ok := e.TypeAssertions[frag.On.Name]
+		if !ok {
+			panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling
+		}
+
+		return []Selection{&TypeAssertion{
+			TypeAssertion: *a,
+			Sels:          applySelectionSet(r, a.TypeExec.(*resolvable.Object), frag.Selections),
+		}}
+	}
+	return applySelectionSet(r, e, frag.Selections)
+}
+
+func applyField(r *Request, e resolvable.Resolvable, sels []query.Selection) []Selection {
+	switch e := e.(type) {
+	case *resolvable.Object:
+		return applySelectionSet(r, e, sels)
+	case *resolvable.List:
+		return applyField(r, e.Elem, sels)
+	case *resolvable.Scalar:
+		return nil
+	default:
+		panic("unreachable")
+	}
+}
+
+func skipByDirective(r *Request, directives common.DirectiveList) bool {
+	if d := directives.Get("skip"); d != nil {
+		p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
+		v, err := p.Pack(d.Args.MustGet("if").Value(r.Vars))
+		if err != nil {
+			r.AddError(errors.Errorf("%s", err))
+		}
+		if err == nil && v.Bool() {
+			return true
+		}
+	}
+
+	if d := directives.Get("include"); d != nil {
+		p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
+		v, err := p.Pack(d.Args.MustGet("if").Value(r.Vars))
+		if err != nil {
+			r.AddError(errors.Errorf("%s", err))
+		}
+		if err == nil && !v.Bool() {
+			return true
+		}
+	}
+
+	return false
+}
+
+func HasAsyncSel(sels []Selection) bool {
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *SchemaField:
+			if sel.Async {
+				return true
+			}
+		case *TypeAssertion:
+			if HasAsyncSel(sel.Sels) {
+				return true
+			}
+		case *TypenameField:
+			// sync
+		default:
+			panic("unreachable")
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
new file mode 100644
index 000000000..faba4d2ad
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go
@@ -0,0 +1,234 @@
+package query
+
+import (
+	"fmt"
+	"text/scanner"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+)
+
+type Document struct {
+	Operations OperationList
+	Fragments  FragmentList
+}
+
+type OperationList []*Operation
+
+func (l OperationList) Get(name string) *Operation {
+	for _, f := range l {
+		if f.Name.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
+type FragmentList []*FragmentDecl
+
+func (l FragmentList) Get(name string) *FragmentDecl {
+	for _, f := range l {
+		if f.Name.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
+type Operation struct {
+	Type       OperationType
+	Name       common.Ident
+	Vars       common.InputValueList
+	Selections []Selection
+	Directives common.DirectiveList
+	Loc        errors.Location
+}
+
+type OperationType string
+
+const (
+	Query        OperationType = "QUERY"
+	Mutation     OperationType = "MUTATION"
+	Subscription OperationType = "SUBSCRIPTION"
+)
+
+type Fragment struct {
+	On         common.TypeName
+	Selections []Selection
+}
+
+type FragmentDecl struct {
+	Fragment
+	Name       common.Ident
+	Directives common.DirectiveList
+	Loc        errors.Location
+}
+
+type Selection interface {
+	isSelection()
+}
+
+type Field struct {
+	Alias           common.Ident
+	Name            common.Ident
+	Arguments       common.ArgumentList
+	Directives      common.DirectiveList
+	Selections      []Selection
+	SelectionSetLoc errors.Location
+}
+
+type InlineFragment struct {
+	Fragment
+	Directives common.DirectiveList
+	Loc        errors.Location
+}
+
+type FragmentSpread struct {
+	Name       common.Ident
+	Directives common.DirectiveList
+	Loc        errors.Location
+}
+
+func (Field) isSelection()          {}
+func (InlineFragment) isSelection() {}
+func (FragmentSpread) isSelection() {}
+
+func Parse(queryString string) (*Document, *errors.QueryError) {
+	l := common.NewLexer(queryString)
+
+	var doc *Document
+	err := l.CatchSyntaxError(func() { doc = parseDocument(l) })
+	if err != nil {
+		return nil, err
+	}
+
+	return doc, nil
+}
+
+func parseDocument(l *common.Lexer) *Document {
+	d := &Document{}
+	l.Consume()
+	for l.Peek() != scanner.EOF {
+		if l.Peek() == '{' {
+			op := &Operation{Type: Query, Loc: l.Location()}
+			op.Selections = parseSelectionSet(l)
+			d.Operations = append(d.Operations, op)
+			continue
+		}
+
+		loc := l.Location()
+		switch x := l.ConsumeIdent(); x {
+		case "query":
+			op := parseOperation(l, Query)
+			op.Loc = loc
+			d.Operations = append(d.Operations, op)
+
+		case "mutation":
+			d.Operations = append(d.Operations, parseOperation(l, Mutation))
+
+		case "subscription":
+			d.Operations = append(d.Operations, parseOperation(l, Subscription))
+
+		case "fragment":
+			frag := parseFragment(l)
+			frag.Loc = loc
+			d.Fragments = append(d.Fragments, frag)
+
+		default:
+			l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "query", "mutation", "subscription" or "fragment"`, x))
+		}
+	}
+	return d
+}
+
+func parseOperation(l *common.Lexer, opType OperationType) *Operation {
+	op := &Operation{Type: opType}
+	op.Name.Loc = l.Location()
+	if l.Peek() == scanner.Ident {
+		op.Name = l.ConsumeIdentWithLoc()
+	}
+	op.Directives = common.ParseDirectives(l)
+	if l.Peek() == '(' {
+		l.ConsumeToken('(')
+		for l.Peek() != ')' {
+			loc := l.Location()
+			l.ConsumeToken('$')
+			iv := common.ParseInputValue(l)
+			iv.Loc = loc
+			op.Vars = append(op.Vars, iv)
+		}
+		l.ConsumeToken(')')
+	}
+	op.Selections = parseSelectionSet(l)
+	return op
+}
+
+func parseFragment(l *common.Lexer) *FragmentDecl {
+	f := &FragmentDecl{}
+	f.Name = l.ConsumeIdentWithLoc()
+	l.ConsumeKeyword("on")
+	f.On = common.TypeName{Ident: l.ConsumeIdentWithLoc()}
+	f.Directives = common.ParseDirectives(l)
+	f.Selections = parseSelectionSet(l)
+	return f
+}
+
+func parseSelectionSet(l *common.Lexer) []Selection {
+	var sels []Selection
+	l.ConsumeToken('{')
+	for l.Peek() != '}' {
+		sels = append(sels, parseSelection(l))
+	}
+	l.ConsumeToken('}')
+	return sels
+}
+
+func parseSelection(l *common.Lexer) Selection {
+	if l.Peek() == '.' {
+		return parseSpread(l)
+	}
+	return parseField(l)
+}
+
+func parseField(l *common.Lexer) *Field {
+	f := &Field{}
+	f.Alias = l.ConsumeIdentWithLoc()
+	f.Name = f.Alias
+	if l.Peek() == ':' {
+		l.ConsumeToken(':')
+		f.Name = l.ConsumeIdentWithLoc()
+	}
+	if l.Peek() == '(' {
+		f.Arguments = common.ParseArguments(l)
+	}
+	f.Directives = common.ParseDirectives(l)
+	if l.Peek() == '{' {
+		f.SelectionSetLoc = l.Location()
+		f.Selections = parseSelectionSet(l)
+	}
+	return f
+}
+
+func parseSpread(l *common.Lexer) Selection {
+	loc := l.Location()
+	l.ConsumeToken('.')
+	l.ConsumeToken('.')
+	l.ConsumeToken('.')
+
+	f := &InlineFragment{Loc: loc}
+	if l.Peek() == scanner.Ident {
+		ident := l.ConsumeIdentWithLoc()
+		if ident.Name != "on" {
+			fs := &FragmentSpread{
+				Name: ident,
+				Loc:  loc,
+			}
+			fs.Directives = common.ParseDirectives(l)
+			return fs
+		}
+		f.On = common.TypeName{Ident: l.ConsumeIdentWithLoc()}
+	}
+	f.Directives = common.ParseDirectives(l)
+	f.Selections = parseSelectionSet(l)
+	return f
+}
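
`Parse` above produces a `Document` whose operations and fragments the executor later walks. Because this package lives under `internal/`, it is only importable from inside the graphql-go module, so the sketch below is written as an illustrative in-package test (not part of the vendored code):

```go
package query

import "testing"

// TestParseSketch is illustrative only; it relies solely on Parse and the
// Document types defined in this file.
func TestParseSketch(t *testing.T) {
	doc, err := Parse(`query Block { block(number: 1) { hash } }`)
	if err != nil {
		t.Fatal(err)
	}
	op := doc.Operations.Get("Block")
	if op == nil || op.Type != Query || len(op.Selections) != 1 {
		t.Fatalf("unexpected parse result: %+v", op)
	}
}
```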
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
new file mode 100644
index 000000000..b48bf7acf
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go
@@ -0,0 +1,190 @@
+package schema
+
+var Meta *Schema
+
+func init() {
+	Meta = &Schema{} // bootstrap
+	Meta = New()
+	if err := Meta.Parse(metaSrc); err != nil {
+		panic(err)
+	}
+}
+
+var metaSrc = `
+	# The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.
+	scalar Int
+
+	# The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
+	scalar Float
+
+	# The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.
+	scalar String
+
+	# The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `.
+	scalar Boolean
+
+	# The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID.
+	scalar ID
+
+	# Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true.
+	directive @include(
+		# Included when true.
+		if: Boolean!
+	) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+	# Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true.
+	directive @skip(
+		# Skipped when true.
+		if: Boolean!
+	) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
+
+	# Marks an element of a GraphQL schema as no longer supported.
+	directive @deprecated(
+		# Explains why this element was deprecated, usually also including a suggestion
+		# for how to access supported similar data. Formatted in
+		# [Markdown](https://daringfireball.net/projects/markdown/).
+		reason: String = "No longer supported"
+	) on FIELD_DEFINITION | ENUM_VALUE
+
+	# A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
+	#
+	# In some cases, you need to provide options to alter GraphQL's execution behavior
+	# in ways field arguments will not suffice, such as conditionally including or
+	# skipping a field. Directives provide this by describing additional information
+	# to the executor.
+	type __Directive {
+		name: String!
+		description: String
+		locations: [__DirectiveLocation!]!
+		args: [__InputValue!]!
+	}
+
+	# A Directive can be adjacent to many parts of the GraphQL language; a
+	# __DirectiveLocation describes one such possible adjacency.
+	enum __DirectiveLocation {
+		# Location adjacent to a query operation.
+		QUERY
+		# Location adjacent to a mutation operation.
+		MUTATION
+		# Location adjacent to a subscription operation.
+		SUBSCRIPTION
+		# Location adjacent to a field.
+		FIELD
+		# Location adjacent to a fragment definition.
+		FRAGMENT_DEFINITION
+		# Location adjacent to a fragment spread.
+		FRAGMENT_SPREAD
+		# Location adjacent to an inline fragment.
+		INLINE_FRAGMENT
+		# Location adjacent to a schema definition.
+		SCHEMA
+		# Location adjacent to a scalar definition.
+		SCALAR
+		# Location adjacent to an object type definition.
+		OBJECT
+		# Location adjacent to a field definition.
+		FIELD_DEFINITION
+		# Location adjacent to an argument definition.
+		ARGUMENT_DEFINITION
+		# Location adjacent to an interface definition.
+		INTERFACE
+		# Location adjacent to a union definition.
+		UNION
+		# Location adjacent to an enum definition.
+		ENUM
+		# Location adjacent to an enum value definition.
+		ENUM_VALUE
+		# Location adjacent to an input object type definition.
+		INPUT_OBJECT
+		# Location adjacent to an input object field definition.
+		INPUT_FIELD_DEFINITION
+	}
+
+	# One possible value for a given Enum. Enum values are unique values, not a
+	# placeholder for a string or numeric value. However an Enum value is returned in
+	# a JSON response as a string.
+	type __EnumValue {
+		name: String!
+		description: String
+		isDeprecated: Boolean!
+		deprecationReason: String
+	}
+
+	# Object and Interface types are described by a list of Fields, each of which has
+	# a name, potentially a list of arguments, and a return type.
+	type __Field {
+		name: String!
+		description: String
+		args: [__InputValue!]!
+		type: __Type!
+		isDeprecated: Boolean!
+		deprecationReason: String
+	}
+
+	# Arguments provided to Fields or Directives and the input fields of an
+	# InputObject are represented as Input Values which describe their type and
+	# optionally a default value.
+	type __InputValue {
+		name: String!
+		description: String
+		type: __Type!
+		# A GraphQL-formatted string representing the default value for this input value.
+		defaultValue: String
+	}
+
+	# A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all
+	# available types and directives on the server, as well as the entry points for
+	# query, mutation, and subscription operations.
+	type __Schema {
+		# A list of all types supported by this server.
+		types: [__Type!]!
+		# The type that query operations will be rooted at.
+		queryType: __Type!
+		# If this server supports mutation, the type that mutation operations will be rooted at.
+		mutationType: __Type
+		# If this server supports subscription, the type that subscription operations will be rooted at.
+		subscriptionType: __Type
+		# A list of all directives supported by this server.
+		directives: [__Directive!]!
+	}
+
+	# The fundamental unit of any GraphQL Schema is the type. There are many kinds of
+	# types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum.
+	#
+	# Depending on the kind of a type, certain fields describe information about that
+	# type. Scalar types provide no information beyond a name and description, while
+	# Enum types provide their values. Object and Interface types provide the fields
+	# they describe. Abstract types, Union and Interface, provide the Object types
+	# possible at runtime. List and NonNull types compose other types.
+	type __Type {
+		kind: __TypeKind!
+		name: String
+		description: String
+		fields(includeDeprecated: Boolean = false): [__Field!]
+		interfaces: [__Type!]
+		possibleTypes: [__Type!]
+		enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
+		inputFields: [__InputValue!]
+		ofType: __Type
+	}
+	
+	# An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is.
+	enum __TypeKind {
+		# Indicates this type is a scalar.
+		SCALAR
+		# Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields.
+		OBJECT
+		# Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields.
+		INTERFACE
+		# Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field.
+		UNION
+		# Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field.
+		ENUM
+		# Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field.
+		INPUT_OBJECT
+		# Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field.
+		LIST
+		# Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field.
+		NON_NULL
+	}
+`
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
new file mode 100644
index 000000000..e549f17c0
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go
@@ -0,0 +1,570 @@
+package schema
+
+import (
+	"fmt"
+	"text/scanner"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+)
+
+// Schema represents a GraphQL service's collective type system capabilities.
+// A schema is defined in terms of the types and directives it supports as well as the root
+// operation types for each kind of operation: `query`, `mutation`, and `subscription`.
+//
+// For a more formal definition, read the relevant section in the specification:
+//
+// http://facebook.github.io/graphql/draft/#sec-Schema
+type Schema struct {
+	// EntryPoints determines the place in the type system where `query`, `mutation`, and
+	// `subscription` operations begin.
+	//
+	// http://facebook.github.io/graphql/draft/#sec-Root-Operation-Types
+	//
+	// NOTE: The specification refers to this concept as "Root Operation Types".
+	// TODO: Rename the `EntryPoints` field to `RootOperationTypes` to align with spec terminology.
+	EntryPoints map[string]NamedType
+
+	// Types are the fundamental unit of any GraphQL schema.
+	// There are six kinds of named types, and two wrapping types.
+	//
+	// http://facebook.github.io/graphql/draft/#sec-Types
+	Types map[string]NamedType
+
+	// TODO: Type extensions?
+	// http://facebook.github.io/graphql/draft/#sec-Type-Extensions
+
+	// Directives are used to annotate various parts of a GraphQL document as an indicator that they
+	// should be evaluated differently by a validator, executor, or client tool such as a code
+	// generator.
+	//
+	// http://facebook.github.io/graphql/draft/#sec-Type-System.Directives
+	Directives map[string]*DirectiveDecl
+
+	entryPointNames map[string]string
+	objects         []*Object
+	unions          []*Union
+	enums           []*Enum
+}
+
+// Resolve a named type in the schema by its name.
+func (s *Schema) Resolve(name string) common.Type {
+	return s.Types[name]
+}
+
+// NamedType represents a type with a name.
+//
+// http://facebook.github.io/graphql/draft/#NamedType
+type NamedType interface {
+	common.Type
+	TypeName() string
+	Description() string
+}
+
+// Scalar types represent primitive leaf values (e.g. a string or an integer) in a GraphQL type
+// system.
+//
+// GraphQL responses take the form of a hierarchical tree; the leaves on these trees are GraphQL
+// scalars.
+//
+// http://facebook.github.io/graphql/draft/#sec-Scalars
+type Scalar struct {
+	Name string
+	Desc string
+	// TODO: Add a list of directives?
+}
+
+// Object types represent a list of named fields, each of which yield a value of a specific type.
+//
+// GraphQL queries are hierarchical and composed, describing a tree of information.
+// While Scalar types describe the leaf values of these hierarchical types, Objects describe the
+// intermediate levels.
+//
+// http://facebook.github.io/graphql/draft/#sec-Objects
+type Object struct {
+	Name       string
+	Interfaces []*Interface
+	Fields     FieldList
+	Desc       string
+	// TODO: Add a list of directives?
+
+	interfaceNames []string
+}
+
+// Interface types represent a list of named fields and their arguments.
+//
+// GraphQL objects can then implement these interfaces, which requires that the object type define
+// all fields defined by those interfaces.
+//
+// http://facebook.github.io/graphql/draft/#sec-Interfaces
+type Interface struct {
+	Name          string
+	PossibleTypes []*Object
+	Fields        FieldList // NOTE: the spec refers to this as `FieldsDefinition`.
+	Desc          string
+	// TODO: Add a list of directives?
+}
+
+// Union types represent objects that could be one of a list of GraphQL object types, but provide no
+// guaranteed fields between those types.
+//
+// They also differ from interfaces in that object types declare what interfaces they implement, but
+// are not aware of what unions contain them.
+//
+// http://facebook.github.io/graphql/draft/#sec-Unions
+type Union struct {
+	Name          string
+	PossibleTypes []*Object // NOTE: the spec refers to this as `UnionMemberTypes`.
+	Desc          string
+	// TODO: Add a list of directives?
+
+	typeNames []string
+}
+
+// Enum types describe a set of possible values.
+//
+// Like scalar types, Enum types also represent leaf values in a GraphQL type system.
+//
+// http://facebook.github.io/graphql/draft/#sec-Enums
+type Enum struct {
+	Name   string
+	Values []*EnumValue // NOTE: the spec refers to this as `EnumValuesDefinition`.
+	Desc   string
+	// TODO: Add a list of directives?
+}
+
+// EnumValue types are unique values that may be serialized as a string: the name of the
+// represented value.
+//
+// http://facebook.github.io/graphql/draft/#EnumValueDefinition
+type EnumValue struct {
+	Name       string
+	Directives common.DirectiveList
+	Desc       string
+	// TODO: Add a list of directives?
+}
+
+// InputObject types define a set of input fields; the input fields are either scalars, enums, or
+// other input objects.
+//
+// This allows arguments to accept arbitrarily complex structs.
+//
+// http://facebook.github.io/graphql/draft/#sec-Input-Objects
+type InputObject struct {
+	Name   string
+	Desc   string
+	Values common.InputValueList
+	// TODO: Add a list of directives?
+}
+
+// FieldList is a list of an Object's Fields.
+//
+// http://facebook.github.io/graphql/draft/#FieldsDefinition
+type FieldList []*Field
+
+// Get iterates over the field list, returning a pointer-to-Field when the field name matches the
+// provided `name` argument.
+// Returns nil when no field was found by that name.
+func (l FieldList) Get(name string) *Field {
+	for _, f := range l {
+		if f.Name == name {
+			return f
+		}
+	}
+	return nil
+}
+
+// Names returns a string slice of the field names in the FieldList.
+func (l FieldList) Names() []string {
+	names := make([]string, len(l))
+	for i, f := range l {
+		names[i] = f.Name
+	}
+	return names
+}
+
+// http://facebook.github.io/graphql/draft/#sec-Type-System.Directives
+type DirectiveDecl struct {
+	Name string
+	Desc string
+	Locs []string
+	Args common.InputValueList
+}
+
+func (*Scalar) Kind() string      { return "SCALAR" }
+func (*Object) Kind() string      { return "OBJECT" }
+func (*Interface) Kind() string   { return "INTERFACE" }
+func (*Union) Kind() string       { return "UNION" }
+func (*Enum) Kind() string        { return "ENUM" }
+func (*InputObject) Kind() string { return "INPUT_OBJECT" }
+
+func (t *Scalar) String() string      { return t.Name }
+func (t *Object) String() string      { return t.Name }
+func (t *Interface) String() string   { return t.Name }
+func (t *Union) String() string       { return t.Name }
+func (t *Enum) String() string        { return t.Name }
+func (t *InputObject) String() string { return t.Name }
+
+func (t *Scalar) TypeName() string      { return t.Name }
+func (t *Object) TypeName() string      { return t.Name }
+func (t *Interface) TypeName() string   { return t.Name }
+func (t *Union) TypeName() string       { return t.Name }
+func (t *Enum) TypeName() string        { return t.Name }
+func (t *InputObject) TypeName() string { return t.Name }
+
+func (t *Scalar) Description() string      { return t.Desc }
+func (t *Object) Description() string      { return t.Desc }
+func (t *Interface) Description() string   { return t.Desc }
+func (t *Union) Description() string       { return t.Desc }
+func (t *Enum) Description() string        { return t.Desc }
+func (t *InputObject) Description() string { return t.Desc }
+
+// Field is a conceptual function which yields values.
+// http://facebook.github.io/graphql/draft/#FieldDefinition
+type Field struct {
+	Name       string
+	Args       common.InputValueList // NOTE: the spec refers to this as `ArgumentsDefinition`.
+	Type       common.Type
+	Directives common.DirectiveList
+	Desc       string
+}
+
+// New initializes an instance of Schema.
+func New() *Schema {
+	s := &Schema{
+		entryPointNames: make(map[string]string),
+		Types:           make(map[string]NamedType),
+		Directives:      make(map[string]*DirectiveDecl),
+	}
+	for n, t := range Meta.Types {
+		s.Types[n] = t
+	}
+	for n, d := range Meta.Directives {
+		s.Directives[n] = d
+	}
+	return s
+}
+
+// Parse the schema string.
+func (s *Schema) Parse(schemaString string) error {
+	l := common.NewLexer(schemaString)
+
+	err := l.CatchSyntaxError(func() { parseSchema(s, l) })
+	if err != nil {
+		return err
+	}
+
+	for _, t := range s.Types {
+		if err := resolveNamedType(s, t); err != nil {
+			return err
+		}
+	}
+	for _, d := range s.Directives {
+		for _, arg := range d.Args {
+			t, err := common.ResolveType(arg.Type, s.Resolve)
+			if err != nil {
+				return err
+			}
+			arg.Type = t
+		}
+	}
+
+	s.EntryPoints = make(map[string]NamedType)
+	for key, name := range s.entryPointNames {
+		t, ok := s.Types[name]
+		if !ok {
+			return errors.Errorf("type %q not found", name)
+		}
+		s.EntryPoints[key] = t
+	}
+
+	for _, obj := range s.objects {
+		obj.Interfaces = make([]*Interface, len(obj.interfaceNames))
+		for i, intfName := range obj.interfaceNames {
+			t, ok := s.Types[intfName]
+			if !ok {
+				return errors.Errorf("interface %q not found", intfName)
+			}
+			intf, ok := t.(*Interface)
+			if !ok {
+				return errors.Errorf("type %q is not an interface", intfName)
+			}
+			obj.Interfaces[i] = intf
+			intf.PossibleTypes = append(intf.PossibleTypes, obj)
+		}
+	}
+
+	for _, union := range s.unions {
+		union.PossibleTypes = make([]*Object, len(union.typeNames))
+		for i, name := range union.typeNames {
+			t, ok := s.Types[name]
+			if !ok {
+				return errors.Errorf("object type %q not found", name)
+			}
+			obj, ok := t.(*Object)
+			if !ok {
+				return errors.Errorf("type %q is not an object", name)
+			}
+			union.PossibleTypes[i] = obj
+		}
+	}
+
+	for _, enum := range s.enums {
+		for _, value := range enum.Values {
+			if err := resolveDirectives(s, value.Directives); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func resolveNamedType(s *Schema, t NamedType) error {
+	switch t := t.(type) {
+	case *Object:
+		for _, f := range t.Fields {
+			if err := resolveField(s, f); err != nil {
+				return err
+			}
+		}
+	case *Interface:
+		for _, f := range t.Fields {
+			if err := resolveField(s, f); err != nil {
+				return err
+			}
+		}
+	case *InputObject:
+		if err := resolveInputObject(s, t.Values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func resolveField(s *Schema, f *Field) error {
+	t, err := common.ResolveType(f.Type, s.Resolve)
+	if err != nil {
+		return err
+	}
+	f.Type = t
+	if err := resolveDirectives(s, f.Directives); err != nil {
+		return err
+	}
+	return resolveInputObject(s, f.Args)
+}
+
+func resolveDirectives(s *Schema, directives common.DirectiveList) error {
+	for _, d := range directives {
+		dirName := d.Name.Name
+		dd, ok := s.Directives[dirName]
+		if !ok {
+			return errors.Errorf("directive %q not found", dirName)
+		}
+		for _, arg := range d.Args {
+			if dd.Args.Get(arg.Name.Name) == nil {
+				return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName)
+			}
+		}
+		for _, arg := range dd.Args {
+			if _, ok := d.Args.Get(arg.Name.Name); !ok {
+				d.Args = append(d.Args, common.Argument{Name: arg.Name, Value: arg.Default})
+			}
+		}
+	}
+	return nil
+}
+
+func resolveInputObject(s *Schema, values common.InputValueList) error {
+	for _, v := range values {
+		t, err := common.ResolveType(v.Type, s.Resolve)
+		if err != nil {
+			return err
+		}
+		v.Type = t
+	}
+	return nil
+}
+
+func parseSchema(s *Schema, l *common.Lexer) {
+	l.Consume()
+
+	for l.Peek() != scanner.EOF {
+		desc := l.DescComment()
+		switch x := l.ConsumeIdent(); x {
+
+		case "schema":
+			l.ConsumeToken('{')
+			for l.Peek() != '}' {
+				name := l.ConsumeIdent()
+				l.ConsumeToken(':')
+				typ := l.ConsumeIdent()
+				s.entryPointNames[name] = typ
+			}
+			l.ConsumeToken('}')
+
+		case "type":
+			obj := parseObjectDef(l)
+			obj.Desc = desc
+			s.Types[obj.Name] = obj
+			s.objects = append(s.objects, obj)
+
+		case "interface":
+			iface := parseInterfaceDef(l)
+			iface.Desc = desc
+			s.Types[iface.Name] = iface
+
+		case "union":
+			union := parseUnionDef(l)
+			union.Desc = desc
+			s.Types[union.Name] = union
+			s.unions = append(s.unions, union)
+
+		case "enum":
+			enum := parseEnumDef(l)
+			enum.Desc = desc
+			s.Types[enum.Name] = enum
+			s.enums = append(s.enums, enum)
+
+		case "input":
+			input := parseInputDef(l)
+			input.Desc = desc
+			s.Types[input.Name] = input
+
+		case "scalar":
+			name := l.ConsumeIdent()
+			s.Types[name] = &Scalar{Name: name, Desc: desc}
+
+		case "directive":
+			directive := parseDirectiveDef(l)
+			directive.Desc = desc
+			s.Directives[directive.Name] = directive
+
+		default:
+			// TODO: Add support for type extensions.
+			l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x))
+		}
+	}
+}
+
+func parseObjectDef(l *common.Lexer) *Object {
+	object := &Object{Name: l.ConsumeIdent()}
+
+	if l.Peek() == scanner.Ident {
+		l.ConsumeKeyword("implements")
+
+		for l.Peek() != '{' {
+			if l.Peek() == '&' {
+				l.ConsumeToken('&')
+			}
+
+			object.interfaceNames = append(object.interfaceNames, l.ConsumeIdent())
+		}
+	}
+
+	l.ConsumeToken('{')
+	object.Fields = parseFieldsDef(l)
+	l.ConsumeToken('}')
+
+	return object
+}
+
+func parseInterfaceDef(l *common.Lexer) *Interface {
+	i := &Interface{Name: l.ConsumeIdent()}
+
+	l.ConsumeToken('{')
+	i.Fields = parseFieldsDef(l)
+	l.ConsumeToken('}')
+
+	return i
+}
+
+func parseUnionDef(l *common.Lexer) *Union {
+	union := &Union{Name: l.ConsumeIdent()}
+
+	l.ConsumeToken('=')
+	union.typeNames = []string{l.ConsumeIdent()}
+	for l.Peek() == '|' {
+		l.ConsumeToken('|')
+		union.typeNames = append(union.typeNames, l.ConsumeIdent())
+	}
+
+	return union
+}
+
+func parseInputDef(l *common.Lexer) *InputObject {
+	i := &InputObject{}
+	i.Name = l.ConsumeIdent()
+	l.ConsumeToken('{')
+	for l.Peek() != '}' {
+		i.Values = append(i.Values, common.ParseInputValue(l))
+	}
+	l.ConsumeToken('}')
+	return i
+}
+
+func parseEnumDef(l *common.Lexer) *Enum {
+	enum := &Enum{Name: l.ConsumeIdent()}
+
+	l.ConsumeToken('{')
+	for l.Peek() != '}' {
+		v := &EnumValue{
+			Desc:       l.DescComment(),
+			Name:       l.ConsumeIdent(),
+			Directives: common.ParseDirectives(l),
+		}
+
+		enum.Values = append(enum.Values, v)
+	}
+	l.ConsumeToken('}')
+	return enum
+}
+
+func parseDirectiveDef(l *common.Lexer) *DirectiveDecl {
+	l.ConsumeToken('@')
+	d := &DirectiveDecl{Name: l.ConsumeIdent()}
+
+	if l.Peek() == '(' {
+		l.ConsumeToken('(')
+		for l.Peek() != ')' {
+			v := common.ParseInputValue(l)
+			d.Args = append(d.Args, v)
+		}
+		l.ConsumeToken(')')
+	}
+
+	l.ConsumeKeyword("on")
+
+	for {
+		loc := l.ConsumeIdent()
+		d.Locs = append(d.Locs, loc)
+		if l.Peek() != '|' {
+			break
+		}
+		l.ConsumeToken('|')
+	}
+	return d
+}
+
+func parseFieldsDef(l *common.Lexer) FieldList {
+	var fields FieldList
+	for l.Peek() != '}' {
+		f := &Field{}
+		f.Desc = l.DescComment()
+		f.Name = l.ConsumeIdent()
+		if l.Peek() == '(' {
+			l.ConsumeToken('(')
+			for l.Peek() != ')' {
+				f.Args = append(f.Args, common.ParseInputValue(l))
+			}
+			l.ConsumeToken(')')
+		}
+		l.ConsumeToken(':')
+		f.Type = common.ParseType(l)
+		f.Directives = common.ParseDirectives(l)
+		fields = append(fields, f)
+	}
+	return fields
+}
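// A minimal sketch of the SDL forms handled by the parser above (schema, type,
// interface, union, enum, input, scalar and directive definitions). The schema
// text and package below are illustrative only; a nil resolver is passed so the
// schema is parsed and resolved without needing to be executable.
package main

import graphql "github.com/graph-gophers/graphql-go"

const sdl = `
	schema { query: Query }
	scalar Time
	enum Direction { NORTH SOUTH }
	interface Named { name: String! }
	type Account implements Named {
		name: String!
		balance: Int
	}
	type Query { account(id: ID!): Account }
	union Entity = Account
	input Filter { name: String }
	directive @cached(ttl: Int) on FIELD_DEFINITION
`

func main() {
	if _, err := graphql.ParseSchema(sdl, nil); err != nil {
		panic(err)
	}
}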
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
new file mode 100644
index 000000000..9702b5f52
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go
@@ -0,0 +1,71 @@
+package validation
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+func makeSuggestion(prefix string, options []string, input string) string {
+	var selected []string
+	distances := make(map[string]int)
+	for _, opt := range options {
+		distance := levenshteinDistance(input, opt)
+		threshold := max(len(input)/2, max(len(opt)/2, 1))
+		if distance < threshold {
+			selected = append(selected, opt)
+			distances[opt] = distance
+		}
+	}
+
+	if len(selected) == 0 {
+		return ""
+	}
+	sort.Slice(selected, func(i, j int) bool {
+		return distances[selected[i]] < distances[selected[j]]
+	})
+
+	parts := make([]string, len(selected))
+	for i, opt := range selected {
+		parts[i] = strconv.Quote(opt)
+	}
+	if len(parts) > 1 {
+		parts[len(parts)-1] = "or " + parts[len(parts)-1]
+	}
+	return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", "))
+}
+
+func levenshteinDistance(s1, s2 string) int {
+	column := make([]int, len(s1)+1)
+	for y := range s1 {
+		column[y+1] = y + 1
+	}
+	for x, rx := range s2 {
+		column[0] = x + 1
+		lastdiag := x
+		for y, ry := range s1 {
+			olddiag := column[y+1]
+			if rx != ry {
+				lastdiag++
+			}
+			column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag))
+			lastdiag = olddiag
+		}
+	}
+	return column[len(s1)]
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
new file mode 100644
index 000000000..94ad5ca7f
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go
@@ -0,0 +1,909 @@
+package validation
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/scanner"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/query"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+)
+
+type varSet map[*common.InputValue]struct{}
+
+type selectionPair struct{ a, b query.Selection }
+
+type fieldInfo struct {
+	sf     *schema.Field
+	parent schema.NamedType
+}
+
+type context struct {
+	schema           *schema.Schema
+	doc              *query.Document
+	errs             []*errors.QueryError
+	opErrs           map[*query.Operation][]*errors.QueryError
+	usedVars         map[*query.Operation]varSet
+	fieldMap         map[*query.Field]fieldInfo
+	overlapValidated map[selectionPair]struct{}
+	maxDepth         int
+}
+
+func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) {
+	c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...)
+}
+
+func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) {
+	c.errs = append(c.errs, &errors.QueryError{
+		Message:   fmt.Sprintf(format, a...),
+		Locations: locs,
+		Rule:      rule,
+	})
+}
+
+type opContext struct {
+	*context
+	ops []*query.Operation
+}
+
+func newContext(s *schema.Schema, doc *query.Document, maxDepth int) *context {
+	return &context{
+		schema:           s,
+		doc:              doc,
+		opErrs:           make(map[*query.Operation][]*errors.QueryError),
+		usedVars:         make(map[*query.Operation]varSet),
+		fieldMap:         make(map[*query.Field]fieldInfo),
+		overlapValidated: make(map[selectionPair]struct{}),
+		maxDepth:         maxDepth,
+	}
+}
+
+func Validate(s *schema.Schema, doc *query.Document, maxDepth int) []*errors.QueryError {
+	c := newContext(s, doc, maxDepth)
+
+	opNames := make(nameSet)
+	fragUsedBy := make(map[*query.FragmentDecl][]*query.Operation)
+	for _, op := range doc.Operations {
+		c.usedVars[op] = make(varSet)
+		opc := &opContext{c, []*query.Operation{op}}
+
+		// If a max depth is set and this operation exceeds it, stop validating the
+		// document and exit early.
+		if validateMaxDepth(opc, op.Selections, 1) {
+			return c.errs
+		}
+
+		if op.Name.Name == "" && len(doc.Operations) != 1 {
+			c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.")
+		}
+		if op.Name.Name != "" {
+			validateName(c, opNames, op.Name, "UniqueOperationNames", "operation")
+		}
+
+		validateDirectives(opc, string(op.Type), op.Directives)
+
+		varNames := make(nameSet)
+		for _, v := range op.Vars {
+			validateName(c, varNames, v.Name, "UniqueVariableNames", "variable")
+
+			t := resolveType(c, v.Type)
+			if !canBeInput(t) {
+				c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t)
+			}
+
+			if v.Default != nil {
+				validateLiteral(opc, v.Default)
+
+				if t != nil {
+					if nn, ok := t.(*common.NonNull); ok {
+						c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType)
+					}
+
+					if ok, reason := validateValueType(opc, v.Default, t); !ok {
+						c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason)
+					}
+				}
+			}
+		}
+
+		var entryPoint schema.NamedType
+		switch op.Type {
+		case query.Query:
+			entryPoint = s.EntryPoints["query"]
+		case query.Mutation:
+			entryPoint = s.EntryPoints["mutation"]
+		case query.Subscription:
+			entryPoint = s.EntryPoints["subscription"]
+		default:
+			panic("unreachable")
+		}
+
+		validateSelectionSet(opc, op.Selections, entryPoint)
+
+		fragUsed := make(map[*query.FragmentDecl]struct{})
+		markUsedFragments(c, op.Selections, fragUsed)
+		for frag := range fragUsed {
+			fragUsedBy[frag] = append(fragUsedBy[frag], op)
+		}
+	}
+
+	fragNames := make(nameSet)
+	fragVisited := make(map[*query.FragmentDecl]struct{})
+	for _, frag := range doc.Fragments {
+		opc := &opContext{c, fragUsedBy[frag]}
+
+		validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment")
+		validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives)
+
+		t := unwrapType(resolveType(c, &frag.On))
+		// continue even if t is nil
+		if t != nil && !canBeFragment(t) {
+			c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t)
+			continue
+		}
+
+		validateSelectionSet(opc, frag.Selections, t)
+
+		if _, ok := fragVisited[frag]; !ok {
+			detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0})
+		}
+	}
+
+	for _, frag := range doc.Fragments {
+		if len(fragUsedBy[frag]) == 0 {
+			c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name)
+		}
+	}
+
+	for _, op := range doc.Operations {
+		c.errs = append(c.errs, c.opErrs[op]...)
+
+		opUsedVars := c.usedVars[op]
+		for _, v := range op.Vars {
+			if _, ok := opUsedVars[v]; !ok {
+				opSuffix := ""
+				if op.Name.Name != "" {
+					opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name)
+				}
+				c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix)
+			}
+		}
+	}
+
+	return c.errs
+}
+
+// validateMaxDepth validates that the query does not go deeper than maxDepth (if set).
+// It returns whether the max depth was exceeded, so the caller can stop validating early
+// and avoid excessive recursion.
+func validateMaxDepth(c *opContext, sels []query.Selection, depth int) bool {
+	// maxDepth checking is turned off when maxDepth is 0
+	if c.maxDepth == 0 {
+		return false
+	}
+
+	exceededMaxDepth := false
+
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *query.Field:
+			if depth > c.maxDepth {
+				exceededMaxDepth = true
+				c.addErr(sel.Alias.Loc, "MaxDepthExceeded", "Field %q has depth %d that exceeds max depth %d", sel.Name.Name, depth, c.maxDepth)
+				continue
+			}
+			exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, depth+1)
+		case *query.InlineFragment:
+			// Depth is not checked because inline fragments resolve to other fields which are checked.
+			// Depth is not incremented because inline fragments have the same depth as neighboring fields
+			exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, depth)
+		case *query.FragmentSpread:
+			// Depth is not checked because fragments resolve to other fields which are checked.
+			frag := c.doc.Fragments.Get(sel.Name.Name)
+			if frag == nil {
+				// In case of unknown fragment (invalid request), ignore max depth evaluation
+				c.addErr(sel.Loc, "MaxDepthEvaluationError", "Unknown fragment %q. Unable to evaluate depth.", sel.Name.Name)
+				continue
+			}
+			// Depth is not incremented because fragments have the same depth as surrounding fields
+			exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, frag.Selections, depth)
+		}
+	}
+
+	return exceededMaxDepth
+}
+
+func validateSelectionSet(c *opContext, sels []query.Selection, t schema.NamedType) {
+	for _, sel := range sels {
+		validateSelection(c, sel, t)
+	}
+
+	for i, a := range sels {
+		for _, b := range sels[i+1:] {
+			c.validateOverlap(a, b, nil, nil)
+		}
+	}
+}
+
+func validateSelection(c *opContext, sel query.Selection, t schema.NamedType) {
+	switch sel := sel.(type) {
+	case *query.Field:
+		validateDirectives(c, "FIELD", sel.Directives)
+
+		fieldName := sel.Name.Name
+		var f *schema.Field
+		switch fieldName {
+		case "__typename":
+			f = &schema.Field{
+				Name: "__typename",
+				Type: c.schema.Types["String"],
+			}
+		case "__schema":
+			f = &schema.Field{
+				Name: "__schema",
+				Type: c.schema.Types["__Schema"],
+			}
+		case "__type":
+			f = &schema.Field{
+				Name: "__type",
+				Args: common.InputValueList{
+					&common.InputValue{
+						Name: common.Ident{Name: "name"},
+						Type: &common.NonNull{OfType: c.schema.Types["String"]},
+					},
+				},
+				Type: c.schema.Types["__Type"],
+			}
+		default:
+			f = fields(t).Get(fieldName)
+			if f == nil && t != nil {
+				suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName)
+				c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion)
+			}
+		}
+		c.fieldMap[sel] = fieldInfo{sf: f, parent: t}
+
+		validateArgumentLiterals(c, sel.Arguments)
+		if f != nil {
+			validateArgumentTypes(c, sel.Arguments, f.Args, sel.Alias.Loc,
+				func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) },
+				func() string { return fmt.Sprintf("Field %q", fieldName) },
+			)
+		}
+
+		var ft common.Type
+		if f != nil {
+			ft = f.Type
+			sf := hasSubfields(ft)
+			if sf && sel.Selections == nil {
+				c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... }\"?", fieldName, ft, fieldName)
+			}
+			if !sf && sel.Selections != nil {
+				c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft)
+			}
+		}
+		if sel.Selections != nil {
+			validateSelectionSet(c, sel.Selections, unwrapType(ft))
+		}
+
+	case *query.InlineFragment:
+		validateDirectives(c, "INLINE_FRAGMENT", sel.Directives)
+		if sel.On.Name != "" {
+			fragTyp := unwrapType(resolveType(c.context, &sel.On))
+			if fragTyp != nil && !compatible(t, fragTyp) {
+				c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp)
+			}
+			t = fragTyp
+			// continue even if t is nil
+		}
+		if t != nil && !canBeFragment(t) {
+			c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t)
+			return
+		}
+		validateSelectionSet(c, sel.Selections, unwrapType(t))
+
+	case *query.FragmentSpread:
+		validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives)
+		frag := c.doc.Fragments.Get(sel.Name.Name)
+		if frag == nil {
+			c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name)
+			return
+		}
+		fragTyp := c.schema.Types[frag.On.Name]
+		if !compatible(t, fragTyp) {
+			c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp)
+		}
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func compatible(a, b common.Type) bool {
+	for _, pta := range possibleTypes(a) {
+		for _, ptb := range possibleTypes(b) {
+			if pta == ptb {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func possibleTypes(t common.Type) []*schema.Object {
+	switch t := t.(type) {
+	case *schema.Object:
+		return []*schema.Object{t}
+	case *schema.Interface:
+		return t.PossibleTypes
+	case *schema.Union:
+		return t.PossibleTypes
+	default:
+		return nil
+	}
+}
+
+func markUsedFragments(c *context, sels []query.Selection, fragUsed map[*query.FragmentDecl]struct{}) {
+	for _, sel := range sels {
+		switch sel := sel.(type) {
+		case *query.Field:
+			if sel.Selections != nil {
+				markUsedFragments(c, sel.Selections, fragUsed)
+			}
+
+		case *query.InlineFragment:
+			markUsedFragments(c, sel.Selections, fragUsed)
+
+		case *query.FragmentSpread:
+			frag := c.doc.Fragments.Get(sel.Name.Name)
+			if frag == nil {
+				return
+			}
+
+			if _, ok := fragUsed[frag]; ok {
+				return
+			}
+			fragUsed[frag] = struct{}{}
+			markUsedFragments(c, frag.Selections, fragUsed)
+
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+func detectFragmentCycle(c *context, sels []query.Selection, fragVisited map[*query.FragmentDecl]struct{}, spreadPath []*query.FragmentSpread, spreadPathIndex map[string]int) {
+	for _, sel := range sels {
+		detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex)
+	}
+}
+
+func detectFragmentCycleSel(c *context, sel query.Selection, fragVisited map[*query.FragmentDecl]struct{}, spreadPath []*query.FragmentSpread, spreadPathIndex map[string]int) {
+	switch sel := sel.(type) {
+	case *query.Field:
+		if sel.Selections != nil {
+			detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
+		}
+
+	case *query.InlineFragment:
+		detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex)
+
+	case *query.FragmentSpread:
+		frag := c.doc.Fragments.Get(sel.Name.Name)
+		if frag == nil {
+			return
+		}
+
+		spreadPath = append(spreadPath, sel)
+		if i, ok := spreadPathIndex[frag.Name.Name]; ok {
+			cyclePath := spreadPath[i:]
+			via := ""
+			if len(cyclePath) > 1 {
+				names := make([]string, len(cyclePath)-1)
+				for i, frag := range cyclePath[:len(cyclePath)-1] {
+					names[i] = frag.Name.Name
+				}
+				via = " via " + strings.Join(names, ", ")
+			}
+
+			locs := make([]errors.Location, len(cyclePath))
+			for i, frag := range cyclePath {
+				locs[i] = frag.Loc
+			}
+			c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via)
+			return
+		}
+
+		if _, ok := fragVisited[frag]; ok {
+			return
+		}
+		fragVisited[frag] = struct{}{}
+
+		spreadPathIndex[frag.Name.Name] = len(spreadPath)
+		detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex)
+		delete(spreadPathIndex, frag.Name.Name)
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func (c *context) validateOverlap(a, b query.Selection, reasons *[]string, locs *[]errors.Location) {
+	if a == b {
+		return
+	}
+
+	if _, ok := c.overlapValidated[selectionPair{a, b}]; ok {
+		return
+	}
+	c.overlapValidated[selectionPair{a, b}] = struct{}{}
+	c.overlapValidated[selectionPair{b, a}] = struct{}{}
+
+	switch a := a.(type) {
+	case *query.Field:
+		switch b := b.(type) {
+		case *query.Field:
+			if b.Alias.Loc.Before(a.Alias.Loc) {
+				a, b = b, a
+			}
+			if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 {
+				locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc)
+				if reasons == nil {
+					c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and "))
+					return
+				}
+				for _, r := range reasons2 {
+					*reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r))
+				}
+				*locs = append(*locs, locs2...)
+			}
+
+		case *query.InlineFragment:
+			for _, sel := range b.Selections {
+				c.validateOverlap(a, sel, reasons, locs)
+			}
+
+		case *query.FragmentSpread:
+			if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil {
+				for _, sel := range frag.Selections {
+					c.validateOverlap(a, sel, reasons, locs)
+				}
+			}
+
+		default:
+			panic("unreachable")
+		}
+
+	case *query.InlineFragment:
+		for _, sel := range a.Selections {
+			c.validateOverlap(sel, b, reasons, locs)
+		}
+
+	case *query.FragmentSpread:
+		if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil {
+			for _, sel := range frag.Selections {
+				c.validateOverlap(sel, b, reasons, locs)
+			}
+		}
+
+	default:
+		panic("unreachable")
+	}
+}
+
+func (c *context) validateFieldOverlap(a, b *query.Field) ([]string, []errors.Location) {
+	if a.Alias.Name != b.Alias.Name {
+		return nil, nil
+	}
+
+	if asf := c.fieldMap[a].sf; asf != nil {
+		if bsf := c.fieldMap[b].sf; bsf != nil {
+			if !typesCompatible(asf.Type, bsf.Type) {
+				return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil
+			}
+		}
+	}
+
+	at := c.fieldMap[a].parent
+	bt := c.fieldMap[b].parent
+	if at == nil || bt == nil || at == bt {
+		if a.Name.Name != b.Name.Name {
+			return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil
+		}
+
+		if argumentsConflict(a.Arguments, b.Arguments) {
+			return []string{"they have differing arguments"}, nil
+		}
+	}
+
+	var reasons []string
+	var locs []errors.Location
+	for _, a2 := range a.Selections {
+		for _, b2 := range b.Selections {
+			c.validateOverlap(a2, b2, &reasons, &locs)
+		}
+	}
+	return reasons, locs
+}
+
+func argumentsConflict(a, b common.ArgumentList) bool {
+	if len(a) != len(b) {
+		return true
+	}
+	for _, argA := range a {
+		valB, ok := b.Get(argA.Name.Name)
+		if !ok || !reflect.DeepEqual(argA.Value.Value(nil), valB.Value(nil)) {
+			return true
+		}
+	}
+	return false
+}
+
+func fields(t common.Type) schema.FieldList {
+	switch t := t.(type) {
+	case *schema.Object:
+		return t.Fields
+	case *schema.Interface:
+		return t.Fields
+	default:
+		return nil
+	}
+}
+
+func unwrapType(t common.Type) schema.NamedType {
+	if t == nil {
+		return nil
+	}
+	for {
+		switch t2 := t.(type) {
+		case schema.NamedType:
+			return t2
+		case *common.List:
+			t = t2.OfType
+		case *common.NonNull:
+			t = t2.OfType
+		default:
+			panic("unreachable")
+		}
+	}
+}
+
+func resolveType(c *context, t common.Type) common.Type {
+	t2, err := common.ResolveType(t, c.schema.Resolve)
+	if err != nil {
+		c.errs = append(c.errs, err)
+	}
+	return t2
+}
+
+func validateDirectives(c *opContext, loc string, directives common.DirectiveList) {
+	directiveNames := make(nameSet)
+	for _, d := range directives {
+		dirName := d.Name.Name
+		validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string {
+			return fmt.Sprintf("The directive %q can only be used once at this location.", dirName)
+		})
+
+		validateArgumentLiterals(c, d.Args)
+
+		dd, ok := c.schema.Directives[dirName]
+		if !ok {
+			c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName)
+			continue
+		}
+
+		locOK := false
+		for _, allowedLoc := range dd.Locs {
+			if loc == allowedLoc {
+				locOK = true
+				break
+			}
+		}
+		if !locOK {
+			c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc)
+		}
+
+		validateArgumentTypes(c, d.Args, dd.Args, d.Name.Loc,
+			func() string { return fmt.Sprintf("directive %q", "@"+dirName) },
+			func() string { return fmt.Sprintf("Directive %q", "@"+dirName) },
+		)
+	}
+}
+
+type nameSet map[string]errors.Location
+
+func validateName(c *context, set nameSet, name common.Ident, rule string, kind string) {
+	validateNameCustomMsg(c, set, name, rule, func() string {
+		return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name)
+	})
+}
+
+func validateNameCustomMsg(c *context, set nameSet, name common.Ident, rule string, msg func() string) {
+	if loc, ok := set[name.Name]; ok {
+		c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg())
+		return
+	}
+	set[name.Name] = name.Loc
+}
+
+func validateArgumentTypes(c *opContext, args common.ArgumentList, argDecls common.InputValueList, loc errors.Location, owner1, owner2 func() string) {
+	for _, selArg := range args {
+		arg := argDecls.Get(selArg.Name.Name)
+		if arg == nil {
+			c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1())
+			continue
+		}
+		value := selArg.Value
+		if ok, reason := validateValueType(c, value, arg.Type); !ok {
+			c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason)
+		}
+	}
+	for _, decl := range argDecls {
+		if _, ok := decl.Type.(*common.NonNull); ok {
+			if _, ok := args.Get(decl.Name.Name); !ok {
+				c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type)
+			}
+		}
+	}
+}
+
+func validateArgumentLiterals(c *opContext, args common.ArgumentList) {
+	argNames := make(nameSet)
+	for _, arg := range args {
+		validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument")
+		validateLiteral(c, arg.Value)
+	}
+}
+
+func validateLiteral(c *opContext, l common.Literal) {
+	switch l := l.(type) {
+	case *common.ObjectLit:
+		fieldNames := make(nameSet)
+		for _, f := range l.Fields {
+			validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field")
+			validateLiteral(c, f.Value)
+		}
+	case *common.ListLit:
+		for _, entry := range l.Entries {
+			validateLiteral(c, entry)
+		}
+	case *common.Variable:
+		for _, op := range c.ops {
+			v := op.Vars.Get(l.Name)
+			if v == nil {
+				byOp := ""
+				if op.Name.Name != "" {
+					byOp = fmt.Sprintf(" by operation %q", op.Name.Name)
+				}
+				c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{
+					Message:   fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp),
+					Locations: []errors.Location{l.Loc, op.Loc},
+					Rule:      "NoUndefinedVariables",
+				})
+				continue
+			}
+			c.usedVars[op][v] = struct{}{}
+		}
+	}
+}
+
+func validateValueType(c *opContext, v common.Literal, t common.Type) (bool, string) {
+	if v, ok := v.(*common.Variable); ok {
+		for _, op := range c.ops {
+			if v2 := op.Vars.Get(v.Name); v2 != nil {
+				t2, err := common.ResolveType(v2.Type, c.schema.Resolve)
+				if _, ok := t2.(*common.NonNull); !ok && v2.Default != nil {
+					t2 = &common.NonNull{OfType: t2}
+				}
+				if err == nil && !typeCanBeUsedAs(t2, t) {
+					c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t)
+				}
+			}
+		}
+		return true, ""
+	}
+
+	if nn, ok := t.(*common.NonNull); ok {
+		if isNull(v) {
+			return false, fmt.Sprintf("Expected %q, found null.", t)
+		}
+		t = nn.OfType
+	}
+	if isNull(v) {
+		return true, ""
+	}
+
+	switch t := t.(type) {
+	case *schema.Scalar, *schema.Enum:
+		if lit, ok := v.(*common.BasicLit); ok {
+			if validateBasicLit(lit, t) {
+				return true, ""
+			}
+		}
+
+	case *common.List:
+		list, ok := v.(*common.ListLit)
+		if !ok {
+			return validateValueType(c, v, t.OfType) // single value instead of list
+		}
+		for i, entry := range list.Entries {
+			if ok, reason := validateValueType(c, entry, t.OfType); !ok {
+				return false, fmt.Sprintf("In element #%d: %s", i, reason)
+			}
+		}
+		return true, ""
+
+	case *schema.InputObject:
+		v, ok := v.(*common.ObjectLit)
+		if !ok {
+			return false, fmt.Sprintf("Expected %q, found not an object.", t)
+		}
+		for _, f := range v.Fields {
+			name := f.Name.Name
+			iv := t.Values.Get(name)
+			if iv == nil {
+				return false, fmt.Sprintf("In field %q: Unknown field.", name)
+			}
+			if ok, reason := validateValueType(c, f.Value, iv.Type); !ok {
+				return false, fmt.Sprintf("In field %q: %s", name, reason)
+			}
+		}
+		for _, iv := range t.Values {
+			found := false
+			for _, f := range v.Fields {
+				if f.Name.Name == iv.Name.Name {
+					found = true
+					break
+				}
+			}
+			if !found {
+				if _, ok := iv.Type.(*common.NonNull); ok && iv.Default == nil {
+					return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type)
+				}
+			}
+		}
+		return true, ""
+	}
+
+	return false, fmt.Sprintf("Expected type %q, found %s.", t, v)
+}
+
+func validateBasicLit(v *common.BasicLit, t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.Scalar:
+		switch t.Name {
+		case "Int":
+			if v.Type != scanner.Int {
+				return false
+			}
+			f, err := strconv.ParseFloat(v.Text, 64)
+			if err != nil {
+				panic(err)
+			}
+			return f >= math.MinInt32 && f <= math.MaxInt32
+		case "Float":
+			return v.Type == scanner.Int || v.Type == scanner.Float
+		case "String":
+			return v.Type == scanner.String
+		case "Boolean":
+			return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false")
+		case "ID":
+			return v.Type == scanner.Int || v.Type == scanner.String
+		default:
+			//TODO: Type-check against expected type by Unmarshalling
+			return true
+		}
+
+	case *schema.Enum:
+		if v.Type != scanner.Ident {
+			return false
+		}
+		for _, option := range t.Values {
+			if option.Name == v.Text {
+				return true
+			}
+		}
+		return false
+	}
+
+	return false
+}
+
+func canBeFragment(t common.Type) bool {
+	switch t.(type) {
+	case *schema.Object, *schema.Interface, *schema.Union:
+		return true
+	default:
+		return false
+	}
+}
+
+func canBeInput(t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.InputObject, *schema.Scalar, *schema.Enum:
+		return true
+	case *common.List:
+		return canBeInput(t.OfType)
+	case *common.NonNull:
+		return canBeInput(t.OfType)
+	default:
+		return false
+	}
+}
+
+func hasSubfields(t common.Type) bool {
+	switch t := t.(type) {
+	case *schema.Object, *schema.Interface, *schema.Union:
+		return true
+	case *common.List:
+		return hasSubfields(t.OfType)
+	case *common.NonNull:
+		return hasSubfields(t.OfType)
+	default:
+		return false
+	}
+}
+
+func isLeaf(t common.Type) bool {
+	switch t.(type) {
+	case *schema.Scalar, *schema.Enum:
+		return true
+	default:
+		return false
+	}
+}
+
+func isNull(lit interface{}) bool {
+	_, ok := lit.(*common.NullLit)
+	return ok
+}
+
+func typesCompatible(a, b common.Type) bool {
+	al, aIsList := a.(*common.List)
+	bl, bIsList := b.(*common.List)
+	if aIsList || bIsList {
+		return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType)
+	}
+
+	ann, aIsNN := a.(*common.NonNull)
+	bnn, bIsNN := b.(*common.NonNull)
+	if aIsNN || bIsNN {
+		return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType)
+	}
+
+	if isLeaf(a) || isLeaf(b) {
+		return a == b
+	}
+
+	return true
+}
+
+func typeCanBeUsedAs(t, as common.Type) bool {
+	nnT, okT := t.(*common.NonNull)
+	if okT {
+		t = nnT.OfType
+	}
+
+	nnAs, okAs := as.(*common.NonNull)
+	if okAs {
+		as = nnAs.OfType
+		if !okT {
+			return false // nullable can not be used as non-null
+		}
+	}
+
+	if t == as {
+		return true
+	}
+
+	if lT, ok := t.(*common.List); ok {
+		if lAs, ok := as.(*common.List); ok {
+			return typeCanBeUsedAs(lT.OfType, lAs.OfType)
+		}
+	}
+	return false
+}
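// A short sketch of how the maxDepth consumed by validateMaxDepth above is set by
// callers, assuming the MaxDepth schema option exported by the root graphql-go
// package; the schema string and resolver are illustrative only.
package main

import graphql "github.com/graph-gophers/graphql-go"

type query struct{}

func (query) Hello() string { return "world" }

func main() {
	// Operations selecting fields more than 10 levels deep now fail validation
	// with a "MaxDepthExceeded" error instead of being executed.
	schema := graphql.MustParseSchema(`
		schema { query: Query }
		type Query { hello: String! }
	`, &query{}, graphql.MaxDepth(10))
	_ = schema
}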
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection.go
new file mode 100644
index 000000000..7e515cf25
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/introspection.go
@@ -0,0 +1,117 @@
+package graphql
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
+	"github.com/graph-gophers/graphql-go/introspection"
+)
+
+// Inspect allows inspection of the given schema.
+func (s *Schema) Inspect() *introspection.Schema {
+	return introspection.WrapSchema(s.schema)
+}
+
+// ToJSON encodes the schema in a JSON format used by tools like Relay.
+func (s *Schema) ToJSON() ([]byte, error) {
+	result := s.exec(context.Background(), introspectionQuery, "", nil, &resolvable.Schema{
+		Query:  &resolvable.Object{},
+		Schema: *s.schema,
+	})
+	if len(result.Errors) != 0 {
+		panic(result.Errors[0])
+	}
+	return json.MarshalIndent(result.Data, "", "\t")
+}
+
+var introspectionQuery = `
+  query {
+    __schema {
+      queryType { name }
+      mutationType { name }
+      subscriptionType { name }
+      types {
+        ...FullType
+      }
+      directives {
+        name
+        description
+        locations
+        args {
+          ...InputValue
+        }
+      }
+    }
+  }
+  fragment FullType on __Type {
+    kind
+    name
+    description
+    fields(includeDeprecated: true) {
+      name
+      description
+      args {
+        ...InputValue
+      }
+      type {
+        ...TypeRef
+      }
+      isDeprecated
+      deprecationReason
+    }
+    inputFields {
+      ...InputValue
+    }
+    interfaces {
+      ...TypeRef
+    }
+    enumValues(includeDeprecated: true) {
+      name
+      description
+      isDeprecated
+      deprecationReason
+    }
+    possibleTypes {
+      ...TypeRef
+    }
+  }
+  fragment InputValue on __InputValue {
+    name
+    description
+    type { ...TypeRef }
+    defaultValue
+  }
+  fragment TypeRef on __Type {
+    kind
+    name
+    ofType {
+      kind
+      name
+      ofType {
+        kind
+        name
+        ofType {
+          kind
+          name
+          ofType {
+            kind
+            name
+            ofType {
+              kind
+              name
+              ofType {
+                kind
+                name
+                ofType {
+                  kind
+                  name
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+`
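// A minimal sketch of using ToJSON above to emit the introspection result that
// tools such as Relay or GraphiQL consume; the schema string is illustrative and
// a nil resolver is enough since the schema is only inspected, not executed.
package main

import (
	"fmt"

	graphql "github.com/graph-gophers/graphql-go"
)

func main() {
	schema := graphql.MustParseSchema(`
		schema { query: Query }
		type Query { hello: String! }
	`, nil)

	j, err := schema.ToJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j))
}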
diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
new file mode 100644
index 000000000..2f4acad0a
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go
@@ -0,0 +1,313 @@
+package introspection
+
+import (
+	"sort"
+
+	"github.com/graph-gophers/graphql-go/internal/common"
+	"github.com/graph-gophers/graphql-go/internal/schema"
+)
+
+type Schema struct {
+	schema *schema.Schema
+}
+
+// WrapSchema is only used internally.
+func WrapSchema(schema *schema.Schema) *Schema {
+	return &Schema{schema}
+}
+
+func (r *Schema) Types() []*Type {
+	var names []string
+	for name := range r.schema.Types {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	l := make([]*Type, len(names))
+	for i, name := range names {
+		l[i] = &Type{r.schema.Types[name]}
+	}
+	return l
+}
+
+func (r *Schema) Directives() []*Directive {
+	var names []string
+	for name := range r.schema.Directives {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	l := make([]*Directive, len(names))
+	for i, name := range names {
+		l[i] = &Directive{r.schema.Directives[name]}
+	}
+	return l
+}
+
+func (r *Schema) QueryType() *Type {
+	t, ok := r.schema.EntryPoints["query"]
+	if !ok {
+		return nil
+	}
+	return &Type{t}
+}
+
+func (r *Schema) MutationType() *Type {
+	t, ok := r.schema.EntryPoints["mutation"]
+	if !ok {
+		return nil
+	}
+	return &Type{t}
+}
+
+func (r *Schema) SubscriptionType() *Type {
+	t, ok := r.schema.EntryPoints["subscription"]
+	if !ok {
+		return nil
+	}
+	return &Type{t}
+}
+
+type Type struct {
+	typ common.Type
+}
+
+// WrapType is only used internally.
+func WrapType(typ common.Type) *Type {
+	return &Type{typ}
+}
+
+func (r *Type) Kind() string {
+	return r.typ.Kind()
+}
+
+func (r *Type) Name() *string {
+	if named, ok := r.typ.(schema.NamedType); ok {
+		name := named.TypeName()
+		return &name
+	}
+	return nil
+}
+
+func (r *Type) Description() *string {
+	if named, ok := r.typ.(schema.NamedType); ok {
+		desc := named.Description()
+		if desc == "" {
+			return nil
+		}
+		return &desc
+	}
+	return nil
+}
+
+func (r *Type) Fields(args *struct{ IncludeDeprecated bool }) *[]*Field {
+	var fields schema.FieldList
+	switch t := r.typ.(type) {
+	case *schema.Object:
+		fields = t.Fields
+	case *schema.Interface:
+		fields = t.Fields
+	default:
+		return nil
+	}
+
+	var l []*Field
+	for _, f := range fields {
+		if d := f.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
+			l = append(l, &Field{f})
+		}
+	}
+	return &l
+}
+
+func (r *Type) Interfaces() *[]*Type {
+	t, ok := r.typ.(*schema.Object)
+	if !ok {
+		return nil
+	}
+
+	l := make([]*Type, len(t.Interfaces))
+	for i, intf := range t.Interfaces {
+		l[i] = &Type{intf}
+	}
+	return &l
+}
+
+func (r *Type) PossibleTypes() *[]*Type {
+	var possibleTypes []*schema.Object
+	switch t := r.typ.(type) {
+	case *schema.Interface:
+		possibleTypes = t.PossibleTypes
+	case *schema.Union:
+		possibleTypes = t.PossibleTypes
+	default:
+		return nil
+	}
+
+	l := make([]*Type, len(possibleTypes))
+	for i, intf := range possibleTypes {
+		l[i] = &Type{intf}
+	}
+	return &l
+}
+
+func (r *Type) EnumValues(args *struct{ IncludeDeprecated bool }) *[]*EnumValue {
+	t, ok := r.typ.(*schema.Enum)
+	if !ok {
+		return nil
+	}
+
+	var l []*EnumValue
+	for _, v := range t.Values {
+		if d := v.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated {
+			l = append(l, &EnumValue{v})
+		}
+	}
+	return &l
+}
+
+func (r *Type) InputFields() *[]*InputValue {
+	t, ok := r.typ.(*schema.InputObject)
+	if !ok {
+		return nil
+	}
+
+	l := make([]*InputValue, len(t.Values))
+	for i, v := range t.Values {
+		l[i] = &InputValue{v}
+	}
+	return &l
+}
+
+func (r *Type) OfType() *Type {
+	switch t := r.typ.(type) {
+	case *common.List:
+		return &Type{t.OfType}
+	case *common.NonNull:
+		return &Type{t.OfType}
+	default:
+		return nil
+	}
+}
+
+type Field struct {
+	field *schema.Field
+}
+
+func (r *Field) Name() string {
+	return r.field.Name
+}
+
+func (r *Field) Description() *string {
+	if r.field.Desc == "" {
+		return nil
+	}
+	return &r.field.Desc
+}
+
+func (r *Field) Args() []*InputValue {
+	l := make([]*InputValue, len(r.field.Args))
+	for i, v := range r.field.Args {
+		l[i] = &InputValue{v}
+	}
+	return l
+}
+
+func (r *Field) Type() *Type {
+	return &Type{r.field.Type}
+}
+
+func (r *Field) IsDeprecated() bool {
+	return r.field.Directives.Get("deprecated") != nil
+}
+
+func (r *Field) DeprecationReason() *string {
+	d := r.field.Directives.Get("deprecated")
+	if d == nil {
+		return nil
+	}
+	reason := d.Args.MustGet("reason").Value(nil).(string)
+	return &reason
+}
+
+type InputValue struct {
+	value *common.InputValue
+}
+
+func (r *InputValue) Name() string {
+	return r.value.Name.Name
+}
+
+func (r *InputValue) Description() *string {
+	if r.value.Desc == "" {
+		return nil
+	}
+	return &r.value.Desc
+}
+
+func (r *InputValue) Type() *Type {
+	return &Type{r.value.Type}
+}
+
+func (r *InputValue) DefaultValue() *string {
+	if r.value.Default == nil {
+		return nil
+	}
+	s := r.value.Default.String()
+	return &s
+}
+
+type EnumValue struct {
+	value *schema.EnumValue
+}
+
+func (r *EnumValue) Name() string {
+	return r.value.Name
+}
+
+func (r *EnumValue) Description() *string {
+	if r.value.Desc == "" {
+		return nil
+	}
+	return &r.value.Desc
+}
+
+func (r *EnumValue) IsDeprecated() bool {
+	return r.value.Directives.Get("deprecated") != nil
+}
+
+func (r *EnumValue) DeprecationReason() *string {
+	d := r.value.Directives.Get("deprecated")
+	if d == nil {
+		return nil
+	}
+	reason := d.Args.MustGet("reason").Value(nil).(string)
+	return &reason
+}
+
+type Directive struct {
+	directive *schema.DirectiveDecl
+}
+
+func (r *Directive) Name() string {
+	return r.directive.Name
+}
+
+func (r *Directive) Description() *string {
+	if r.directive.Desc == "" {
+		return nil
+	}
+	return &r.directive.Desc
+}
+
+func (r *Directive) Locations() []string {
+	return r.directive.Locs
+}
+
+func (r *Directive) Args() []*InputValue {
+	l := make([]*InputValue, len(r.directive.Args))
+	for i, v := range r.directive.Args {
+		l[i] = &InputValue{v}
+	}
+	return l
+}
diff --git a/vendor/github.com/graph-gophers/graphql-go/log/log.go b/vendor/github.com/graph-gophers/graphql-go/log/log.go
new file mode 100644
index 000000000..25569af7c
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/log/log.go
@@ -0,0 +1,23 @@
+package log
+
+import (
+	"context"
+	"log"
+	"runtime"
+)
+
+// Logger is the interface used to log panics that occur during query execution. It is settable via graphql.ParseSchema
+type Logger interface {
+	LogPanic(ctx context.Context, value interface{})
+}
+
+// DefaultLogger is the default logger used to log panics that occur during query execution
+type DefaultLogger struct{}
+
+// LogPanic is used to log recovered panic values that occur during query execution
+func (l *DefaultLogger) LogPanic(_ context.Context, value interface{}) {
+	const size = 64 << 10
+	buf := make([]byte, size)
+	buf = buf[:runtime.Stack(buf, false)]
+	log.Printf("graphql: panic occurred: %v\n%s", value, buf)
+}
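// A brief sketch of replacing DefaultLogger above with a custom panic logger,
// assuming the Logger schema option exported by the root graphql-go package;
// the stderrLogger type, schema string and resolver are illustrative only.
package main

import (
	"context"
	stdlog "log"

	graphql "github.com/graph-gophers/graphql-go"
)

type stderrLogger struct{}

// LogPanic satisfies the Logger interface from the log package above.
func (stderrLogger) LogPanic(_ context.Context, value interface{}) {
	stdlog.Printf("resolver panic: %v", value)
}

type query struct{}

func (query) Hello() string { return "world" }

func main() {
	schema := graphql.MustParseSchema(`
		schema { query: Query }
		type Query { hello: String! }
	`, &query{}, graphql.Logger(stderrLogger{}))
	_ = schema
}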
diff --git a/vendor/github.com/graph-gophers/graphql-go/relay/relay.go b/vendor/github.com/graph-gophers/graphql-go/relay/relay.go
new file mode 100644
index 000000000..78e4dfdd5
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/relay/relay.go
@@ -0,0 +1,70 @@
+package relay
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strings"
+
+	graphql "github.com/graph-gophers/graphql-go"
+)
+
+func MarshalID(kind string, spec interface{}) graphql.ID {
+	d, err := json.Marshal(spec)
+	if err != nil {
+		panic(fmt.Errorf("relay.MarshalID: %s", err))
+	}
+	return graphql.ID(base64.URLEncoding.EncodeToString(append([]byte(kind+":"), d...)))
+}
+
+func UnmarshalKind(id graphql.ID) string {
+	s, err := base64.URLEncoding.DecodeString(string(id))
+	if err != nil {
+		return ""
+	}
+	i := strings.IndexByte(string(s), ':')
+	if i == -1 {
+		return ""
+	}
+	return string(s[:i])
+}
+
+func UnmarshalSpec(id graphql.ID, v interface{}) error {
+	s, err := base64.URLEncoding.DecodeString(string(id))
+	if err != nil {
+		return err
+	}
+	i := strings.IndexByte(string(s), ':')
+	if i == -1 {
+		return errors.New("invalid graphql.ID")
+	}
+	return json.Unmarshal([]byte(s[i+1:]), v)
+}
+
+type Handler struct {
+	Schema *graphql.Schema
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	var params struct {
+		Query         string                 `json:"query"`
+		OperationName string                 `json:"operationName"`
+		Variables     map[string]interface{} `json:"variables"`
+	}
+	if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	response := h.Schema.Exec(r.Context(), params.Query, params.OperationName, params.Variables)
+	responseJSON, err := json.Marshal(response)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(responseJSON)
+}
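// A minimal sketch of serving a schema over HTTP with the relay.Handler above,
// mirroring the library's README usage; the schema string, resolver and address
// are illustrative only.
package main

import (
	"log"
	"net/http"

	graphql "github.com/graph-gophers/graphql-go"
	"github.com/graph-gophers/graphql-go/relay"
)

type query struct{}

func (query) Hello() string { return "Hello, world!" }

func main() {
	schema := graphql.MustParseSchema(`
		schema { query: Query }
		type Query { hello: String! }
	`, &query{})

	http.Handle("/query", &relay.Handler{Schema: schema})
	log.Fatal(http.ListenAndServe(":8080", nil))
}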
diff --git a/vendor/github.com/graph-gophers/graphql-go/time.go b/vendor/github.com/graph-gophers/graphql-go/time.go
new file mode 100644
index 000000000..829c50227
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/time.go
@@ -0,0 +1,51 @@
+package graphql
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Time is a custom GraphQL type to represent an instant in time. It has to be added to a schema
+// via "scalar Time" since it is not a predeclared GraphQL type like "ID".
+type Time struct {
+	time.Time
+}
+
+// ImplementsGraphQLType maps this custom Go type
+// to the graphql scalar type in the schema.
+func (Time) ImplementsGraphQLType(name string) bool {
+	return name == "Time"
+}
+
+// UnmarshalGraphQL is a custom unmarshaler for Time
+//
+// This function will be called whenever you use the
+// time scalar as an input
+func (t *Time) UnmarshalGraphQL(input interface{}) error {
+	switch input := input.(type) {
+	case time.Time:
+		t.Time = input
+		return nil
+	case string:
+		var err error
+		t.Time, err = time.Parse(time.RFC3339, input)
+		return err
+	case int:
+		t.Time = time.Unix(int64(input), 0)
+		return nil
+	case float64:
+		t.Time = time.Unix(int64(input), 0)
+		return nil
+	default:
+		return fmt.Errorf("wrong type")
+	}
+}
+
+// MarshalJSON is a custom marshaler for Time
+//
+// This function will be called whenever you
+// query for fields that use the Time type
+func (t Time) MarshalJSON() ([]byte, error) {
+	return json.Marshal(t.Time)
+}
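// A short sketch of wiring the Time scalar above into a schema: declare
// "scalar Time" in the SDL and return graphql.Time values from resolvers.
// The schema string and resolver are illustrative only.
package main

import (
	"time"

	graphql "github.com/graph-gophers/graphql-go"
)

type query struct{}

// Now resolves the "now: Time!" field using the custom scalar.
func (query) Now() graphql.Time {
	return graphql.Time{Time: time.Now()}
}

func main() {
	graphql.MustParseSchema(`
		schema { query: Query }
		scalar Time
		type Query { now: Time! }
	`, &query{})
}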
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
new file mode 100644
index 000000000..68b856ae7
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go
@@ -0,0 +1,80 @@
+package trace
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/graph-gophers/graphql-go/errors"
+	"github.com/graph-gophers/graphql-go/introspection"
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type TraceQueryFinishFunc func([]*errors.QueryError)
+type TraceFieldFinishFunc func(*errors.QueryError)
+
+type Tracer interface {
+	TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc)
+	TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc)
+}
+
+type OpenTracingTracer struct{}
+
+func (OpenTracingTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
+	span, spanCtx := opentracing.StartSpanFromContext(ctx, "GraphQL request")
+	span.SetTag("graphql.query", queryString)
+
+	if operationName != "" {
+		span.SetTag("graphql.operationName", operationName)
+	}
+
+	if len(variables) != 0 {
+		span.LogFields(log.Object("graphql.variables", variables))
+	}
+
+	return spanCtx, func(errs []*errors.QueryError) {
+		if len(errs) > 0 {
+			msg := errs[0].Error()
+			if len(errs) > 1 {
+				msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1)
+			}
+			ext.Error.Set(span, true)
+			span.SetTag("graphql.error", msg)
+		}
+		span.Finish()
+	}
+}
+
+func (OpenTracingTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
+	if trivial {
+		return ctx, noop
+	}
+
+	span, spanCtx := opentracing.StartSpanFromContext(ctx, label)
+	span.SetTag("graphql.type", typeName)
+	span.SetTag("graphql.field", fieldName)
+	for name, value := range args {
+		span.SetTag("graphql.args."+name, value)
+	}
+
+	return spanCtx, func(err *errors.QueryError) {
+		if err != nil {
+			ext.Error.Set(span, true)
+			span.SetTag("graphql.error", err.Error())
+		}
+		span.Finish()
+	}
+}
+
+func noop(*errors.QueryError) {}
+
+type NoopTracer struct{}
+
+func (NoopTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) {
+	return ctx, func(errs []*errors.QueryError) {}
+}
+
+func (NoopTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) {
+	return ctx, func(err *errors.QueryError) {}
+}
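// A compact sketch of enabling OpenTracingTracer above, assuming the Tracer
// schema option exported by the root graphql-go package; the schema string and
// resolver are illustrative only.
package main

import (
	graphql "github.com/graph-gophers/graphql-go"
	"github.com/graph-gophers/graphql-go/trace"
)

type query struct{}

func (query) Hello() string { return "world" }

func main() {
	schema := graphql.MustParseSchema(`
		schema { query: Query }
		type Query { hello: String! }
	`, &query{}, graphql.Tracer(trace.OpenTracingTracer{}))
	_ = schema
}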
diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
new file mode 100644
index 000000000..e223f0fdb
--- /dev/null
+++ b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go
@@ -0,0 +1,17 @@
+package trace
+
+import (
+	"github.com/graph-gophers/graphql-go/errors"
+)
+
+type TraceValidationFinishFunc = TraceQueryFinishFunc
+
+type ValidationTracer interface {
+	TraceValidation() TraceValidationFinishFunc
+}
+
+type NoopValidationTracer struct{}
+
+func (NoopValidationTracer) TraceValidation() TraceValidationFinishFunc {
+	return func(errs []*errors.QueryError) {}
+}
-- 
GitLab