diff --git a/lib/gat/handlers/pool/pools/basic/config.go b/lib/gat/handlers/pool/pools/basic/config.go
index 493159d91351e6d98b371aec6e6c327c6de8fcea..16f97cdc62bc8185bec822aa4bd3d51518459297 100644
--- a/lib/gat/handlers/pool/pools/basic/config.go
+++ b/lib/gat/handlers/pool/pools/basic/config.go
@@ -2,11 +2,13 @@ package basic
 
 import (
 	"encoding/json"
+	"time"
 
 	"github.com/caddyserver/caddy/v2"
 	"go.uber.org/zap"
 
 	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
+	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool/spool"
 	"gfx.cafe/gfx/pggat/lib/util/strutil"
 )
 
@@ -59,3 +61,16 @@ type Config struct {
 
 	Logger *zap.Logger `json:"-"`
 }
+
+func (T Config) Spool() spool.Config {
+	return spool.Config{
+		PoolerFactory:        T.PoolerFactory,
+		UsePS:                T.ParameterStatusSync == ParameterStatusSyncDynamic,
+		UseEQP:               T.ExtendedQuerySync,
+		ResetQuery:           T.ServerResetQuery,
+		IdleTimeout:          time.Duration(T.ServerIdleTimeout),
+		ReconnectInitialTime: time.Duration(T.ServerReconnectInitialTime),
+		ReconnectMaxTime:     time.Duration(T.ServerReconnectMaxTime),
+		Logger:               T.Logger,
+	}
+}
diff --git a/lib/gat/handlers/pool/pools/basic/pool.go b/lib/gat/handlers/pool/pools/basic/pool.go
index fde1ae0d43280d052e2066f16277897aba5457cb..d47d5262b765146b35fc7f4336a65e5b3c1bef46 100644
--- a/lib/gat/handlers/pool/pools/basic/pool.go
+++ b/lib/gat/handlers/pool/pools/basic/pool.go
@@ -3,27 +3,31 @@ package basic
 import (
 	"gfx.cafe/gfx/pggat/lib/fed"
 	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
+	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool/spool"
 	"gfx.cafe/gfx/pggat/lib/gat/metrics"
 )
 
 type Pool struct {
 	config Config
+
+	servers spool.Pool
 }
 
 func NewPool(config Config) *Pool {
-	return &Pool{
-		config: config,
+	p := &Pool{
+		config:  config,
+		servers: spool.MakePool(config.Spool()),
 	}
+	go p.servers.ScaleLoop()
+	return p
 }
 
 func (T *Pool) AddRecipe(name string, recipe *pool.Recipe) {
-	// TODO implement me
-	panic("implement me")
+	T.servers.AddRecipe(name, recipe)
 }
 
 func (T *Pool) RemoveRecipe(name string) {
-	// TODO implement me
-	panic("implement me")
+	T.servers.RemoveRecipe(name)
 }
 
 func (T *Pool) Serve(conn *fed.Conn) error {
@@ -37,13 +41,13 @@ func (T *Pool) Cancel(key fed.BackendKey) {
 }
 
 func (T *Pool) ReadMetrics(m *metrics.Pool) {
-	// TODO implement me
-	panic("implement me")
+	T.servers.ReadMetrics(m)
+
+	// TODO(garet) read client metrics
 }
 
 func (T *Pool) Close() {
-	// TODO implement me
-	panic("implement me")
+	T.servers.Close()
 }
 
 var _ pool.Pool = (*Pool)(nil)
diff --git a/lib/gat/handlers/pool/spool/config.go b/lib/gat/handlers/pool/spool/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..cc29f91c286a19bd1834a8c62bb16ba03dd740c3
--- /dev/null
+++ b/lib/gat/handlers/pool/spool/config.go
@@ -0,0 +1,27 @@
+package spool
+
+import (
+	"time"
+
+	"go.uber.org/zap"
+
+	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
+)
+
+type Config struct {
+	PoolerFactory pool.PoolerFactory
+
+	// UsePS controls whether the ps (ParameterStatus sync) middleware is added to servers
+	UsePS bool
+	// UseEQP controls whether the eqp (extended query protocol sync) middleware is added to servers
+	UseEQP bool
+
+	ResetQuery string
+
+	IdleTimeout time.Duration
+
+	ReconnectInitialTime time.Duration
+	ReconnectMaxTime     time.Duration
+
+	Logger *zap.Logger
+}
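
A minimal sketch of filling this config by hand, assuming only what the fields above and their use in pool.go imply; the pooler factory, reset query, and timing values are illustrative placeholders rather than defaults introduced by this change:

	cfg := spool.Config{
		PoolerFactory:        somePoolerFactory, // placeholder for any pool.PoolerFactory implementation
		UsePS:                true,              // attach the ps middleware to each new server
		UseEQP:               true,              // attach the eqp middleware to each new server
		ResetQuery:           "DISCARD ALL",     // run by Release before a server returns to idle
		IdleTimeout:          5 * time.Minute,   // 0 disables idle scale-down entirely
		ReconnectInitialTime: time.Second,       // first retry delay after a failed dial
		ReconnectMaxTime:     time.Minute,       // cap for the doubling retry delay
		Logger:               zap.NewNop(),
	}
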
diff --git a/lib/gat/handlers/pool/spool/pool.go b/lib/gat/handlers/pool/spool/pool.go
new file mode 100644
index 0000000000000000000000000000000000000000..71ccf96b6f2b91ff92365e52dbea0dbc0b32c045
--- /dev/null
+++ b/lib/gat/handlers/pool/spool/pool.go
@@ -0,0 +1,364 @@
+package spool
+
+import (
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/uuid"
+	"go.uber.org/zap"
+
+	"gfx.cafe/gfx/pggat/lib/bouncer/backends/v0"
+	"gfx.cafe/gfx/pggat/lib/fed/middlewares/eqp"
+	"gfx.cafe/gfx/pggat/lib/fed/middlewares/ps"
+	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
+	"gfx.cafe/gfx/pggat/lib/gat/metrics"
+	"gfx.cafe/gfx/pggat/lib/util/slices"
+)
+
+type Pool struct {
+	config Config
+	pooler pool.Pooler
+
+	closed chan struct{}
+
+	pendingCount atomic.Int64
+	pending      chan struct{}
+
+	recipes          map[string]*Recipe
+	recipeScaleOrder []*Recipe
+	servers          map[uuid.UUID]*Server
+	mu               sync.RWMutex
+}
+
+// MakePool creates a new pool with config. If MakePool is used instead of NewPool, the caller must start ScaleLoop.
+func MakePool(config Config) Pool {
+	pooler := config.PoolerFactory.NewPooler()
+	return Pool{
+		config: config,
+		pooler: pooler,
+
+		closed: make(chan struct{}),
+
+		pending: make(chan struct{}, 1),
+	}
+}
+
+func NewPool(config Config) *Pool {
+	p := MakePool(config)
+	go p.ScaleLoop()
+	return &p
+}
+
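+// removeServer removes server from the pool's server map. deleteFromRecipe also drops it from its
+// recipe's server list; freeFromRecipe also releases the recipe allocation. Callers must hold T.mu for writing.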
+func (T *Pool) removeServer(server *Server, deleteFromRecipe, freeFromRecipe bool) {
+	delete(T.servers, server.ID)
+
+	r := server.recipe
+	if deleteFromRecipe {
+		r.Servers = slices.Delete(r.Servers, server)
+	}
+	if freeFromRecipe {
+		r.Recipe.Free()
+	}
+}
+
+func (T *Pool) addRecipe(name string, recipe *pool.Recipe) *Recipe {
+	r := NewRecipe(name, recipe)
+
+	if T.recipes == nil {
+		T.recipes = make(map[string]*Recipe)
+	}
+	T.recipes[name] = r
+	T.recipeScaleOrder = append(T.recipeScaleOrder, r)
+	sort.Slice(T.recipeScaleOrder, func(i, j int) bool {
+		return len(T.recipeScaleOrder[i].Servers) < len(T.recipeScaleOrder[j].Servers)
+	})
+
+	return r
+}
+
+func (T *Pool) removeRecipe(name string) {
+	r, ok := T.recipes[name]
+	if !ok {
+		return
+	}
+	delete(T.recipes, name)
+	T.recipeScaleOrder = slices.Delete(T.recipeScaleOrder, r)
+
+	for _, server := range r.Servers {
+		T.removeServer(server, false, true)
+	}
+}
+
+func (T *Pool) AddRecipe(name string, recipe *pool.Recipe) {
+	r := func() *Recipe {
+		T.mu.Lock()
+		defer T.mu.Unlock()
+
+		T.removeRecipe(name)
+		return T.addRecipe(name, recipe)
+	}()
+
+	c := r.Recipe.AllocateInitial()
+	for i := 0; i < c; i++ {
+		T.ScaleUpOnce(r)
+	}
+}
+
+func (T *Pool) RemoveRecipe(name string) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	T.removeRecipe(name)
+}
+
+func (T *Pool) scaleUpL0() *Recipe {
+	for _, recipe := range T.recipeScaleOrder {
+		if !recipe.Recipe.Allocate() {
+			continue
+		}
+		return recipe
+	}
+	return nil
+}
+
+func (T *Pool) ScaleUpOnce(recipe *Recipe) bool {
+	conn, err := recipe.Recipe.Dial()
+	if err != nil {
+		T.config.Logger.Error("failed to dial server", zap.Error(err))
+		recipe.Recipe.Free()
+		return false
+	}
+
+	if T.config.UsePS {
+		conn.Middleware = append(
+			conn.Middleware,
+			ps.NewServer(conn.InitialParameters),
+		)
+	}
+
+	if T.config.UseEQP {
+		conn.Middleware = append(
+			conn.Middleware,
+			eqp.NewServer(),
+		)
+	}
+
+	server := NewServer(recipe, conn)
+
+	T.mu.Lock()
+	defer T.mu.Unlock()
+	recipe.Servers = append(recipe.Servers, server)
+	if T.servers == nil {
+		T.servers = make(map[uuid.UUID]*Server)
+	}
+	T.servers[server.ID] = server
+	sort.Slice(T.recipeScaleOrder, func(i, j int) bool {
+		return len(T.recipeScaleOrder[i].Servers) < len(T.recipeScaleOrder[j].Servers)
+	})
+
+	return true
+}
+
+func (T *Pool) ScaleUp() bool {
+	r := func() *Recipe {
+		T.mu.RLock()
+		defer T.mu.RUnlock()
+
+		return T.scaleUpL0()
+	}()
+
+	if r == nil {
+		T.config.Logger.Warn("no available recipes to scale up pool")
+		return false
+	}
+	return T.ScaleUpOnce(r)
+}
+
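+// ScaleDown frees servers that have been idle for longer than IdleTimeout and reports how long
+// to wait before the next scale-down pass.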
+func (T *Pool) ScaleDown(now time.Time) time.Duration {
+	// take the write lock: idle servers may be removed while iterating
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	var m time.Duration
+
+	for _, s := range T.servers {
+		since, state, _ := s.GetState()
+
+		if state != metrics.ConnStateIdle {
+			continue
+		}
+
+		idle := now.Sub(since)
+		if idle > T.config.IdleTimeout {
+			// delete
+			if s.recipe.Recipe.TryFree() {
+				T.removeServer(s, true, false)
+			}
+		} else if idle > m {
+			m = idle
+		}
+	}
+
+	return T.config.IdleTimeout - m
+}
+
+func (T *Pool) ScaleLoop() {
+	var idle *time.Timer
+	defer func() {
+		if idle != nil {
+			idle.Stop()
+		}
+	}()
+	var idleC <-chan time.Time
+	if T.config.IdleTimeout != 0 {
+		idle = time.NewTimer(T.config.IdleTimeout)
+		idleC = idle.C
+	}
+
+	var backoff *time.Timer
+	defer func() {
+		if backoff != nil {
+			backoff.Stop()
+		}
+	}()
+	var backoffC <-chan time.Time
+	var backoffNext time.Duration
+
+	for {
+		var pending <-chan struct{}
+		if backoffNext == 0 {
+			pending = T.pending
+		}
+
+		select {
+		case <-T.closed:
+			return
+		case <-backoffC:
+			// scale up
+			if T.ScaleUp() {
+				backoffNext = 0
+				continue
+			}
+
+			backoffNext *= 2
+			if T.config.ReconnectMaxTime != 0 && backoffNext > T.config.ReconnectMaxTime {
+				backoffNext = T.config.ReconnectMaxTime
+			}
+			backoff.Reset(backoffNext)
+		case <-pending:
+			// scale up
+			ok := true
+			for T.pendingCount.Load() > 0 {
+				if !T.ScaleUp() {
+					ok = false
+					break
+				}
+			}
+			if ok {
+				continue
+			}
+
+			// backoff
+			backoffNext = T.config.ReconnectInitialTime
+			if backoffNext != 0 {
+				if backoff == nil {
+					backoff = time.NewTimer(backoffNext)
+					backoffC = backoff.C
+				} else {
+					backoff.Reset(backoffNext)
+				}
+			}
+		case now := <-idleC:
+			// scale down
+			idle.Reset(T.ScaleDown(now))
+		}
+	}
+}
+
+func (T *Pool) AddClient(client uuid.UUID) {
+	T.pooler.AddClient(client)
+}
+
+func (T *Pool) RemoveClient(client uuid.UUID) {
+	T.pooler.DeleteClient(client)
+}
+
+func (T *Pool) Acquire(client uuid.UUID) *Server {
+	for {
+		serverID := T.pooler.Acquire(client, pool.SyncModeNonBlocking)
+		if serverID == uuid.Nil {
+			T.pendingCount.Add(1)
+			select {
+			case T.pending <- struct{}{}:
+			default:
+			}
+
+			serverID = T.pooler.Acquire(client, pool.SyncModeBlocking)
+
+			T.pendingCount.Add(-1)
+
+			if serverID == uuid.Nil {
+				return nil
+			}
+		}
+
+		T.mu.RLock()
+		c, ok := T.servers[serverID]
+		T.mu.RUnlock()
+
+		if !ok {
+			T.pooler.DeleteServer(serverID)
+			continue
+		}
+
+		return c
+	}
+}
+
+func (T *Pool) Release(server *Server) {
+	if T.config.ResetQuery != "" {
+		server.SetState(metrics.ConnStateRunningResetQuery, uuid.Nil)
+
+		if err, _ := backends.QueryString(server.Conn, nil, T.config.ResetQuery); err != nil {
+			T.config.Logger.Error("failed to run reset query", zap.Error(err))
+			T.RemoveServer(server)
+			return
+		}
+	}
+
+	T.pooler.Release(server.ID)
+
+	server.SetState(metrics.ConnStateIdle, uuid.Nil)
+}
+
+func (T *Pool) RemoveServer(server *Server) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	T.removeServer(server, true, true)
+}
+
+func (T *Pool) Cancel(server *Server) {
+	server.recipe.Recipe.Cancel(server.Conn.BackendKey)
+}
+
+func (T *Pool) ReadMetrics(m *metrics.Pool) {
+	T.mu.RLock()
+	defer T.mu.RUnlock()
+
+	if m.Servers == nil {
+		m.Servers = make(map[uuid.UUID]metrics.Conn)
+	}
+	for _, server := range T.servers {
+		var s metrics.Conn
+		server.ReadMetrics(&s)
+		m.Servers[server.ID] = s
+	}
+}
+
+func (T *Pool) Close() {
+	close(T.closed)
+
+	T.pooler.Close()
+}
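
For orientation, a rough sketch of how this pool is driven end to end, based only on the exported surface above; cfg, recipe, and clientID are placeholders, and basic/pool.go earlier in this diff is the real consumer:

	p := spool.NewPool(cfg)        // NewPool starts ScaleLoop; with MakePool the caller must start it
	defer p.Close()

	p.AddRecipe("primary", recipe) // dials the recipe's initial allocation up front

	p.AddClient(clientID)
	if srv := p.Acquire(clientID); srv != nil { // blocks until a server is free; nil means the acquire gave up
		// ... proxy the client's queries over srv.Conn ...
		srv.TransactionComplete()
		p.Release(srv) // runs ResetQuery (if set), then marks srv idle again
	}
	p.RemoveClient(clientID)
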
diff --git a/lib/gat/handlers/pool/spool/recipe.go b/lib/gat/handlers/pool/spool/recipe.go
new file mode 100644
index 0000000000000000000000000000000000000000..b0e30a178a90e757112e8b2d385d9b304786bbf5
--- /dev/null
+++ b/lib/gat/handlers/pool/spool/recipe.go
@@ -0,0 +1,16 @@
+package spool
+
+import "gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
+
+type Recipe struct {
+	Name    string
+	Recipe  *pool.Recipe
+	Servers []*Server
+}
+
+func NewRecipe(name string, recipe *pool.Recipe) *Recipe {
+	return &Recipe{
+		Name:   name,
+		Recipe: recipe,
+	}
+}
diff --git a/lib/gat/handlers/pool/spool/server.go b/lib/gat/handlers/pool/spool/server.go
new file mode 100644
index 0000000000000000000000000000000000000000..367f09a799b43dd7bb899dc464c11b785e253f06
--- /dev/null
+++ b/lib/gat/handlers/pool/spool/server.go
@@ -0,0 +1,96 @@
+package spool
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/uuid"
+
+	"gfx.cafe/gfx/pggat/lib/fed"
+	"gfx.cafe/gfx/pggat/lib/gat/metrics"
+)
+
+type Server struct {
+	ID   uuid.UUID
+	Conn *fed.Conn
+
+	recipe *Recipe
+
+	txnCount atomic.Int64
+
+	lastMetricsRead time.Time
+	state           metrics.ConnState
+	peer            uuid.UUID
+	since           time.Time
+	util            [metrics.ConnStateCount]time.Duration
+	mu              sync.Mutex
+}
+
+func NewServer(recipe *Recipe, conn *fed.Conn) *Server {
+	return &Server{
+		ID:     uuid.New(),
+		recipe: recipe,
+		Conn:   conn,
+
+		state: metrics.ConnStateIdle,
+		since: time.Now(),
+	}
+}
+
+func (T *Server) SetState(state metrics.ConnState, peer uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	now := time.Now()
+
+	var since time.Duration
+	if T.since.Before(T.lastMetricsRead) {
+		since = now.Sub(T.lastMetricsRead)
+	} else {
+		since = now.Sub(T.since)
+	}
+	T.util[T.state] += since
+
+	T.state = state
+	T.peer = peer
+	T.since = now
+}
+
+func (T *Server) GetState() (time.Time, metrics.ConnState, uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+	return T.since, T.state, T.peer
+}
+
+func (T *Server) TransactionComplete() {
+	T.txnCount.Add(1)
+}
+
+func (T *Server) ReadMetrics(m *metrics.Conn) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	now := time.Now()
+
+	m.Time = now
+
+	m.State = T.state
+	m.Peer = T.peer
+	m.Since = T.since
+
+	m.Utilization = T.util
+	T.util = [metrics.ConnStateCount]time.Duration{}
+
+	var since time.Duration
+	if m.Since.Before(T.lastMetricsRead) {
+		since = now.Sub(T.lastMetricsRead)
+	} else {
+		since = now.Sub(m.Since)
+	}
+	m.Utilization[m.State] += since
+
+	m.TransactionCount = int(T.txnCount.Swap(0))
+
+	T.lastMetricsRead = now
+}
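
The utilization bookkeeping above charges each interval to exactly one metrics window: SetState accrues time since the later of the last state change and the last metrics read, and ReadMetrics adds the still-open interval before resetting the counters. A hand-worked timeline with invented timestamps (Active stands for any non-idle state):

	// t=0s   NewServer                  state=Idle, since=0s
	// t=2s   SetState(Active, peer)     util[Idle] += 2s
	// t=5s   ReadMetrics                reports Idle=2s, Active=3s; resets util; lastMetricsRead=5s
	// t=9s   SetState(Idle, uuid.Nil)   util[Active] += 4s, measured from the 5s read rather than from 2s
	// t=12s  ReadMetrics                reports Active=4s, Idle=3s, i.e. exactly the 7s since the previous read
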
diff --git a/lib/util/slices/sorted.go b/lib/util/slices/sorted.go
deleted file mode 100644
index 2c8a4eb87414d89701e7aa517d0cd210d0336672..0000000000000000000000000000000000000000
--- a/lib/util/slices/sorted.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package slices
-
-// Sorted is a sorted slice. As long as all items are inserted by Insert, updated by Update, and removed by Delete,
-// this slice will stay sorted
-type Sorted[V any] []V
-
-func (T Sorted[V]) Insert(value V, sorter func(V) int) Sorted[V] {
-	key := sorter(value)
-	for i, v := range T {
-		if sorter(v) < key {
-			continue
-		}
-
-		res := append(T, *new(V))
-		copy(res[i+1:], res[i:])
-		res[i] = value
-		return res
-	}
-
-	return append(T, value)
-}
-
-func (T Sorted[V]) Update(index int, sorter func(V) int) {
-	value := T[index]
-	key := sorter(value)
-
-	for i, v := range T {
-		switch {
-		case i < index:
-			if sorter(v) < key {
-				continue
-			}
-
-			// move all up by one, move from index to i
-			copy(T[i+1:], T[i:index])
-			T[i] = value
-			return
-		case i > index:
-			if sorter(v) < key {
-				continue
-			}
-
-			// move all down by one, move from index to i
-			copy(T[index:], T[index+1:i])
-			T[i-1] = value
-			return
-		default:
-			continue
-		}
-	}
-
-	// move all down by one, move from index to i
-	copy(T[index:], T[index+1:])
-	T[len(T)-1] = value
-}
diff --git a/lib/util/slices/sorted_test.go b/lib/util/slices/sorted_test.go
deleted file mode 100644
index bb8dd923b163bc59e0b42eb5ae9e271900a793ec..0000000000000000000000000000000000000000
--- a/lib/util/slices/sorted_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package slices
-
-import (
-	"log"
-	"sort"
-	"testing"
-)
-
-func TestSorted_Insert(t *testing.T) {
-	sorter := func(v string) int {
-		return len(v)
-	}
-
-	expected := []string{
-		"test",
-		"abc",
-		"this is a long string",
-		"gjkdfjgksg",
-		"retre",
-		"abd",
-		"def",
-		"ttierotiretiiret34t43t34534",
-	}
-
-	var x Sorted[string]
-	for _, v := range expected {
-		x = x.Insert(v, sorter)
-	}
-
-	if !sort.SliceIsSorted(x, func(i, j int) bool {
-		return sorter(x[i]) < sorter(x[j])
-	}) {
-		t.Errorf("slice isn't sorted: %#v", x)
-	}
-}
-
-func TestSorted_Update(t *testing.T) {
-	values := map[string]int{
-		"abc":               43,
-		"def":               32,
-		"cool":              594390069,
-		"amazing":           -432,
-		"i hope this works": 32,
-	}
-
-	sorter := func(v string) int {
-		return values[v]
-	}
-
-	var x Sorted[string]
-	for v := range values {
-		x = x.Insert(v, sorter)
-	}
-
-	if !sort.SliceIsSorted(x, func(i, j int) bool {
-		return sorter(x[i]) < sorter(x[j])
-	}) {
-		t.Errorf("slice isn't sorted: %#v", x)
-	}
-
-	log.Printf("%#v", x)
-
-	values["cool"] = -10
-	x.Update(Index(x, "cool"), sorter)
-	values["amazing"] = 543543
-	x.Update(Index(x, "amazing"), sorter)
-	x.Update(Index(x, "abc"), sorter)
-	values["i hope this works"] = 44
-	x.Update(Index(x, "i hope this works"), sorter)
-	values["abc"] = 31
-	x.Update(Index(x, "abc"), sorter)
-
-	if !sort.SliceIsSorted(x, func(i, j int) bool {
-		return sorter(x[i]) < sorter(x[j])
-	}) {
-		t.Errorf("slice isn't sorted: %#v", x)
-	}
-}