diff --git a/lib/gat/handlers/pool/poolers/rob/pooler.go b/lib/gat/handlers/pool/poolers/rob/pooler.go
index 7bb10a3457bc2ff98a0dc943fd6f41c3884ff7bc..fcb3fdbca8a108347aedf7e9bdff38fcd92fe940 100644
--- a/lib/gat/handlers/pool/poolers/rob/pooler.go
+++ b/lib/gat/handlers/pool/poolers/rob/pooler.go
@@ -5,7 +5,7 @@ import (
 
 	"gfx.cafe/gfx/pggat/lib/gat/handlers/pool"
 	"gfx.cafe/gfx/pggat/lib/rob"
-	"gfx.cafe/gfx/pggat/lib/rob/schedulers/v2"
+	"gfx.cafe/gfx/pggat/lib/rob/schedulers/v3"
 )
 
 type Pooler struct {
diff --git a/lib/rob/schedulers/v2/job/concurrent.go b/lib/rob/schedulers/v2/job/concurrent.go
deleted file mode 100644
index f868fe595dc83bfe42892ace1b53e026af6463c2..0000000000000000000000000000000000000000
--- a/lib/rob/schedulers/v2/job/concurrent.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package job
-
-import (
-	"github.com/google/uuid"
-)
-
-type Concurrent struct {
-	User uuid.UUID
-}
diff --git a/lib/rob/schedulers/v2/job/stalled.go b/lib/rob/schedulers/v2/job/stalled.go
deleted file mode 100644
index 202f974c7af219192d108c173922dc50368ba8dd..0000000000000000000000000000000000000000
--- a/lib/rob/schedulers/v2/job/stalled.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package job
-
-import (
-	"github.com/google/uuid"
-)
-
-type Stalled struct {
-	Concurrent
-	Ready chan<- uuid.UUID
-}
diff --git a/lib/rob/schedulers/v2/scheduler.go b/lib/rob/schedulers/v2/scheduler.go
deleted file mode 100644
index 9b8d593e8b9272496bb12c29d63a62a4752332bb..0000000000000000000000000000000000000000
--- a/lib/rob/schedulers/v2/scheduler.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package schedulers
-
-import (
-	"sync"
-
-	"github.com/google/uuid"
-
-	"gfx.cafe/gfx/pggat/lib/rob"
-	"gfx.cafe/gfx/pggat/lib/rob/schedulers/v2/job"
-	"gfx.cafe/gfx/pggat/lib/rob/schedulers/v2/sink"
-	"gfx.cafe/gfx/pggat/lib/util/pools"
-)
-
-type Scheduler struct {
-	// resource pools
-	ready pools.Locked[chan uuid.UUID]
-
-	// backlog is the list of user
-	affinity map[uuid.UUID]uuid.UUID
-	amu      sync.RWMutex
-	backlog  []job.Stalled
-	bmu      sync.Mutex
-	sinks    map[uuid.UUID]*sink.Sink
-	closed   bool
-	mu       sync.RWMutex
-}
-
-func (T *Scheduler) AddWorker(worker uuid.UUID) {
-	s := sink.NewSink(worker)
-
-	T.mu.Lock()
-	defer T.mu.Unlock()
-	// if mu is locked, we don't need to lock bmu, because we are the only accessor
-	if T.sinks == nil {
-		T.sinks = make(map[uuid.UUID]*sink.Sink)
-	}
-	T.sinks[worker] = s
-
-	if len(T.backlog) > 0 {
-		s.Enqueue(T.backlog...)
-		T.backlog = T.backlog[:0]
-		return
-	}
-
-	T.stealFor(worker)
-	return
-}
-
-func (T *Scheduler) DeleteWorker(worker uuid.UUID) {
-	T.mu.Lock()
-	defer T.mu.Unlock()
-	s, ok := T.sinks[worker]
-	delete(T.sinks, worker)
-	if !ok {
-		return
-	}
-
-	// now we need to reschedule all the work that was scheduled to s (stalled only).
-	jobs := s.StealAll()
-
-	for _, j := range jobs {
-		if id := T.tryAcquire(j.Concurrent); id != uuid.Nil {
-			j.Ready <- id
-			continue
-		}
-		T.enqueue(j)
-	}
-}
-
-func (*Scheduler) AddUser(_ uuid.UUID) {}
-
-func (T *Scheduler) DeleteUser(user uuid.UUID) {
-	T.mu.RLock()
-	defer T.mu.RUnlock()
-
-	T.amu.Lock()
-	delete(T.affinity, user)
-	T.amu.Unlock()
-
-	for _, v := range T.sinks {
-		v.RemoveUser(user)
-	}
-}
-
-func (T *Scheduler) tryAcquire(j job.Concurrent) uuid.UUID {
-	T.amu.RLock()
-	affinity := T.affinity[j.User]
-	T.amu.RUnlock()
-
-	// try affinity first
-	if v, ok := T.sinks[affinity]; ok {
-		if v.Acquire(j) {
-			return affinity
-		}
-	}
-
-	for id, v := range T.sinks {
-		if v.Acquire(j) {
-			// set affinity
-			T.amu.Lock()
-			if T.affinity == nil {
-				T.affinity = make(map[uuid.UUID]uuid.UUID)
-			}
-			T.affinity[j.User] = id
-			T.amu.Unlock()
-			return id
-		}
-	}
-
-	return uuid.Nil
-}
-
-func (T *Scheduler) TryAcquire(j job.Concurrent) uuid.UUID {
-	T.mu.RLock()
-	defer T.mu.RUnlock()
-
-	if T.closed {
-		return uuid.Nil
-	}
-
-	return T.tryAcquire(j)
-}
-
-func (T *Scheduler) enqueue(j job.Stalled) {
-	T.amu.RLock()
-	affinity := T.affinity[j.User]
-	T.amu.RUnlock()
-
-	// try affinity first
-	if v, ok := T.sinks[affinity]; ok {
-		v.Enqueue(j)
-		return
-	}
-
-	for id, v := range T.sinks {
-		v.Enqueue(j)
-		T.amu.Lock()
-		if T.affinity == nil {
-			T.affinity = make(map[uuid.UUID]uuid.UUID)
-		}
-		T.affinity[j.User] = id
-		T.amu.Unlock()
-		return
-	}
-
-	// add to backlog
-	T.bmu.Lock()
-	defer T.bmu.Unlock()
-	T.backlog = append(T.backlog, j)
-}
-
-func (T *Scheduler) Enqueue(j ...job.Stalled) {
-	T.mu.RLock()
-	defer T.mu.RUnlock()
-
-	if T.closed {
-		for _, jj := range j {
-			close(jj.Ready)
-		}
-		return
-	}
-
-	for _, jj := range j {
-		T.enqueue(jj)
-	}
-}
-
-func (T *Scheduler) Acquire(user uuid.UUID, mode rob.SyncMode) uuid.UUID {
-	switch mode {
-	case rob.SyncModeNonBlocking:
-		return T.TryAcquire(job.Concurrent{
-			User: user,
-		})
-	case rob.SyncModeBlocking:
-		ready, ok := T.ready.Get()
-		if !ok {
-			ready = make(chan uuid.UUID, 1)
-		}
-
-		j := job.Stalled{
-			Concurrent: job.Concurrent{
-				User: user,
-			},
-			Ready: ready,
-		}
-		T.Enqueue(j)
-
-		s, ok := <-ready
-		if ok {
-			T.ready.Put(ready)
-		}
-		return s
-	case rob.SyncModeTryNonBlocking:
-		if id := T.Acquire(user, rob.SyncModeNonBlocking); id != uuid.Nil {
-			return id
-		}
-		return T.Acquire(user, rob.SyncModeBlocking)
-	default:
-		return uuid.Nil
-	}
-}
-
-func (T *Scheduler) Release(worker uuid.UUID) {
-	T.mu.RLock()
-	defer T.mu.RUnlock()
-
-	s, ok := T.sinks[worker]
-	if !ok {
-		return
-	}
-	hasMore := s.Release()
-	if !hasMore {
-		// try to steal
-		T.stealFor(worker)
-	}
-}
-
-// stealFor will try to steal work for the specified worker. Lock Scheduler.mu before executing
-func (T *Scheduler) stealFor(worker uuid.UUID) {
-	s, ok := T.sinks[worker]
-	if !ok {
-		return
-	}
-
-	for _, v := range T.sinks {
-		if v == s {
-			continue
-		}
-
-		if src := v.StealFor(s); src != uuid.Nil {
-			T.amu.Lock()
-			if T.affinity == nil {
-				T.affinity = make(map[uuid.UUID]uuid.UUID)
-			}
-			T.affinity[src] = worker
-			T.amu.Unlock()
-			return
-		}
-	}
-}
-
-func (T *Scheduler) Close() {
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	T.closed = true
-
-	for worker, s := range T.sinks {
-		delete(T.sinks, worker)
-
-		// now we need to reschedule all the work that was scheduled to s (stalled only).
-		jobs := s.StealAll()
-
-		for _, j := range jobs {
-			close(j.Ready)
-		}
-	}
-
-	for _, j := range T.backlog {
-		close(j.Ready)
-	}
-	T.backlog = T.backlog[:0]
-}
-
-var _ rob.Scheduler = (*Scheduler)(nil)
diff --git a/lib/rob/schedulers/v2/sink/sink.go b/lib/rob/schedulers/v2/sink/sink.go
deleted file mode 100644
index d2b23af20c93dc7d26d6864c015976b48f318739..0000000000000000000000000000000000000000
--- a/lib/rob/schedulers/v2/sink/sink.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package sink
-
-import (
-	"github.com/google/uuid"
-	"sync"
-	"time"
-
-	"gfx.cafe/gfx/pggat/lib/rob/schedulers/v2/job"
-	"gfx.cafe/gfx/pggat/lib/util/rbtree"
-	"gfx.cafe/gfx/pggat/lib/util/ring"
-)
-
-type Sink struct {
-	id uuid.UUID
-
-	// non final
-	active uuid.UUID
-	start  time.Time
-
-	floor     time.Duration
-	stride    map[uuid.UUID]time.Duration
-	pending   map[uuid.UUID]*ring.Ring[job.Stalled]
-	scheduled rbtree.RBTree[time.Duration, job.Stalled]
-
-	mu sync.Mutex
-}
-
-func NewSink(id uuid.UUID) *Sink {
-	return &Sink{
-		id: id,
-	}
-}
-
-func (T *Sink) schedule(j job.Stalled) bool {
-	if T.active == j.User {
-		return false
-	}
-
-	stride, ok := T.stride[j.User]
-	if !ok {
-		// set to max
-		stride = T.floor
-		if s, _, ok := T.scheduled.Max(); ok {
-			stride = s + 1
-		}
-		if T.stride == nil {
-			T.stride = make(map[uuid.UUID]time.Duration)
-		}
-		T.stride[j.User] = stride
-	} else if stride < T.floor {
-		stride = T.floor
-		T.stride[j.User] = stride
-	}
-
-	for {
-		// find unique stride to schedule on
-		s, ok := T.scheduled.Get(stride)
-		if !ok {
-			break
-		}
-
-		if s.User == j.User {
-			return false
-		}
-		stride += 1
-	}
-
-	T.scheduled.Set(stride, j)
-	return true
-}
-
-func (T *Sink) enqueue(j job.Stalled) {
-	if T.active == uuid.Nil {
-		// run it now
-		T.acquire(j.User)
-		j.Ready <- T.id
-		return
-	}
-
-	if T.schedule(j) {
-		return
-	}
-
-	p, ok := T.pending[j.User]
-
-	// add to pending queue
-	if !ok {
-		p = ring.NewRing[job.Stalled](0, 1)
-		if T.pending == nil {
-			T.pending = make(map[uuid.UUID]*ring.Ring[job.Stalled])
-		}
-		T.pending[j.User] = p
-	}
-
-	p.PushBack(j)
-}
-
-func (T *Sink) Enqueue(j ...job.Stalled) {
-	// enqueue job
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	for _, jj := range j {
-		T.enqueue(jj)
-	}
-}
-
-func (T *Sink) acquire(user uuid.UUID) {
-	if T.active != uuid.Nil {
-		panic("acquire called when already in use")
-	}
-	T.active = user
-	T.start = time.Now()
-}
-
-func (T *Sink) Acquire(j job.Concurrent) bool {
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	if T.active != uuid.Nil {
-		// already active
-		return false
-	}
-
-	T.acquire(j.User)
-
-	return true
-}
-
-func (T *Sink) enqueueNext(user uuid.UUID) {
-	pending, ok := T.pending[user]
-	if !ok {
-		return
-	}
-	j, ok := pending.PopFront()
-	if !ok {
-		return
-	}
-	if ok = T.schedule(j); !ok {
-		pending.PushFront(j)
-		return
-	}
-}
-
-func (T *Sink) next() bool {
-	now := time.Now()
-	if T.active != uuid.Nil {
-		user := T.active
-		dur := now.Sub(T.start)
-		T.active = uuid.Nil
-		T.start = now
-
-		if T.stride == nil {
-			T.stride = make(map[uuid.UUID]time.Duration)
-		}
-		T.stride[user] += dur
-
-		T.enqueueNext(user)
-	}
-
-	stride, j, ok := T.scheduled.Min()
-	if !ok {
-		return false
-	}
-	T.scheduled.Delete(stride)
-	if stride > T.floor {
-		T.floor = stride
-	}
-
-	T.acquire(j.User)
-	j.Ready <- T.id
-	return true
-}
-
-func (T *Sink) Release() (hasMore bool) {
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	return T.next()
-}
-
-func (T *Sink) StealAll() []job.Stalled {
-	var all []job.Stalled
-
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	for {
-		if k, j, ok := T.scheduled.Min(); ok {
-			T.scheduled.Delete(k)
-			all = append(all, j)
-		} else {
-			break
-		}
-	}
-
-	for _, value := range T.pending {
-		for {
-			if j, ok := value.PopFront(); ok {
-				all = append(all, j)
-			} else {
-				break
-			}
-		}
-	}
-
-	return all
-}
-
-func (T *Sink) StealFor(rhs *Sink) uuid.UUID {
-	if T == rhs {
-		return uuid.Nil
-	}
-
-	var j job.Stalled
-	var ok bool
-	var pending *ring.Ring[job.Stalled]
-	func() {
-		T.mu.Lock()
-		defer T.mu.Unlock()
-
-		var stride time.Duration
-		stride, j, ok = T.scheduled.Min()
-		if !ok {
-			return
-		}
-		T.scheduled.Delete(stride)
-
-		user := j.User
-
-		pending = T.pending[user]
-		delete(T.pending, user)
-	}()
-	if !ok {
-		return uuid.Nil
-	}
-
-	user := j.User
-
-	rhs.mu.Lock()
-	defer rhs.mu.Unlock()
-	rhs.enqueue(j)
-
-	if pending != nil {
-		for j, ok = pending.PopFront(); ok; j, ok = pending.PopFront() {
-			rhs.enqueue(j)
-		}
-	}
-
-	return user
-}
-
-func (T *Sink) RemoveUser(user uuid.UUID) {
-	T.mu.Lock()
-	defer T.mu.Unlock()
-
-	delete(T.pending, user)
-	delete(T.stride, user)
-}
diff --git a/lib/rob/schedulers/v3/job.go b/lib/rob/schedulers/v3/job.go
new file mode 100644
index 0000000000000000000000000000000000000000..b244c4b7c237e3a53e031059e5bc406959573ca5
--- /dev/null
+++ b/lib/rob/schedulers/v3/job.go
@@ -0,0 +1,11 @@
+package schedulers
+
+import "github.com/google/uuid"
+
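+// Job is a pending acquire for a user. When a worker becomes available, its
+// ID is sent on Ready; Ready is closed instead if the job is cancelled (the
+// user is deleted or the scheduler closes).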
+type Job struct {
+	User  *User
+	Ready chan<- uuid.UUID
+}
diff --git a/lib/rob/schedulers/v3/scheduler.go b/lib/rob/schedulers/v3/scheduler.go
new file mode 100644
index 0000000000000000000000000000000000000000..02f0aa4e0564cca761db826466d4aa2c92c0fa45
--- /dev/null
+++ b/lib/rob/schedulers/v3/scheduler.go
@@ -0,0 +1,276 @@
+package schedulers
+
+import (
+	"sync"
+	"time"
+
+	"github.com/google/uuid"
+
+	"gfx.cafe/gfx/pggat/lib/rob"
+	"gfx.cafe/gfx/pggat/lib/util/pools"
+	"gfx.cafe/gfx/pggat/lib/util/rbtree"
+	"gfx.cafe/gfx/pggat/lib/util/slices"
+)
+
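+// Scheduler is a stride scheduler: each user accumulates stride equal to
+// the worker time it has consumed, and pending jobs are dispatched in
+// ascending stride order so the least-served user goes first.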
+type Scheduler struct {
+	// cc pools ready channels for reuse across blocking Acquire calls
+	cc pools.Locked[chan uuid.UUID]
+
+	closed bool
+
+	// queue holds idle workers, most recently parked last; workers indexes
+	// every registered worker by ID
+	queue   []*Worker
+	workers map[uuid.UUID]*Worker
+
+	// floor is a lower bound on scheduled strides, raised as jobs are
+	// dispatched; users indexes per-user state; schedule orders pending
+	// jobs by stride
+	floor    time.Duration
+	users    map[uuid.UUID]*User
+	schedule rbtree.RBTree[time.Duration, Job]
+
+	mu sync.Mutex
+}
+
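+// AddWorker registers a worker and immediately releases it, serving the
+// lowest-stride pending job if one exists or parking the worker as idle.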
+func (T *Scheduler) AddWorker(id uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	if T.workers == nil {
+		T.workers = make(map[uuid.UUID]*Worker)
+	}
+	worker := &Worker{
+		ID: id,
+	}
+	T.workers[id] = worker
+
+	T.release(worker)
+}
+
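+// DeleteWorker unregisters a worker. Pending jobs are unaffected; a later
+// Release of the deleted ID is a no-op.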
+func (T *Scheduler) DeleteWorker(worker uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	w, ok := T.workers[worker]
+	if !ok {
+		return
+	}
+	delete(T.workers, worker)
+	T.queue = slices.Delete(T.queue, w)
+}
+
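+// AddUser registers a user. Its initial stride sits just above the highest
+// scheduled stride (or at the floor when nothing is pending) so a newcomer
+// cannot jump ahead of existing work.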
+func (T *Scheduler) AddUser(user uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	if T.users == nil {
+		T.users = make(map[uuid.UUID]*User)
+	}
+
+	stride := T.floor
+	if s, _, ok := T.schedule.Max(); ok {
+		stride = s + 1
+	}
+
+	T.users[user] = &User{
+		ID:     user,
+		Stride: stride,
+	}
+}
+
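+// DeleteUser unregisters a user and cancels its scheduled job, if any, by
+// closing the job's Ready channel.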
+func (T *Scheduler) DeleteUser(user uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	u, ok := T.users[user]
+	if !ok {
+		return
+	}
+	delete(T.users, user)
+
+	if u.Scheduled {
+		var j Job
+		j, ok = T.schedule.Get(u.Stride)
+		if ok {
+			close(j.Ready)
+			T.schedule.Delete(u.Stride)
+		}
+	}
+}
+
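+// Acquire obtains a worker for user, returning the worker's ID. It returns
+// uuid.Nil if the scheduler is closed or the user is unknown. With
+// SyncModeNonBlocking it also returns uuid.Nil when no worker is idle;
+// otherwise it schedules a job and blocks until a worker is handed over.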
+func (T *Scheduler) Acquire(user uuid.UUID, mode rob.SyncMode) uuid.UUID {
+	v, c := func() (uuid.UUID, chan uuid.UUID) {
+		T.mu.Lock()
+		defer T.mu.Unlock()
+
+		if T.closed {
+			return uuid.Nil, nil
+		}
+
+		u, ok := T.users[user]
+		if !ok {
+			return uuid.Nil, nil
+		}
+
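+		// fast path: hand over an idle worker immediately, most
+		// recently parked first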
+		if len(T.queue) > 0 {
+			worker := T.queue[len(T.queue)-1]
+			T.queue = T.queue[:len(T.queue)-1]
+
+			u.Worker = worker
+			worker.User = u
+			worker.Since = time.Now()
+
+			return worker.ID, nil
+		}
+
+		if mode == rob.SyncModeNonBlocking {
+			return uuid.Nil, nil
+		}
+
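+		// reuse a pooled ready channel if available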
+		ready, _ := T.cc.Get()
+		if ready == nil {
+			ready = make(chan uuid.UUID, 1)
+		}
+		job := Job{
+			User:  u,
+			Ready: ready,
+		}
+
+		// find empty slot
+		if u.Stride < T.floor {
+			u.Stride = T.floor
+		}
+		for _, ok = T.schedule.Get(u.Stride); ok; _, ok = T.schedule.Get(u.Stride) {
+			u.Stride++
+		}
+		T.schedule.Set(u.Stride, job)
+		u.Scheduled = true
+
+		return uuid.Nil, ready
+	}()
+
+	if v != uuid.Nil {
+		return v
+	}
+
+	if c != nil {
+		var ok bool
+		v, ok = <-c
+		if ok {
+			T.cc.Put(c)
+		}
+	}
+
+	return v
+}
+
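+// release charges the elapsed hold time to the worker's previous user, then
+// hands the worker to the lowest-stride pending job or parks it on the idle
+// queue. The caller must hold T.mu.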
+func (T *Scheduler) release(worker *Worker) {
+	now := time.Now()
+
+	// update prev user and state
+	if worker.User != nil {
+		worker.User.Stride += now.Sub(worker.Since)
+		worker.User.Worker = nil
+
+		worker.Since = now
+		worker.User = nil
+	}
+
+	// try to give it to the next pending
+	stride, job, ok := T.schedule.Min()
+	if !ok {
+		// no work available, append to queue
+		T.queue = append(T.queue, worker)
+		return
+	}
+
+	T.floor = stride
+	T.schedule.Delete(stride)
+
+	job.User.Worker = worker
+	job.User.Scheduled = false
+
+	worker.Since = now
+	worker.User = job.User
+
+	job.Ready <- worker.ID
+}
+
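+// Release returns a worker to the scheduler once its user is done with it.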
+func (T *Scheduler) Release(worker uuid.UUID) {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	w, ok := T.workers[worker]
+	if !ok {
+		return
+	}
+
+	T.release(w)
+}
+
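+// Close shuts down the scheduler; later calls on it become no-ops.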
+func (T *Scheduler) Close() {
+	T.mu.Lock()
+	defer T.mu.Unlock()
+
+	if T.closed {
+		return
+	}
+
+	T.closed = true
+
+	T.users = nil
+	T.workers = nil
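+	// cancel every pending job so blocked acquirers observe uuid.Nil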
+	for stride, job, ok := T.schedule.Min(); ok; stride, job, ok = T.schedule.Min() {
+		T.schedule.Delete(stride)
+		close(job.Ready)
+	}
+	T.queue = nil
+}
+
+var _ rob.Scheduler = (*Scheduler)(nil)
diff --git a/lib/rob/schedulers/v2/scheduler_test.go b/lib/rob/schedulers/v3/scheduler_test.go
similarity index 87%
rename from lib/rob/schedulers/v2/scheduler_test.go
rename to lib/rob/schedulers/v3/scheduler_test.go
index fdd5784f03cce30a78f6c722c066b8c02fc52367..e56599de5d5c038f9a7aae0353106431a9f856d3 100644
--- a/lib/rob/schedulers/v2/scheduler_test.go
+++ b/lib/rob/schedulers/v3/scheduler_test.go
@@ -52,23 +52,6 @@ func testSource(sched *Scheduler, tab *ShareTable, id int, dur time.Duration) {
 	}
 }
 
-func testMultiSource(sched *Scheduler, tab *ShareTable, id int, dur time.Duration, num int) {
-	source := uuid.New()
-	sched.AddUser(source)
-	for i := 0; i < num; i++ {
-		go func() {
-			for {
-				sink := sched.Acquire(source, rob.SyncModeTryNonBlocking)
-				start := time.Now()
-				for time.Since(start) < dur {
-				}
-				tab.Inc(id)
-				sched.Release(sink)
-			}
-		}()
-	}
-}
-
 func testStarver(sched *Scheduler, tab *ShareTable, id int, dur time.Duration) {
 	for {
 		func() {
@@ -403,33 +386,3 @@ func TestScheduler_RemoveSinkOuter(t *testing.T) {
 		t.Errorf("%s", allStacks())
 	}
 }
-
-func TestScheduler_MultiJob(t *testing.T) {
-	var table ShareTable
-	sched := new(Scheduler)
-	testSink(sched)
-	testSink(sched)
-
-	go testMultiSource(sched, &table, 0, 10*time.Millisecond, 2)
-	go testMultiSource(sched, &table, 1, 10*time.Millisecond, 3)
-	go testMultiSource(sched, &table, 2, 10*time.Millisecond, 4)
-
-	time.Sleep(20 * time.Second)
-	t0 := table.Get(0)
-	t1 := table.Get(1)
-	t2 := table.Get(2)
-
-	/*
-		Expectations:
-		- all users should get similar # of executions
-	*/
-
-	t.Log("share of 0:", t0)
-	t.Log("share of 1:", t1)
-	t.Log("share of 2:", t2)
-
-	if t0 == 0 || t1 == 0 || t2 == 0 {
-		t.Error("expected executions on all sources (is there a race in the balancer??)")
-		t.Errorf("%s", allStacks())
-	}
-}
diff --git a/lib/rob/schedulers/v3/user.go b/lib/rob/schedulers/v3/user.go
new file mode 100644
index 0000000000000000000000000000000000000000..41244124be563d8a0029f1235117e2dec035d88d
--- /dev/null
+++ b/lib/rob/schedulers/v3/user.go
@@ -0,0 +1,19 @@
+package schedulers
+
+import (
+	"time"
+
+	"github.com/google/uuid"
+)
+
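+// User tracks per-user scheduling state. Stride is the total worker time
+// the user has consumed and doubles as its priority: the lowest stride is
+// served first.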
+type User struct {
+	ID     uuid.UUID
+	Stride time.Duration
+
+	Scheduled bool
+
+	Worker *Worker
+}
diff --git a/lib/rob/schedulers/v3/worker.go b/lib/rob/schedulers/v3/worker.go
new file mode 100644
index 0000000000000000000000000000000000000000..ad3f1cc2760115e733571580cf586416b1120c32
--- /dev/null
+++ b/lib/rob/schedulers/v3/worker.go
@@ -0,0 +1,17 @@
+package schedulers
+
+import (
+	"time"
+
+	"github.com/google/uuid"
+)
+
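+// Worker tracks per-worker state: User is the user currently holding the
+// worker (nil when idle) and Since is when that hold began, used for stride
+// accounting.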
+type Worker struct {
+	ID uuid.UUID
+
+	User  *User
+	Since time.Time
+}
diff --git a/presets/digitalocean_databases.Caddyfile b/presets/digitalocean_databases.Caddyfile
index 0ac631a704f4b5ccf62d273698032c0d887713a7..a8948adb4bad08df7cf8164eb4fd6e9e4267c32d 100644
--- a/presets/digitalocean_databases.Caddyfile
+++ b/presets/digitalocean_databases.Caddyfile
@@ -3,8 +3,8 @@
 
 	@ro user *_ro
 
-    parameter @ro hybrid.mode=ro
-    user @ro strip_suffix _ro
+	parameter @ro hybrid.mode=ro
+	user @ro strip_suffix _ro
 
 	discovery {
 	    discoverer digitalocean {$PGGAT_DO_API_KEY}