diff --git a/internal/sqladapter/testing/adapter.go.tpl b/internal/sqladapter/testing/adapter.go.tpl
index 855f6048d067fe28b93d21df6eb44650d062d5f3..5e3953b122a793dbd1270035ace48bbe10b5ded4 100644
--- a/internal/sqladapter/testing/adapter.go.tpl
+++ b/internal/sqladapter/testing/adapter.go.tpl
@@ -1099,7 +1099,7 @@ func TestBatchInsert(t *testing.T) {
 
 		err = batch.Wait()
 		assert.NoError(t, err)
-		assert.NoError(t, batch.Error())
+		assert.NoError(t, batch.Err())
 
 		c, err := sess.Collection("artist").Find().Count()
 		assert.NoError(t, err)
@@ -1144,7 +1144,7 @@ func TestBatchInsertReturningKeys(t *testing.T) {
 		assert.True(t, len(keyMap) <= batchSize)
 
 		// Find the elements we've just inserted
-		keys := make([]int, len(keyMap))
+		keys := make([]int, 0, len(keyMap))
 		for i := range keyMap {
 			keys = append(keys, keyMap[i].ID)
 		}
@@ -1154,7 +1154,7 @@ func TestBatchInsertReturningKeys(t *testing.T) {
 		assert.NoError(t, err)
 		assert.Equal(t, uint64(len(keyMap)), c)
 	}
-	assert.NoError(t, batch.Error())
+	assert.NoError(t, batch.Err())
 
 	// Count all new elements
 	c, err := sess.Collection("artist").Find().Count()
diff --git a/lib/sqlbuilder/batch.go b/lib/sqlbuilder/batch.go
index 79a3f8a4bb9009ea94adb1b430b66f04f310aceb..0177b8e686384797f07b1e82125b323a0d2d04df 100644
--- a/lib/sqlbuilder/batch.go
+++ b/lib/sqlbuilder/batch.go
@@ -1,5 +1,7 @@
 package sqlbuilder
 
+// BatchInserter provides a helper that can be used to do massive insertions in
+// batches.
 type BatchInserter struct {
 	inserter *inserter
 	size     int
@@ -25,7 +27,7 @@ func (b *BatchInserter) Values(values ...interface{}) *BatchInserter {
 	return b
 }
 
-func (b *BatchInserter) NextResult(dst interface{}) bool {
+func (b *BatchInserter) nextQuery() *inserter {
 	clone := b.inserter.clone()
 	i := 0
 	for values := range b.values {
@@ -36,23 +38,44 @@ func (b *BatchInserter) NextResult(dst interface{}) bool {
 		}
 	}
 	if i == 0 {
+		return nil
+	}
+	return clone
+}
+
+// NextResult is useful when using PostgreSQL with Returning(): it dumps the
+// next slice of results into dst, which makes it possible to collect the IDs
+// of all the elements inserted in the batch.
+func (b *BatchInserter) NextResult(dst interface{}) bool {
+	clone := b.nextQuery()
+	if clone == nil {
 		return false
 	}
 	b.err = clone.Iterator().All(dst)
 	return (b.err == nil)
 }
 
+// Done signals that no more elements are going to be added to the batch.
 func (b *BatchInserter) Done() {
 	close(b.values)
 }
 
+// Wait blocks until the whole batch is executed.
 func (b *BatchInserter) Wait() error {
-	var nop []struct{}
-	for b.NextResult(&nop) {
+	for {
+		q := b.nextQuery()
+		if q == nil {
+			break
+		}
+		if _, err := q.Exec(); err != nil {
+			b.err = err
+			break
+		}
 	}
-	return b.err
+	return b.Err()
 }
 
-func (b *BatchInserter) Error() error {
+// Err returns the last error encountered while executing the batch.
+func (b *BatchInserter) Err() error {
 	return b.err
 }