simpler migrations (just reindex everything) and migrations on mmm.

fiatjaf
2025-06-10 14:00:38 -03:00
parent e9a08d669e
commit cc6ec3af73
7 changed files with 152 additions and 25 deletions
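All the touched backends now share one migration shape: read the stored version as a big-endian uint16 under the "version" key (a missing key counts as version 1), compare it against a single const target = 2, and if it is lower, wipe the indexes, rebuild them from the raw events, and write target back. Roughly, as a sketch in which kv, store, and reindexEverything are illustrative stand-ins rather than names from this repository:

package sketch

import (
	"encoding/binary"
	"log"
)

// kv is a stand-in for the backend-specific storage handle used in this sketch.
type kv interface {
	get(key string) ([]byte, bool)
	set(key string, val []byte)
	reindexEverything() error
}

// migrate sketches the shape the backends below converge on; the real code
// runs inside a badger/lmdb write transaction.
func migrate(store kv) error {
	const target = 2

	// databases without a "version" key are treated as version 1
	version := uint16(1)
	if raw, ok := store.get("version"); ok {
		version = binary.BigEndian.Uint16(raw)
	}

	// do the migrations in increasing steps (there is no rollback)
	if version < target {
		log.Printf("migration %d: delete all indexes and recreate them", target)
		if err := store.reindexEverything(); err != nil {
			return err
		}

		// bump version
		var buf [2]byte
		binary.BigEndian.PutUint16(buf[:], target)
		store.set("version", buf[:])
	}
	return nil
}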


@@ -37,7 +37,7 @@ func FuzzQuery(f *testing.F) {
 		db := &BadgerBackend{}
 		db.DB = bdb
-		if err := db.runMigrations(); err != nil {
+		if err := db.migrate(); err != nil {
 			t.Fatalf("error: %s", err)
 			return
 		}


@@ -51,7 +51,7 @@ func (b *BadgerBackend) Init() error {
 	}
 	b.DB = db
-	if err := b.runMigrations(); err != nil {
+	if err := b.migrate(); err != nil {
 		return fmt.Errorf("error running migrations: %w", err)
 	}


@@ -10,14 +10,14 @@ import (
 	"github.com/dgraph-io/badger/v4"
 )
 
-func (b *BadgerBackend) runMigrations() error {
+func (b *BadgerBackend) migrate() error {
 	return b.Update(func(txn *badger.Txn) error {
 		item, err := txn.Get([]byte("version"))
 		if err != nil && err != badger.ErrKeyNotFound {
 			return fmt.Errorf("failed to get db version: %w", err)
 		}
 
-		var version uint16 = 0
+		var version uint16 = 1
 		if err == nil {
 			err = item.Value(func(val []byte) error {
 				version = binary.BigEndian.Uint16(val)
@@ -28,12 +28,11 @@ func (b *BadgerBackend) runMigrations() error {
 			}
 		}
 
-		// do the migrations in increasing steps (there is no rollback)
-		//
-		// the 5 first migrations go to trash because on version 5 we need to export and import all the data anyway
-		if version < 5 {
-			log.Println("[badger] migration 5: delete all indexes and recreate them")
+		const target = 2
+		// do the migrations in increasing steps (there is no rollback)
+		if version < target {
+			log.Printf("[badger] migration %d: delete all indexes and recreate them\n", target)
 
 			// delete all index entries
 			prefixes := []byte{
@@ -83,12 +82,12 @@ func (b *BadgerBackend) runMigrations() error {
 				err := item.Value(func(val []byte) error {
 					evt := nostr.Event{}
 					if err := betterbinary.Unmarshal(val, &evt); err != nil {
-						return fmt.Errorf("error decoding event %x on migration 5: %w", idx, err)
+						return fmt.Errorf("error decoding event %x on migration %d: %w", idx, target, err)
 					}
 
 					for key := range b.getIndexKeysForEvent(evt, idx[1:]) {
 						if err := txn.Set(key, nil); err != nil {
-							return fmt.Errorf("failed to save index for event %s on migration 5: %w", evt.ID, err)
+							return fmt.Errorf("failed to save index for event %s on migration %d: %w", evt.ID, target, err)
 						}
 					}
@@ -100,7 +99,7 @@ func (b *BadgerBackend) runMigrations() error {
 			}
 
 			// bump version
-			if err := b.bumpVersion(txn, 5); err != nil {
+			if err := b.bumpVersion(txn, target); err != nil {
 				return err
 			}
 		}
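The badger migration first deletes every existing index entry by key prefix (the hunk ending at prefixes := []byte{ is cut off by the diff context) and then walks the raw events to recreate their index keys, as the later hunks show. For illustration, a delete-by-prefix pass in badger generally looks like the sketch below; this is the general iterator pattern, not the commit's actual code, and deleteIndexEntries with its simplified error handling is hypothetical:

package sketch

import "github.com/dgraph-io/badger/v4"

// deleteIndexEntries removes every key that starts with one of the given
// single-byte prefixes, inside an already-open write transaction.
func deleteIndexEntries(txn *badger.Txn, prefixes []byte) error {
	for _, p := range prefixes {
		opts := badger.DefaultIteratorOptions
		opts.Prefix = []byte{p}
		opts.PrefetchValues = false // we only need keys

		// collect the keys first, then delete, so the iterator is not
		// affected by the writes
		keys := make([][]byte, 0)
		it := txn.NewIterator(opts)
		for it.Rewind(); it.Valid(); it.Next() {
			keys = append(keys, it.Item().KeyCopy(nil))
		}
		it.Close()

		for _, key := range keys {
			if err := txn.Delete(key); err != nil {
				return err
			}
		}
	}
	return nil
}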


@@ -194,5 +194,5 @@ func (b *LMDBBackend) initialize() error {
 		return err
 	}
 
-	return b.runMigrations()
+	return b.migrate()
 }


@@ -14,24 +14,23 @@ const (
 	DB_VERSION byte = 'v'
 )
 
-func (b *LMDBBackend) runMigrations() error {
+func (b *LMDBBackend) migrate() error {
 	return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
 		val, err := txn.Get(b.settingsStore, []byte("version"))
 		if err != nil && !lmdb.IsNotFound(err) {
 			return fmt.Errorf("failed to get db version: %w", err)
 		}
 
-		var version uint16 = 0
+		var version uint16 = 1
 		if err == nil {
 			version = binary.BigEndian.Uint16(val)
 		}
 
-		// do the migrations in increasing steps (there is no rollback)
-		//
-		// this is when we reindex everything
-		if version < 9 {
-			log.Println("[lmdb] migration 9: reindex everything")
+		const target = 2
+		// do the migrations in increasing steps (there is no rollback)
+		if version < target {
+			log.Printf("[lmdb] migration %d: reindex everything\n", target)
 
 			if err := txn.Drop(b.indexId, false); err != nil {
 				return err
@@ -60,7 +59,7 @@ func (b *LMDBBackend) runMigrations() error {
 			cursor, err := txn.OpenCursor(b.rawEventStore)
 			if err != nil {
-				return fmt.Errorf("failed to open cursor in migration 9: %w", err)
+				return fmt.Errorf("failed to open cursor in migration %d: %w", target, err)
 			}
 			defer cursor.Close()
@@ -73,7 +72,7 @@ func (b *LMDBBackend) runMigrations() error {
 					break
 				}
 				if err != nil {
-					return fmt.Errorf("failed to get next in migration 9: %w", err)
+					return fmt.Errorf("failed to get next in migration %d: %w", target, err)
 				}
 
 				if err := betterbinary.Unmarshal(val, &evt); err != nil {
@@ -83,14 +82,14 @@ func (b *LMDBBackend) runMigrations() error {
 				for key := range b.getIndexKeysForEvent(evt) {
 					if err := txn.Put(key.dbi, key.key, idx, 0); err != nil {
-						return fmt.Errorf("failed to save index %s for event %s (%v) on migration 9: %w",
-							b.keyName(key), evt.ID, idx, err)
+						return fmt.Errorf("failed to save index %s for event %s (%v) on migration %d: %w",
+							b.keyName(key), evt.ID, idx, target, err)
 					}
 				}
 			}
 
 			// bump version
-			if err := b.setVersion(txn, 9); err != nil {
+			if err := b.setVersion(txn, target); err != nil {
 				return err
 			}
 		}


@@ -21,6 +21,7 @@ type IndexingLayer struct {
 	lmdbEnv *lmdb.Env
 
+	settings lmdb.DBI
 	indexCreatedAt lmdb.DBI
 	indexKind lmdb.DBI
 	indexPubkey lmdb.DBI
@@ -75,6 +76,11 @@ func (il *IndexingLayer) Init() error {
 	// open each db
 	if err := il.lmdbEnv.Update(func(txn *lmdb.Txn) error {
+		if dbi, err := txn.OpenDBI("settings", lmdb.Create); err != nil {
+			return err
+		} else {
+			il.settings = dbi
+		}
 		if dbi, err := txn.OpenDBI("created_at", multiIndexCreationFlags); err != nil {
 			return err
 		} else {
@@ -120,6 +126,10 @@ func (il *IndexingLayer) Init() error {
 		return err
 	}
 
+	if err := il.migrate(); err != nil {
+		return err
+	}
+
 	return nil
 }

eventstore/mmm/migration.go (new file, 119 additions)

@@ -0,0 +1,119 @@
+package mmm
+
+import (
+	"encoding/binary"
+	"fmt"
+	"log"
+
+	"fiatjaf.com/nostr"
+	"github.com/PowerDNS/lmdb-go/lmdb"
+)
+
+func (il *IndexingLayer) migrate() error {
+	return il.lmdbEnv.Update(func(txn *lmdb.Txn) error {
+		val, err := txn.Get(il.settings, []byte("version"))
+		if err != nil && !lmdb.IsNotFound(err) {
+			return fmt.Errorf("failed to get db version: %w", err)
+		}
+
+		var version uint16 = 1
+		if err == nil {
+			version = binary.BigEndian.Uint16(val)
+		}
+
+		const target = 2
+		// do the migrations in increasing steps (there is no rollback)
+		if version < target {
+			log.Printf("[mmm/%s] migration %d: reindex everything\n", il.name, target)
+
+			if err := txn.Drop(il.indexKind, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexPubkey, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexPubkeyKind, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexTag, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexTag32, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexTagAddr, false); err != nil {
+				return err
+			}
+			if err := txn.Drop(il.indexPTagKind, false); err != nil {
+				return err
+			}
+
+			// we can't just iterate this layer's events because we don't have this index
+			// so we must iterate all events in the mmap file and check if they belong to this layer
+			mmmtxn, err := il.mmmm.lmdbEnv.BeginTxn(nil, lmdb.Readonly)
+			if err != nil {
+				return err
+			}
+			defer mmmtxn.Abort()
+
+			cursor, err := mmmtxn.OpenCursor(il.mmmm.indexId)
+			if err != nil {
+				return fmt.Errorf("failed to open cursor in migration %d: %w", target, err)
+			}
+			defer cursor.Close()
+
+			var evt nostr.Event
+			var id, val []byte
+			for {
+				id, val, err = cursor.Get(nil, nil, lmdb.Next)
+				if lmdb.IsNotFound(err) {
+					break
+				}
+				if err != nil {
+					return fmt.Errorf("failed to get next in migration %d: %w", target, err)
+				}
+
+				// check if this event belongs to this layer
+				belongs := false
+				for i := 12; i < len(val); i += 2 {
+					ilid := binary.BigEndian.Uint16(val[i : i+2])
+					if ilid == il.id {
+						belongs = true
+						break
+					}
+				}
+				if !belongs {
+					continue
+				}
+
+				// load event and reindex
+				pos := positionFromBytes(val[0:12])
+				if err := il.mmmm.loadEvent(pos, &evt); err != nil {
+					log.Printf("failed to load event %x for reindexing on layer %s: %s", id, il.name, err)
+					continue
+				}
+
+				for key := range il.getIndexKeysForEvent(evt) {
+					if err := txn.Put(key.dbi, key.key, val[0:12], 0); err != nil {
+						return fmt.Errorf("failed to save index for event %s on migration %d: %w", evt.ID, target, err)
+					}
+				}
+			}
+
+			// bump version
+			if err := il.setVersion(txn, target); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+}
+
+func (il *IndexingLayer) setVersion(txn *lmdb.Txn, v uint16) error {
+	var newVersion [2]byte
+	binary.BigEndian.PutUint16(newVersion[:], v)
+	return txn.Put(il.settings, []byte("version"), newVersion[:], 0)
+}
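A note on the membership check in the loop above: it depends on the layout of values in the shared il.mmmm.indexId table, where the first 12 bytes encode the event's position in the mmap file and the remainder is a list of 2-byte big-endian IndexingLayer ids. A hypothetical decoder for that trailing list, not part of the commit:

package sketch

import "encoding/binary"

// layerIDsFromIndexValue decodes the trailing layer-id list of an indexId
// value: 12 bytes of position data followed by one big-endian uint16 per
// layer that references the event.
func layerIDsFromIndexValue(val []byte) []uint16 {
	if len(val) < 12 {
		return nil
	}
	ids := make([]uint16, 0, (len(val)-12)/2)
	for i := 12; i+2 <= len(val); i += 2 {
		ids = append(ids, binary.BigEndian.Uint16(val[i:i+2]))
	}
	return ids
}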