go mod tidy works now at least.
@@ -1,12 +1,14 @@
 package bluge

 import (
+    "errors"
     "fmt"
     "sync"

+    "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore"
     "github.com/blugelabs/bluge"
     "github.com/blugelabs/bluge/analysis/token"
-    "fiatjaf.com/nostr/eventstore"
     "golang.org/x/text/unicode/norm"
 )
@@ -50,3 +52,7 @@ func (b *BlugeBackend) Init() error {

     return nil
 }
+
+func (b *BlugeBackend) CountEvents(nostr.Filter) (int64, error) {
+    return 0, errors.New("not supported")
+}
@@ -1,7 +1,6 @@
 package bluge

 import (
-    "context"
     "fmt"

     "fiatjaf.com/nostr"
@@ -9,7 +8,7 @@ import (
     "fiatjaf.com/nostr/eventstore/internal"
 )

-func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt nostr.Event) error {
+func (b *BlugeBackend) ReplaceEvent(evt nostr.Event) error {
     b.Lock()
     defer b.Unlock()

@@ -30,7 +29,7 @@ func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt nostr.Event) error
     }

     if shouldStore {
-        if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent {
+        if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
             return fmt.Errorf("failed to save: %w", err)
         }
     }
@@ -28,16 +28,6 @@ func detect(dir string) (string, error) {
         return "", err
     }
     if !f.IsDir() {
-        f, err := os.Open(dir)
-        if err != nil {
-            return "", err
-        }
-        buf := make([]byte, 15)
-        f.Read(buf)
-        if string(buf) == "SQLite format 3" {
-            return "sqlite", nil
-        }
-
         return "", fmt.Errorf("unknown db format")
     }
@@ -9,16 +9,11 @@ import (
     "os"
     "strings"

+    "fiatjaf.com/nostr"
     "fiatjaf.com/nostr/eventstore"
     "fiatjaf.com/nostr/eventstore/badger"
-    "fiatjaf.com/nostr/eventstore/elasticsearch"
     "fiatjaf.com/nostr/eventstore/lmdb"
-    "fiatjaf.com/nostr/eventstore/mysql"
-    "fiatjaf.com/nostr/eventstore/postgresql"
     "fiatjaf.com/nostr/eventstore/slicestore"
-    "fiatjaf.com/nostr/eventstore/sqlite3"
     "fiatjaf.com/nostr/eventstore/strfry"
-    "fiatjaf.com/nostr"
     "github.com/urfave/cli/v3"
 )
@@ -38,7 +33,7 @@ var app = &cli.Command{
         &cli.StringFlag{
             Name:    "type",
             Aliases: []string{"t"},
-            Usage:   "store type ('sqlite', 'lmdb', 'badger', 'postgres', 'mysql', 'elasticsearch', 'mmm')",
+            Usage:   "store type ('lmdb', 'badger', 'mmm')",
         },
     },
     Before: func(ctx context.Context, c *cli.Command) (context.Context, error) {
@@ -57,8 +52,6 @@ var app = &cli.Command{
         case strings.HasPrefix(path, "https://"):
-            // if we ever add something else that uses URLs we'll have to modify this
-            typ = "elasticsearch"
         case strings.HasSuffix(path, ".conf"):
             typ = "strfry"
         case strings.HasSuffix(path, ".jsonl"):
             typ = "file"
         default:
@@ -76,15 +69,6 @@ var app = &cli.Command{
     }

     switch typ {
-    case "sqlite":
-        db = &sqlite3.SQLite3Backend{
-            DatabaseURL:       path,
-            QueryLimit:        1_000_000,
-            QueryAuthorsLimit: 1_000_000,
-            QueryKindsLimit:   1_000_000,
-            QueryIDsLimit:     1_000_000,
-            QueryTagsLimit:    1_000_000,
-        }
     case "lmdb":
         db = &lmdb.LMDBBackend{Path: path, MaxLimit: 1_000_000}
     case "badger":
@@ -94,28 +78,6 @@ var app = &cli.Command{
         if db, err = doMmmInit(path); err != nil {
             return ctx, err
         }
-    case "postgres", "postgresql":
-        db = &postgresql.PostgresBackend{
-            DatabaseURL:       path,
-            QueryLimit:        1_000_000,
-            QueryAuthorsLimit: 1_000_000,
-            QueryKindsLimit:   1_000_000,
-            QueryIDsLimit:     1_000_000,
-            QueryTagsLimit:    1_000_000,
-        }
-    case "mysql":
-        db = &mysql.MySQLBackend{
-            DatabaseURL:       path,
-            QueryLimit:        1_000_000,
-            QueryAuthorsLimit: 1_000_000,
-            QueryKindsLimit:   1_000_000,
-            QueryIDsLimit:     1_000_000,
-            QueryTagsLimit:    1_000_000,
-        }
-    case "elasticsearch":
-        db = &elasticsearch.ElasticsearchStorage{URL: path}
     case "strfry":
         db = &strfry.StrfryBackend{ConfigPath: path}
     case "file":
         db = &slicestore.SliceStore{}
@@ -5,13 +5,13 @@ import (
     "fiatjaf.com/nostr/eventstore/badger"
+    "fiatjaf.com/nostr/eventstore/bluge"
     "fiatjaf.com/nostr/eventstore/lmdb"
     "fiatjaf.com/nostr/eventstore/strfry"
     "fiatjaf.com/nostr/eventstore/mmm"
 )

 // compile-time checks to ensure all backends implement Store
 var (
     _ eventstore.Store = (*badger.BadgerBackend)(nil)
     _ eventstore.Store = (*lmdb.LMDBBackend)(nil)
     _ eventstore.Store = (*strfry.StrfryBackend)(nil)
     _ eventstore.Store = (*mmm.IndexingLayer)(nil)
+    _ eventstore.Store = (*bluge.BlugeBackend)(nil)
 )
@@ -6,7 +6,7 @@ import (

     "fiatjaf.com/nostr"
     "fiatjaf.com/nostr/eventstore"
-    bin "fiatjaf.com/nostr/eventstore/internal/binary"
+    "fiatjaf.com/nostr/eventstore/codec/betterbinary"
     "github.com/PowerDNS/lmdb-go/lmdb"
 )
@@ -45,14 +45,14 @@ func (b *LMDBBackend) SaveEvent(evt nostr.Event) error {

 func (b *LMDBBackend) save(txn *lmdb.Txn, evt nostr.Event) error {
     // encode to binary form so we'll save it
-    bin, err := bin.Marshal(evt)
-    if err != nil {
+    buf := make([]byte, betterbinary.Measure(evt))
+    if err := betterbinary.Marshal(evt, buf); err != nil {
         return err
     }

     idx := b.Serial()
     // raw event store
-    if err := txn.Put(b.rawEventStore, idx, bin, 0); err != nil {
+    if err := txn.Put(b.rawEventStore, idx, buf, 0); err != nil {
         return err
     }
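The lmdb backend above switches from the internal binary codec to the shared betterbinary codec, which separates sizing from encoding: Measure reports the exact encoded length so one right-sized buffer can be allocated, and Marshal then fills it in place. A minimal sketch of that calling pattern, using only the Measure/Marshal signatures visible in this diff (the helper name is illustrative, not part of the commit):

```go
// encodeEvent shows the measure-then-marshal flow used in save() above.
// (hypothetical helper; not part of this commit)
func encodeEvent(evt nostr.Event) ([]byte, error) {
	// ask the codec exactly how many bytes the encoded event needs
	buf := make([]byte, betterbinary.Measure(evt))
	// encode directly into the preallocated buffer, no second copy
	if err := betterbinary.Marshal(evt, buf); err != nil {
		return nil, err
	}
	return buf, nil
}
```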
@@ -2,16 +2,15 @@ package mmm

 import (
     "bytes"
-    "context"
     "encoding/binary"
     "slices"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/mmm/betterbinary"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

-func (il *IndexingLayer) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
+func (il *IndexingLayer) CountEvents(filter nostr.Filter) (int64, error) {
     var count int64 = 0

     queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter)
@@ -1,39 +1,39 @@
 package mmm

 import (
     "context"
     "encoding/binary"
     "encoding/hex"
     "fmt"
     "slices"

-    "github.com/PowerDNS/lmdb-go/lmdb"
     "fiatjaf.com/nostr"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

-func (il *IndexingLayer) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
+func (il *IndexingLayer) DeleteEvent(id nostr.ID) error {
     il.mmmm.writeMutex.Lock()
     defer il.mmmm.writeMutex.Unlock()

     return il.mmmm.lmdbEnv.Update(func(mmmtxn *lmdb.Txn) error {
         return il.lmdbEnv.Update(func(iltxn *lmdb.Txn) error {
-            return il.delete(mmmtxn, iltxn, evt)
+            return il.delete(mmmtxn, iltxn, id)
         })
     })
 }

-func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, evt *nostr.Event) error {
+func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, id nostr.ID) error {
     zeroRefs := false
     b := il.mmmm

     b.Logger.Debug().Str("layer", il.name).Uint16("il", il.id).Msg("deleting")

     // first in the mmmm txn we check if we have the event still
-    idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
-    val, err := mmmtxn.Get(b.indexId, idPrefix8)
+    val, err := mmmtxn.Get(b.indexId, id[0:8])
     if err != nil {
         if lmdb.IsNotFound(err) {
             // we already do not have this anywhere
             return nil
         }
-        return fmt.Errorf("failed to check if we have the event %x: %w", idPrefix8, err)
+        return fmt.Errorf("failed to check if we have the event %x: %w", id, err)
     }

     // we have this, but do we have it in the current layer?
@@ -49,8 +49,8 @@ func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, evt *nostr.Ev
         copy(nextval, val[0:i])
         copy(nextval[i:], val[i+2:])

-        if err := mmmtxn.Put(b.indexId, idPrefix8, nextval, 0); err != nil {
-            return fmt.Errorf("failed to update references for %x: %w", idPrefix8, err)
+        if err := mmmtxn.Put(b.indexId, id[0:8], nextval, 0); err != nil {
+            return fmt.Errorf("failed to update references for %x: %w", id[:], err)
         }

         // if there are no more layers we will delete everything later
@@ -60,6 +60,12 @@ func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, evt *nostr.Ev
         }
     }

+    // load the event so we can compute the indexes
+    var evt nostr.Event
+    if err := il.mmmm.loadEvent(pos, &evt); err != nil {
+        return fmt.Errorf("failed to load event %x when deleting: %w", id[:], err)
+    }
+
     // calculate all index keys we have for this event and delete them
     for k := range il.getIndexKeysForEvent(evt) {
         if err := iltxn.Del(k.dbi, k.key, val[0:12]); err != nil && !lmdb.IsNotFound(err) {
@@ -69,7 +75,7 @@ func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, evt *nostr.Ev

     // if there are no more refs we delete the event from the id index and mmap
     if zeroRefs {
-        if err := b.purge(mmmtxn, idPrefix8, pos); err != nil {
+        if err := b.purge(mmmtxn, id[0:8], pos); err != nil {
             panic(err)
         }
     }
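DeleteEvent above now takes a nostr.ID (a fixed 32-byte array, as implied by the id[0:8] slicing in this diff) instead of a full event pointer, so the 8-byte key for the indexId table becomes a plain subslice rather than a hex decode. A sketch of the difference, hedged to the nostr.ID shape implied by the code above:

```go
// before: IDs were hex strings, so the key had to be decoded first
//   idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])

// after: IDs are [32]byte, so the key is a zero-cost reslice
// (hypothetical helper; the diff inlines id[0:8] instead)
func idPrefix(id nostr.ID) []byte {
	return id[0:8] // first 8 raw bytes, no decoding, no allocation
}
```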
@@ -8,8 +8,8 @@ import (
     "strconv"
     "strings"

-    "github.com/PowerDNS/lmdb-go/lmdb"
     "fiatjaf.com/nostr"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

 // this iterator always goes backwards
@@ -46,7 +46,7 @@ type key struct {
     key []byte
 }

-func (il *IndexingLayer) getIndexKeysForEvent(evt *nostr.Event) iter.Seq[key] {
+func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
     return func(yield func(key) bool) {
         {
             // ~ by pubkey+date
@@ -1,15 +1,11 @@
 package mmm

 import (
-    "context"
     "encoding/binary"
     "fmt"
     "os"
     "path/filepath"

-    "github.com/PowerDNS/lmdb-go/lmdb"
     "fiatjaf.com/nostr/eventstore"
+    "fiatjaf.com/nostr"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

 var _ eventstore.Store = (*IndexingLayer)(nil)
@@ -18,10 +14,8 @@ type IndexingLayer struct {
     isInitialized bool
     name          string

-    ShouldIndex func(context.Context, *nostr.Event) bool
-    MaxLimit    int
-
-    mmmm *MultiMmapManager
+    MaxLimit int
+    mmmm     *MultiMmapManager

     // this is stored in the knownLayers db as a value, and used to keep track of which layer owns each event
     id uint16
@@ -136,65 +130,6 @@ func (il *IndexingLayer) Init() error {

 func (il *IndexingLayer) Name() string { return il.name }

-func (il *IndexingLayer) runThroughEvents(txn *lmdb.Txn) error {
-    ctx := context.Background()
-    b := il.mmmm
-
-    // run through all events we have and see if this new index wants them
-    cursor, err := txn.OpenCursor(b.indexId)
-    if err != nil {
-        return fmt.Errorf("when opening cursor on %v: %w", b.indexId, err)
-    }
-    defer cursor.Close()
-
-    for {
-        idPrefix8, val, err := cursor.Get(nil, nil, lmdb.Next)
-        if lmdb.IsNotFound(err) {
-            break
-        }
-        if err != nil {
-            return fmt.Errorf("when moving the cursor: %w", err)
-        }
-
-        update := false
-
-        posb := val[0:12]
-        pos := positionFromBytes(posb)
-        evt := &nostr.Event{}
-        if err := b.loadEvent(pos, evt); err != nil {
-            return fmt.Errorf("when loading event from mmap: %w", err)
-        }
-
-        if il.ShouldIndex != nil && il.ShouldIndex(ctx, evt) {
-            // add the current reference
-            val = binary.BigEndian.AppendUint16(val, il.id)
-
-            // if we were already updating to remove the reference
-            // now that we've added the reference back we don't really have to update
-            update = !update
-
-            // actually index
-            if err := il.lmdbEnv.Update(func(iltxn *lmdb.Txn) error {
-                for k := range il.getIndexKeysForEvent(evt) {
-                    if err := iltxn.Put(k.dbi, k.key, posb, 0); err != nil {
-                        return err
-                    }
-                }
-                return nil
-            }); err != nil {
-                return fmt.Errorf("failed to index: %w", err)
-            }
-        }
-
-        if update {
-            if err := txn.Put(b.indexId, idPrefix8, val, 0); err != nil {
-                return fmt.Errorf("failed to put updated index+refs: %w", err)
-            }
-        }
-    }
-    return nil
-}
-
 func (il *IndexingLayer) Close() {
     il.lmdbEnv.Close()
 }
@@ -10,9 +10,9 @@ import (
     "syscall"
    "unsafe"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/mmm/betterbinary"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+    "github.com/PowerDNS/lmdb-go/lmdb"
     "github.com/rs/zerolog"
 )
@@ -30,14 +30,14 @@ type MultiMmapManager struct {
     mmapf    mmap
     mmapfEnd uint64

+    writeMutex sync.Mutex
+
     lmdbEnv     *lmdb.Env
     stuff       lmdb.DBI
     knownLayers lmdb.DBI
     indexId     lmdb.DBI

     freeRanges []position
-
-    mutex sync.Mutex
 }

 func (b *MultiMmapManager) String() string {
@@ -147,8 +147,8 @@ func (b *MultiMmapManager) Init() error {
 }

 func (b *MultiMmapManager) EnsureLayer(name string, il *IndexingLayer) error {
-    b.mutex.Lock()
-    defer b.mutex.Unlock()
+    b.writeMutex.Lock()
+    defer b.writeMutex.Unlock()

     il.mmmm = b
     il.name = name
@@ -168,9 +168,6 @@ func (b *MultiMmapManager) EnsureLayer(name string, il *IndexingLayer) error {
             return fmt.Errorf("failed to init new layer %s: %w", name, err)
         }

-        if err := il.runThroughEvents(txn); err != nil {
-            return fmt.Errorf("failed to run %s through events: %w", name, err)
-        }
         return txn.Put(b.knownLayers, []byte(name), binary.BigEndian.AppendUint16(nil, il.id), 0)
     } else if err == nil {
         il.id = binary.BigEndian.Uint16(idv)
@@ -193,8 +190,8 @@ func (b *MultiMmapManager) EnsureLayer(name string, il *IndexingLayer) error {
 }

 func (b *MultiMmapManager) DropLayer(name string) error {
-    b.mutex.Lock()
-    defer b.mutex.Unlock()
+    b.writeMutex.Lock()
+    defer b.writeMutex.Unlock()

     // get layer reference
     idx := slices.IndexFunc(b.layers, func(il *IndexingLayer) bool { return il.name == name })
@@ -2,42 +2,46 @@ package mmm

 import (
     "bytes"
-    "context"
     "encoding/binary"
-    "encoding/hex"
     "fmt"
+    "iter"
     "log"
     "slices"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/internal"
-    "fiatjaf.com/nostr/eventstore/mmm/betterbinary"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+    "fiatjaf.com/nostr/eventstore/internal"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

 // GetByID returns the event -- if found in this mmm -- and all the IndexingLayers it belongs to.
-func (b *MultiMmapManager) GetByID(id string) (*nostr.Event, IndexingLayers) {
-    events := make(chan *nostr.Event)
+func (b *MultiMmapManager) GetByID(id nostr.ID) (*nostr.Event, IndexingLayers) {
     presence := make(chan []uint16)
-    b.queryByIDs(events, []string{id}, presence)
-    for evt := range events {
+
+    var event *nostr.Event
+    b.queryByIDs(func(evt nostr.Event) bool {
+        event = &evt
+        return false
+    }, []nostr.ID{id}, presence)
+
+    if event != nil {
         p := <-presence
         present := make([]*IndexingLayer, len(p))
         for i, id := range p {
             present[i] = b.layers.ByID(id)
         }
-        return evt, present
+        return event, present
     }

     return nil, nil
 }

 // queryByIDs emits the events of the given id to the given channel if they exist anywhere in this mmm.
 // if presence is given it will also be used to emit slices of the ids of the IndexingLayers this event is stored in.
 // it closes the channels when it ends.
-func (b *MultiMmapManager) queryByIDs(ch chan *nostr.Event, ids []string, presence chan []uint16) {
-    go b.lmdbEnv.View(func(txn *lmdb.Txn) error {
+func (b *MultiMmapManager) queryByIDs(yield func(nostr.Event) bool, ids []nostr.ID, presence chan []uint16) {
+    b.lmdbEnv.View(func(txn *lmdb.Txn) error {
         txn.RawRead = true
-        defer close(ch)
         if presence != nil {
             defer close(presence)
         }
@@ -47,15 +51,17 @@ func (b *MultiMmapManager) queryByIDs(ch chan *nostr.Event, ids []string, presen
             continue
         }

-        idPrefix8, _ := hex.DecodeString(id[0 : 8*2])
-        val, err := txn.Get(b.indexId, idPrefix8)
+        val, err := txn.Get(b.indexId, id[0:8])
         if err == nil {
             pos := positionFromBytes(val[0:12])
-            evt := &nostr.Event{}
-            if err := b.loadEvent(pos, evt); err != nil {
+            evt := nostr.Event{}
+            if err := b.loadEvent(pos, &evt); err != nil {
                 panic(fmt.Errorf("failed to decode event from %v: %w", pos, err))
             }
-            ch <- evt
+
+            if !yield(evt) {
+                return nil
+            }

             if presence != nil {
                 layers := make([]uint16, 0, (len(val)-12)/2)
@@ -71,45 +77,42 @@ func (b *MultiMmapManager) queryByIDs(ch chan *nostr.Event, ids []string, presen
     })
 }

-func (il *IndexingLayer) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
-    ch := make(chan *nostr.Event)
-
-    if len(filter.IDs) > 0 {
-        il.mmmm.queryByIDs(ch, filter.IDs, nil)
-        return ch, nil
-    }
-
-    if filter.Search != "" {
-        close(ch)
-        return ch, nil
-    }
-
-    // max number of events we'll return
-    limit := il.MaxLimit / 4
-    if filter.Limit > 0 && filter.Limit < il.MaxLimit {
-        limit = filter.Limit
-    }
-    if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
-        close(ch)
-        return ch, nil
-    } else if tlimit > 0 {
-        limit = tlimit
-    }
-
-    go il.lmdbEnv.View(func(txn *lmdb.Txn) error {
-        txn.RawRead = true
-        defer close(ch)
-
-        results, err := il.query(txn, filter, limit)
-
-        for _, ie := range results {
-            ch <- ie.Event
-        }
-
-        return err
-    })
-
-    return ch, nil
+func (il *IndexingLayer) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+    return func(yield func(nostr.Event) bool) {
+        if len(filter.IDs) > 0 {
+            il.mmmm.queryByIDs(yield, filter.IDs, nil)
+            return
+        }
+
+        if filter.Search != "" {
+            return
+        }
+
+        // max number of events we'll return
+        limit := il.MaxLimit / 4
+        if filter.Limit > 0 && filter.Limit < il.MaxLimit {
+            limit = filter.Limit
+        }
+        if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+            return
+        } else if tlimit > 0 {
+            limit = tlimit
+        }
+
+        il.lmdbEnv.View(func(txn *lmdb.Txn) error {
+            txn.RawRead = true
+
+            results, err := il.query(txn, filter, limit)
+
+            for _, ie := range results {
+                if !yield(ie.Event) {
+                    break
+                }
+            }
+
+            return err
+        })
+    }
 }
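QueryEvents above moves from a channel-plus-error API to a Go 1.23 range-over-func iterator (iter.Seq[nostr.Event]): instead of spawning a goroutine and closing a channel, the store runs the LMDB read transaction inside the iterator and stops as soon as the consumer's yield returns false. A minimal consumer sketch, assuming only the signatures shown in this diff (the helper name is illustrative):

```go
// before: ch, err := il.QueryEvents(ctx, filter); for evt := range ch { ... }
// after: range directly over the iterator; breaking ends the scan early
func collectUpTo(il *IndexingLayer, filter nostr.Filter, n int) []nostr.Event {
	out := make([]nostr.Event, 0, n)
	for evt := range il.QueryEvents(filter) {
		out = append(out, evt)
		if len(out) == n {
			break // yield returns false and the View callback unwinds
		}
	}
	return out
}
```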
 func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) {
@@ -128,16 +131,16 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
     // we will continue to pull from it as soon as some other iterator takes the position
     oldest := internal.IterEvent{Q: -1}

-    secondPhase := false // after we have gathered enough events we will change the way we iterate
+    sndPhase := false // after we have gathered enough events we will change the way we iterate
     secondBatch := make([][]internal.IterEvent, 0, len(queries)+1)
-    secondPhaseParticipants := make([]int, 0, len(queries)+1)
+    sndPhaseParticipants := make([]int, 0, len(queries)+1)

     // while merging results in the second phase we will alternate between these two lists
     // to avoid having to create new lists all the time
-    var secondPhaseResultsA []internal.IterEvent
-    var secondPhaseResultsB []internal.IterEvent
-    var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
-    var secondPhaseHasResultsPending bool
+    var sndPhaseResultsA []internal.IterEvent
+    var sndPhaseResultsB []internal.IterEvent
+    var sndPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
+    var sndPhaseHasResultsPending bool

     remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing
     batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -221,8 +224,8 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
         }

         // decode the entire thing (TODO: do a conditional decode while also checking the extra tag)
-        event := &nostr.Event{}
-        if err := betterbinary.Unmarshal(bin, event); err != nil {
+        event := nostr.Event{}
+        if err := betterbinary.Unmarshal(bin, &event); err != nil {
             log.Printf("mmm: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n",
                 bin[0:32], query.prefix, query.startingPoint, query.dbi, err)
             return nil, fmt.Errorf("event read error: %w", err)
@@ -240,18 +243,18 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
         evt := internal.IterEvent{Event: event, Q: q}
         //
         //
-        if secondPhase {
+        if sndPhase {
             // do the process described below at HIWAWVRTP.
             // if we've reached here this means we've already passed the `since` check.
             // now we have to eliminate the event currently at the `since` threshold.
             nextThreshold := firstPhaseResults[len(firstPhaseResults)-2]
-            if oldest.Event == nil {
+            if oldest.Event.ID == nostr.ZeroID {
                 // fmt.Println("  b1", evt.ID[0:8])
                 // BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE)
                 // when we don't have the oldest set, we will keep the results
                 // and not change the cutting point -- it's bad, but hopefully not that bad.
                 results[q] = append(results[q], evt)
-                secondPhaseHasResultsPending = true
+                sndPhaseHasResultsPending = true
             } else if nextThreshold.CreatedAt > oldest.CreatedAt {
                 // fmt.Println("  b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt, evt.ID[0:8])
                 // one of the events we have stored is the actual next threshold
@@ -268,7 +271,7 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
                 // finally
                 // add this to the results to be merged later
                 results[q] = append(results[q], evt)
-                secondPhaseHasResultsPending = true
+                sndPhaseHasResultsPending = true
             } else if nextThreshold.CreatedAt < evt.CreatedAt {
                 // the next last event in the firstPhaseResults is the next threshold
                 // fmt.Println("  b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt, evt.ID[0:8])
@@ -278,7 +281,7 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
                 // fmt.Println("    new since", since)
                 // add this to the results to be merged later
                 results[q] = append(results[q], evt)
-                secondPhaseHasResultsPending = true
+                sndPhaseHasResultsPending = true
                 // update the oldest event
                 if evt.CreatedAt < oldest.CreatedAt {
                     oldest = evt
@@ -297,7 +300,7 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
             firstPhaseTotalPulled++

             // update the oldest event
-            if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt {
+            if oldest.Event.ID == nostr.ZeroID || evt.CreatedAt < oldest.CreatedAt {
                 oldest = evt
             }
         }
@@ -323,20 +326,20 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([

         // we will do this check if we don't accumulated the requested number of events yet
         // fmt.Println("oldest", oldest.Event, "from iter", oldest.Q)
-        if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) {
+        if sndPhase && sndPhaseHasResultsPending && (oldest.Event.ID == nostr.ZeroID || remainingUnexhausted == 0) {
             // fmt.Println("second phase aggregation!")
             // when we are in the second phase we will aggressively aggregate results on every iteration
             //
             secondBatch = secondBatch[:0]
-            for s := 0; s < len(secondPhaseParticipants); s++ {
-                q := secondPhaseParticipants[s]
+            for s := 0; s < len(sndPhaseParticipants); s++ {
+                q := sndPhaseParticipants[s]

                 if len(results[q]) > 0 {
                     secondBatch = append(secondBatch, results[q])
                 }

                 if exhausted[q] {
-                    secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s)
+                    sndPhaseParticipants = internal.SwapDelete(sndPhaseParticipants, s)
                     s--
                 }
             }
@@ -344,29 +347,29 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
             // every time we get here we will alternate between these A and B lists
             // combining everything we have into a new partial results list.
             // after we've done that we can again set the oldest.
-            // fmt.Println("  xxx", secondPhaseResultsToggle)
-            if secondPhaseResultsToggle {
-                secondBatch = append(secondBatch, secondPhaseResultsB)
-                secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA)
-                oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1]
-                // fmt.Println("    new aggregated a", len(secondPhaseResultsB))
+            // fmt.Println("  xxx", sndPhaseResultsToggle)
+            if sndPhaseResultsToggle {
+                secondBatch = append(secondBatch, sndPhaseResultsB)
+                sndPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsA)
+                oldest = sndPhaseResultsA[len(sndPhaseResultsA)-1]
+                // fmt.Println("    new aggregated a", len(sndPhaseResultsB))
             } else {
-                secondBatch = append(secondBatch, secondPhaseResultsA)
-                secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB)
-                oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1]
-                // fmt.Println("    new aggregated b", len(secondPhaseResultsB))
+                secondBatch = append(secondBatch, sndPhaseResultsA)
+                sndPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsB)
+                oldest = sndPhaseResultsB[len(sndPhaseResultsB)-1]
+                // fmt.Println("    new aggregated b", len(sndPhaseResultsB))
             }
-            secondPhaseResultsToggle = !secondPhaseResultsToggle
+            sndPhaseResultsToggle = !sndPhaseResultsToggle

             since = uint32(oldest.CreatedAt)
             // fmt.Println("  new since", since)

             // reset the `results` list so we can keep using it
             results = results[:len(queries)]
-            for _, q := range secondPhaseParticipants {
+            for _, q := range sndPhaseParticipants {
                 results[q] = results[q][:0]
             }
-        } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
+        } else if !sndPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
             // fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted)

             // we will exclude this oldest number as it is not relevant anymore
@@ -410,16 +413,16 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
             results[q] = results[q][:0]

             // build this index of indexes with everybody who remains
-            secondPhaseParticipants = append(secondPhaseParticipants, q)
+            sndPhaseParticipants = append(sndPhaseParticipants, q)
         }

         // we create these two lists and alternate between them so we don't have to create a
         // a new one every time
-        secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
-        secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
+        sndPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
+        sndPhaseResultsB = make([]internal.IterEvent, 0, limit*2)

         // from now on we won't run this block anymore
-        secondPhase = true
+        sndPhase = true
     }

     // fmt.Println("remaining", remainingUnexhausted)
@@ -428,27 +431,27 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
        }
     }

-    // fmt.Println("is secondPhase?", secondPhase)
+    // fmt.Println("is sndPhase?", sndPhase)

     var combinedResults []internal.IterEvent

-    if secondPhase {
+    if sndPhase {
         // fmt.Println("ending second phase")
-        // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff,
+        // when we reach this point either sndPhaseResultsA or sndPhaseResultsB will be full of stuff,
         // the other will be empty
-        var secondPhaseResults []internal.IterEvent
-        // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB))
-        if secondPhaseResultsToggle {
-            secondPhaseResults = secondPhaseResultsB
-            combinedResults = secondPhaseResultsA[0:limit] // reuse this
-            // fmt.Println("  using b", len(secondPhaseResultsA))
+        var sndPhaseResults []internal.IterEvent
+        // fmt.Println("xxx", sndPhaseResultsToggle, len(sndPhaseResultsA), len(sndPhaseResultsB))
+        if sndPhaseResultsToggle {
+            sndPhaseResults = sndPhaseResultsB
+            combinedResults = sndPhaseResultsA[0:limit] // reuse this
+            // fmt.Println("  using b", len(sndPhaseResultsA))
         } else {
-            secondPhaseResults = secondPhaseResultsA
-            combinedResults = secondPhaseResultsB[0:limit] // reuse this
-            // fmt.Println("  using a", len(secondPhaseResultsA))
+            sndPhaseResults = sndPhaseResultsA
+            combinedResults = sndPhaseResultsB[0:limit] // reuse this
+            // fmt.Println("  using a", len(sndPhaseResultsA))
         }

-        all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults}
+        all := [][]internal.IterEvent{firstPhaseResults, sndPhaseResults}
         combinedResults = internal.MergeSortMultiple(all, limit, combinedResults)
         // fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit)
     } else {
@@ -5,9 +5,9 @@ import (
     "encoding/hex"
     "fmt"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/internal"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/internal"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

 type query struct {
@@ -127,7 +127,7 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
     if filter.Authors != nil {
         extraAuthors = make([][32]byte, len(filter.Authors))
         for i, pk := range filter.Authors {
-            hex.Decode(extraAuthors[i][:], []byte(pk))
+            copy(extraAuthors[i][:], pk[:])
         }
     }
@@ -1,22 +1,28 @@
 package mmm

 import (
-    "context"
     "fmt"
     "math"
+    "runtime"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/internal"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/internal"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

-func (il *IndexingLayer) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
     // sanity checking
     if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
         return fmt.Errorf("event with values out of expected boundaries")
     }

-    filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
     il.mmmm.writeMutex.Lock()
     defer il.mmmm.writeMutex.Unlock()

+    runtime.LockOSThread()
+    defer runtime.UnlockOSThread()
+
+    filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
     if nostr.IsAddressableKind(evt.Kind) {
         // when addressable, add the "d" tag to the filter
         filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
@@ -35,7 +41,7 @@ func (il *IndexingLayer) ReplaceEvent(ctx context.Context, evt *nostr.Event) err
     shouldStore := true
     for _, previous := range prevResults {
         if internal.IsOlder(previous.Event, evt) {
-            if err := il.delete(mmmtxn, iltxn, previous.Event); err != nil {
+            if err := il.delete(mmmtxn, iltxn, previous.Event.ID); err != nil {
                 return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err)
             }
         } else {
@@ -1,9 +1,7 @@
 package mmm

 import (
-    "context"
     "encoding/binary"
-    "encoding/hex"
     "fmt"
     "os"
     "runtime"
@@ -11,73 +9,15 @@ import (
     "syscall"
     "unsafe"

-    "github.com/PowerDNS/lmdb-go/lmdb"
-    "fiatjaf.com/nostr/eventstore/mmm/betterbinary"
     "fiatjaf.com/nostr"
+    "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+    "github.com/PowerDNS/lmdb-go/lmdb"
 )

-func (b *MultiMmapManager) StoreGlobal(ctx context.Context, evt *nostr.Event) (stored bool, err error) {
-    someoneWantsIt := false
+func (il *IndexingLayer) SaveEvent(evt nostr.Event) error {
+    il.mmmm.writeMutex.Lock()
+    defer il.mmmm.writeMutex.Unlock()

-    b.mutex.Lock()
-    defer b.mutex.Unlock()
     runtime.LockOSThread()
     defer runtime.UnlockOSThread()

-    // do this just so it's cleaner, we're already locking the thread and the mutex anyway
-    mmmtxn, err := b.lmdbEnv.BeginTxn(nil, 0)
-    if err != nil {
-        return false, fmt.Errorf("failed to begin global transaction: %w", err)
-    }
-    mmmtxn.RawRead = true
-
-    iltxns := make([]*lmdb.Txn, 0, len(b.layers))
-    ils := make([]*IndexingLayer, 0, len(b.layers))
-
-    // ask if any of the indexing layers want this
-    for _, il := range b.layers {
-        if il.ShouldIndex != nil && il.ShouldIndex(ctx, evt) {
-            someoneWantsIt = true
-
-            iltxn, err := il.lmdbEnv.BeginTxn(nil, 0)
-            if err != nil {
-                mmmtxn.Abort()
-                for _, txn := range iltxns {
-                    txn.Abort()
-                }
-                return false, fmt.Errorf("failed to start txn on %s: %w", il.name, err)
-            }
-
-            ils = append(ils, il)
-            iltxns = append(iltxns, iltxn)
-        }
-    }
-
-    if !someoneWantsIt {
-        // no one wants it
-        mmmtxn.Abort()
-        return false, fmt.Errorf("not wanted")
-    }
-
-    stored, err = b.storeOn(mmmtxn, ils, iltxns, evt)
-    if stored {
-        mmmtxn.Commit()
-        for _, txn := range iltxns {
-            txn.Commit()
-        }
-    } else {
-        mmmtxn.Abort()
-        for _, txn := range iltxns {
-            txn.Abort()
-        }
-    }
-
-    return stored, err
-}
-
-func (il *IndexingLayer) SaveEvent(ctx context.Context, evt *nostr.Event) error {
-    il.mmmm.mutex.Lock()
-    defer il.mmmm.mutex.Unlock()
-    runtime.LockOSThread()
-    defer runtime.UnlockOSThread()
@@ -111,7 +51,7 @@ func (b *MultiMmapManager) storeOn(
     mmmtxn *lmdb.Txn,
     ils []*IndexingLayer,
     iltxns []*lmdb.Txn,
-    evt *nostr.Event,
+    evt nostr.Event,
 ) (stored bool, err error) {
     // sanity checking
     if evt.CreatedAt > maxuint32 || evt.Kind > maxuint16 {
@@ -119,8 +59,7 @@ func (b *MultiMmapManager) storeOn(
     }

     // check if we already have this id
-    idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
-    val, err := mmmtxn.Get(b.indexId, idPrefix8)
+    val, err := mmmtxn.Get(b.indexId, evt.ID[0:8])
     if err == nil {
         // we found the event, now check if it is already indexed by the layers that want to store it
         for i := len(ils) - 1; i >= 0; i-- {
@@ -149,7 +88,7 @@ func (b *MultiMmapManager) storeOn(

     // get event binary size
     pos := position{
-        size: uint32(betterbinary.Measure(*evt)),
+        size: uint32(betterbinary.Measure(evt)),
     }
     if pos.size >= 1<<16 {
         return false, fmt.Errorf("event too large to store, max %d, got %d", 1<<16, pos.size)
@@ -193,7 +132,7 @@ func (b *MultiMmapManager) storeOn(
     }

     // write to the mmap
-    if err := betterbinary.Marshal(*evt, b.mmapf[pos.start:]); err != nil {
+    if err := betterbinary.Marshal(evt, b.mmapf[pos.start:]); err != nil {
         return false, fmt.Errorf("error marshaling to %d: %w", pos.start, err)
     }
@@ -219,8 +158,8 @@ func (b *MultiMmapManager) storeOn(
     }

     // store the id index with the refcounts
-    if err := mmmtxn.Put(b.indexId, idPrefix8, val, 0); err != nil {
-        panic(fmt.Errorf("failed to store %x by id: %w", idPrefix8, err))
+    if err := mmmtxn.Put(b.indexId, evt.ID[0:8], val, 0); err != nil {
+        panic(fmt.Errorf("failed to store %x by id: %w", evt.ID[:], err))
     }

     // msync
@@ -11,8 +11,6 @@ type RelayWrapper struct {
     Store
 }

-var _ nostr.RelayStore = (*RelayWrapper)(nil)
-
 func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
     if nostr.IsEphemeralKind(evt.Kind) {
         // do not store ephemeral events
@@ -24,33 +22,14 @@ func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {

     if nostr.IsRegularKind(evt.Kind) {
         // regular events are just saved directly
-        if err := w.SaveEvent(ctx, &evt); err != nil && err != ErrDupEvent {
+        if err := w.SaveEvent(evt); err != nil && err != ErrDupEvent {
             return fmt.Errorf("failed to save: %w", err)
         }
         return nil
     }

     // others are replaced
-    w.Store.ReplaceEvent(ctx, &evt)
+    w.Store.ReplaceEvent(evt)

     return nil
 }
-
-func (w RelayWrapper) QuerySync(ctx context.Context, filter nostr.Filter) ([]*nostr.Event, error) {
-    ch, err := w.Store.QueryEvents(ctx, filter)
-    if err != nil {
-        return nil, fmt.Errorf("failed to query: %w", err)
-    }
-
-    n := filter.Limit
-    if n == 0 {
-        n = 500
-    }
-
-    results := make([]*nostr.Event, 0, n)
-    for evt := range ch {
-        results = append(results, evt)
-    }
-
-    return results, nil
-}
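With QuerySync removed from RelayWrapper above, callers that want a slice collect it from the iterator themselves, which is what the many-authors test at the end of this commit does. A sketch of the replacement, assuming Store.QueryEvents returns iter.Seq[nostr.Event] as elsewhere in this diff (the helper name is illustrative):

```go
// querySync is a hypothetical stand-in for the removed RelayWrapper.QuerySync.
func querySync(store eventstore.Store, filter nostr.Filter) []nostr.Event {
	// slices.Collect drains the iterator into a freshly allocated slice
	return slices.Collect(store.QueryEvents(filter))
}
```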
@@ -1,164 +0,0 @@
-package strfry
-
-import (
-    "bytes"
-    "context"
-    "fmt"
-    "os"
-    "os/exec"
-    "path/filepath"
-    "strings"
-
-    "fiatjaf.com/nostr/eventstore"
-    "github.com/mailru/easyjson"
-    "fiatjaf.com/nostr"
-)
-
-var _ eventstore.Store = (*StrfryBackend)(nil)
-
-type StrfryBackend struct {
-    ConfigPath     string
-    ExecutablePath string
-}
-
-func (s *StrfryBackend) Init() error {
-    if s.ExecutablePath == "" {
-        configPath := filepath.Dir(s.ConfigPath)
-        os.Setenv("PATH", configPath+":"+os.Getenv("PATH"))
-        exe, err := exec.LookPath("strfry")
-        if err != nil {
-            return fmt.Errorf("failed to find strfry executable: %w (better provide it manually)", err)
-        }
-        s.ExecutablePath = exe
-    }
-
-    return nil
-}
-
-func (_ StrfryBackend) Close() {}
-
-func (s StrfryBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
-    stdout, err := s.baseStrfryScan(ctx, filter)
-    if err != nil {
-        return nil, err
-    }
-
-    ch := make(chan *nostr.Event)
-    go func() {
-        defer close(ch)
-        for {
-            line, err := stdout.ReadBytes('\n')
-            if err != nil {
-                break
-            }
-
-            evt := &nostr.Event{}
-            easyjson.Unmarshal(line, evt)
-            if evt.ID == "" {
-                continue
-            }
-
-            ch <- evt
-        }
-    }()
-
-    return ch, nil
-}
-
-func (s *StrfryBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
-    return s.SaveEvent(ctx, evt)
-}
-
-func (s StrfryBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
-    args := make([]string, 0, 4)
-    if s.ConfigPath != "" {
-        args = append(args, "--config="+s.ConfigPath)
-    }
-    args = append(args, "import")
-    args = append(args, "--show-rejected")
-    args = append(args, "--no-verify")
-
-    cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
-    var stderr bytes.Buffer
-    cmd.Stderr = &stderr
-
-    // event is sent on stdin
-    j, _ := easyjson.Marshal(evt)
-    cmd.Stdin = bytes.NewBuffer(j)
-
-    err := cmd.Run()
-    if err != nil {
-        return fmt.Errorf(
-            "%s %s failed: %w, (%s)",
-            s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
-        )
-    }
-
-    return nil
-}
-
-func (s StrfryBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
-    args := make([]string, 0, 3)
-    if s.ConfigPath != "" {
-        args = append(args, "--config="+s.ConfigPath)
-    }
-    args = append(args, "delete")
-    args = append(args, "--filter={\"ids\":[\""+evt.ID+"\"]}")
-
-    cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
-    var stderr bytes.Buffer
-    cmd.Stderr = &stderr
-
-    err := cmd.Run()
-    if err != nil {
-        return fmt.Errorf(
-            "%s %s failed: %w, (%s)",
-            s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
-        )
-    }
-
-    return nil
-}
-
-func (s StrfryBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
-    stdout, err := s.baseStrfryScan(ctx, filter)
-    if err != nil {
-        return 0, err
-    }
-
-    var count int64
-    for {
-        _, err := stdout.ReadBytes('\n')
-        if err != nil {
-            break
-        }
-        count++
-    }
-
-    return count, nil
-}
-
-func (s StrfryBackend) baseStrfryScan(ctx context.Context, filter nostr.Filter) (*bytes.Buffer, error) {
-    args := make([]string, 0, 3)
-    if s.ConfigPath != "" {
-        args = append(args, "--config="+s.ConfigPath)
-    }
-    args = append(args, "scan")
-    args = append(args, filter.String())
-
-    cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
-    var stdout bytes.Buffer
-    cmd.Stdout = &stdout
-    var stderr bytes.Buffer
-    cmd.Stderr = &stderr
-
-    err := cmd.Run()
-    if err != nil {
-        return nil, fmt.Errorf(
-            "%s %s failed: %w, (%s)",
-            s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
-        )
-    }
-
-    return &stdout, nil
-}
@@ -7,12 +7,11 @@ import (
     "os"
     "testing"

+    "fiatjaf.com/nostr"
     "fiatjaf.com/nostr/eventstore"
     "fiatjaf.com/nostr/eventstore/badger"
     "fiatjaf.com/nostr/eventstore/lmdb"
     "fiatjaf.com/nostr/eventstore/slicestore"
-    "fiatjaf.com/nostr/eventstore/sqlite3"
-    "fiatjaf.com/nostr"
 )

 func BenchmarkSliceStore(b *testing.B) {
@@ -35,61 +34,53 @@ func BenchmarkBadger(b *testing.B) {
     runBenchmarkOn(b, d)
 }

-func BenchmarkSQLite(b *testing.B) {
-    os.RemoveAll(dbpath + "sqlite")
-    q := &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryTagsLimit: 50}
-    q.Init()
-
-    runBenchmarkOn(b, q)
-}
-
 func runBenchmarkOn(b *testing.B, db eventstore.Store) {
     for i := 0; i < 10000; i++ {
         eTag := make([]byte, 32)
         binary.BigEndian.PutUint16(eTag, uint16(i))

-        ref, _ := nostr.GetPublicKey(sk3)
+        ref := nostr.GetPublicKey(sk3)
         if i%3 == 0 {
-            ref, _ = nostr.GetPublicKey(sk4)
+            ref = nostr.GetPublicKey(sk4)
         }

-        evt := &nostr.Event{
+        evt := nostr.Event{
             CreatedAt: nostr.Timestamp(i*10 + 2),
             Content:   fmt.Sprintf("hello %d", i),
             Tags: nostr.Tags{
                 {"t", fmt.Sprintf("t%d", i)},
                 {"e", hex.EncodeToString(eTag)},
-                {"p", ref},
+                {"p", ref.Hex()},
             },
-            Kind: i % 10,
+            Kind: uint16(i % 10),
         }
         sk := sk3
         if i%3 == 0 {
             sk = sk4
         }
         evt.Sign(sk)
-        db.SaveEvent(ctx, evt)
+        db.SaveEvent(evt)
     }

     filters := make([]nostr.Filter, 0, 10)
-    filters = append(filters, nostr.Filter{Kinds: []int{1, 4, 8, 16}})
-    pk3, _ := nostr.GetPublicKey(sk3)
-    filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}})
-    filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}, Kinds: []int{3, 4}})
+    filters = append(filters, nostr.Filter{Kinds: []uint16{1, 4, 8, 16}})
+    pk3 := nostr.GetPublicKey(sk3)
+    filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}})
+    filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []uint16{3, 4}})
     filters = append(filters, nostr.Filter{})
     filters = append(filters, nostr.Filter{Limit: 20})
-    filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3}}})
-    pk4, _ := nostr.GetPublicKey(sk4)
-    filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
-    filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
+    filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}})
+    pk4 := nostr.GetPublicKey(sk4)
+    filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
+    filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
     eTags := make([]string, 20)
     for i := 0; i < 20; i++ {
         eTag := make([]byte, 32)
         binary.BigEndian.PutUint16(eTag, uint16(i))
         eTags[i] = hex.EncodeToString(eTag)
     }
-    filters = append(filters, nostr.Filter{Kinds: []int{9}, Tags: nostr.TagMap{"e": eTags}})
-    filters = append(filters, nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
+    filters = append(filters, nostr.Filter{Kinds: []uint16{9}, Tags: nostr.TagMap{"e": eTags}})
+    filters = append(filters, nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
    filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}})
     filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50})
@@ -97,17 +88,17 @@ func runBenchmarkOn(b *testing.B, db eventstore.Store) {
         for q, filter := range filters {
             b.Run(fmt.Sprintf("q-%d", q), func(b *testing.B) {
                 for i := 0; i < b.N; i++ {
-                    _, _ = db.QueryEvents(ctx, filter)
+                    _ = db.QueryEvents(filter)
                 }
             })
         }
     })

     b.Run("insert", func(b *testing.B) {
-        evt := &nostr.Event{Kind: 788, CreatedAt: nostr.Now(), Content: "blergh", Tags: nostr.Tags{{"t", "spam"}}}
+        evt := nostr.Event{Kind: 788, CreatedAt: nostr.Now(), Content: "blergh", Tags: nostr.Tags{{"t", "spam"}}}
         evt.Sign(sk4)
         for i := 0; i < b.N; i++ {
-            db.SaveEvent(ctx, evt)
+            db.SaveEvent(evt)
         }
     })
 }
@@ -5,16 +5,17 @@ import (
     "os"
     "testing"

+    "fiatjaf.com/nostr"
     "fiatjaf.com/nostr/eventstore"
     "fiatjaf.com/nostr/eventstore/badger"
     "fiatjaf.com/nostr/eventstore/lmdb"
     "fiatjaf.com/nostr/eventstore/slicestore"
 )

-const (
+var (
     dbpath = "/tmp/eventstore-test"
-    sk3    = "0000000000000000000000000000000000000000000000000000000000000003"
-    sk4    = "0000000000000000000000000000000000000000000000000000000000000004"
+    sk3    = nostr.MustSecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000003")
+    sk4    = nostr.MustSecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000004")
 )

 var ctx = context.Background()
@@ -7,8 +7,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"fiatjaf.com/nostr/eventstore"
|
||||
"fiatjaf.com/nostr"
|
||||
"fiatjaf.com/nostr/eventstore"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -16,11 +16,11 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
err := db.Init()
|
||||
require.NoError(t, err)
|
||||
|
||||
allEvents := make([]*nostr.Event, 0, 10)
|
||||
allEvents := make([]nostr.Event, 0, 10)
|
||||
|
||||
// insert
|
||||
for i := 0; i < 10; i++ {
|
||||
evt := &nostr.Event{
|
||||
evt := nostr.Event{
|
||||
CreatedAt: nostr.Timestamp(i*10 + 2),
|
||||
Content: fmt.Sprintf("hello %d", i),
|
||||
Tags: nostr.Tags{
|
||||
@@ -38,7 +38,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
}
|
||||
evt.Sign(sk)
|
||||
allEvents = append(allEvents, evt)
|
||||
err = db.SaveEvent(ctx, evt)
|
||||
err = db.SaveEvent(evt)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -67,60 +67,61 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
}
|
||||
|
||||
{
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{IDs: []string{allEvents[7].ID, allEvents[9].ID}})
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{IDs: []nostr.ID{allEvents[7].ID, allEvents[9].ID}})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, 2)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[7], allEvents[9]},
|
||||
[]nostr.Event{allEvents[7], allEvents[9]},
|
||||
results,
|
||||
"id query error")
|
||||
}
|
||||
|
||||
{
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{1}})
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{1}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]},
|
||||
[]nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]},
|
||||
results,
|
||||
"kind query error")
|
||||
}
|
||||
|
||||
{
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{9}})
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
|
||||
[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
|
||||
results,
|
||||
"second kind query error")
|
||||
}
|
||||
|
||||
{
|
||||
pk4, _ := nostr.GetPublicKey(sk4)
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Authors: []string{pk4}})
|
||||
pk4 := nostr.GetPublicKey(sk4)
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Authors: []nostr.PubKey{pk4}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[0], allEvents[3], allEvents[6], allEvents[9]},
|
||||
[]nostr.Event{allEvents[0], allEvents[3], allEvents[6], allEvents[9]},
|
||||
results,
|
||||
"pubkey query error")
|
||||
}
|
||||
|
||||
{
|
||||
pk3, _ := nostr.GetPublicKey(sk3)
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{9}, Authors: []string{pk3}})
|
||||
pk3 := nostr.GetPublicKey(sk3)
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[2], allEvents[4], allEvents[8]},
|
||||
[]nostr.Event{allEvents[2], allEvents[4], allEvents[8]},
|
||||
results,
|
||||
"pubkey kind query error")
|
||||
}
|
||||
|
||||
{
|
||||
pk3, _ := nostr.GetPublicKey(sk3)
|
||||
pk4, _ := nostr.GetPublicKey(sk4)
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{9, 5, 7}, Authors: []string{pk3, pk4, pk4[1:] + "a"}})
|
||||
pk3 := nostr.GetPublicKey(sk3)
|
||||
pk4 := nostr.GetPublicKey(sk4)
|
||||
pk4[1] = 'a'
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
|
||||
[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
|
||||
results,
|
||||
"2 pubkeys and kind query error")
|
||||
}
|
||||
@@ -129,14 +130,14 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[2], allEvents[4], allEvents[6]},
|
||||
[]nostr.Event{allEvents[2], allEvents[4], allEvents[6]},
|
||||
results,
|
||||
"tag query error")
|
||||
}
|
||||
|
||||
// delete
|
||||
require.NoError(t, db.DeleteEvent(ctx, allEvents[4]), "delete 1 error")
|
||||
require.NoError(t, db.DeleteEvent(ctx, allEvents[5]), "delete 2 error")
|
||||
require.NoError(t, db.DeleteEvent(allEvents[4].ID), "delete 1 error")
|
||||
require.NoError(t, db.DeleteEvent(allEvents[5].ID), "delete 2 error")
|
||||
|
||||
// query again
|
||||
{
|
||||
@@ -152,7 +153,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[2], allEvents[6]},
|
||||
[]nostr.Event{allEvents[2], allEvents[6]},
|
||||
results,
|
||||
"second tag query error")
|
||||
}
|
||||
@@ -161,7 +162,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
|
||||
results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}})
|
||||
require.NoError(t, err)
|
||||
require.ElementsMatch(t,
|
||||
[]*nostr.Event{allEvents[3]},
|
||||
[]nostr.Event{allEvents[3]},
|
||||
results,
|
||||
"'e' tag query error")
|
||||
}
|
||||
@@ -184,7 +185,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	p := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
 	p2 := "2eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"

-	newEvents := []*nostr.Event{
+	newEvents := []nostr.Event{
 		{Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1984, CreatedAt: nostr.Timestamp(100), Content: "first"},
 		{Tags: nostr.Tags{nostr.Tag{"p", p}, nostr.Tag{"t", "x"}}, Kind: 1984, CreatedAt: nostr.Timestamp(101), Content: "middle"},
 		{Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1984, CreatedAt: nostr.Timestamp(102), Content: "last"},
@@ -195,21 +196,21 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 		{Tags: nostr.Tags{nostr.Tag{"p", p}, nostr.Tag{"p", p2}}, Kind: 1, CreatedAt: nostr.Timestamp(104), Content: "trololo"},
 	}

-	sk := nostr.GeneratePrivateKey()
+	sk := nostr.Generate()
 	for _, newEvent := range newEvents {
 		newEvent.Sign(sk)
-		require.NoError(t, db.SaveEvent(ctx, newEvent))
+		require.NoError(t, db.SaveEvent(newEvent))
 	}

 	{
 		results, err := w.QuerySync(ctx, nostr.Filter{
 			Tags:  nostr.TagMap{"p": []string{p}},
-			Kinds: []int{1984},
+			Kinds: []uint16{1984},
 			Limit: 2,
 		})
 		require.NoError(t, err)
 		require.ElementsMatch(t,
-			[]*nostr.Event{newEvents[2], newEvents[1]},
+			[]nostr.Event{newEvents[2], newEvents[1]},
 			results,
 			"'p' tag 1 query error")
 	}
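Two more API changes land in this hunk: nostr.Generate() replaces GeneratePrivateKey() and returns a typed secret key, and SaveEvent loses its context parameter while taking an event value. A standalone sketch under those inferred signatures (signAndSave is a hypothetical helper):

// Sketch: the new save flow (signatures inferred from this diff, not authoritative).
package sketch

import (
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
)

func signAndSave(db eventstore.Store) error {
	sk := nostr.Generate()                           // replaces nostr.GeneratePrivateKey()
	evt := nostr.Event{Kind: 1984, Content: "hello"} // events are values now, not pointers
	if err := evt.Sign(sk); err != nil {
		return err
	}
	return db.SaveEvent(evt) // no ctx argument anymore
}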
@@ -222,7 +223,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 		require.NoError(t, err)
 		require.ElementsMatch(t,
 			// the results won't be in canonical time order because this query is too awful, needs a kind
-			[]*nostr.Event{newEvents[1]},
+			[]nostr.Event{newEvents[1]},
 			results,
 			"'p' tag 2 query error")
 	}
@@ -230,7 +231,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		results, err := w.QuerySync(ctx, nostr.Filter{
 			Tags:  nostr.TagMap{"p": []string{p, p2}},
-			Kinds: []int{1},
+			Kinds: []uint16{1},
 			Limit: 4,
 		})
 		require.NoError(t, err)
@@ -2,13 +2,12 @@ package test

 import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"slices"
 	"testing"

-	"fiatjaf.com/nostr/eventstore"
+	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore"
 	"github.com/stretchr/testify/require"
 )
@@ -18,35 +17,35 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) {
 	const total = 10000
 	const limit = 500
 	const authors = 1700
-	kinds := []int{6, 7, 8}
+	kinds := []uint16{6, 7, 8}

 	bigfilter := nostr.Filter{
-		Authors: make([]string, authors),
+		Authors: make([]nostr.PubKey, authors),
 		Kinds:   kinds,
 		Limit:   limit,
 	}
 	for i := 0; i < authors; i++ {
 		sk := make([]byte, 32)
 		binary.BigEndian.PutUint32(sk, uint32(i%(total/5))+1)
-		pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk))
+		pk := nostr.GetPublicKey([32]byte(sk))
 		bigfilter.Authors[i] = pk
 	}

-	ordered := make([]*nostr.Event, 0, total)
+	ordered := make([]nostr.Event, 0, total)
 	for i := 0; i < total; i++ {
 		sk := make([]byte, 32)
 		binary.BigEndian.PutUint32(sk, uint32(i%(total/5))+1)

-		evt := &nostr.Event{
+		evt := nostr.Event{
 			CreatedAt: nostr.Timestamp(i*i) / 4,
 			Content:   fmt.Sprintf("lots of stuff %d", i),
 			Tags:      nostr.Tags{},
-			Kind:      i % 10,
+			Kind:      uint16(i % 10),
 		}
-		err := evt.Sign(hex.EncodeToString(sk))
+		err := evt.Sign([32]byte(sk))
 		require.NoError(t, err)

-		err = db.SaveEvent(ctx, evt)
+		err = db.SaveEvent(evt)
 		require.NoError(t, err)

 		if bigfilter.Matches(evt) {
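GetPublicKey and Sign no longer take hex-encoded strings: the test now hands the raw 32-byte seed over directly. A sketch of the deterministic-key derivation this loop implies (nthAuthor is a hypothetical name; the [32]byte conversion from a slice requires Go 1.20+):

// Sketch: derive the i-th deterministic test pubkey (inferred from the loop above).
package sketch

import (
	"encoding/binary"

	"fiatjaf.com/nostr"
)

func nthAuthor(i uint32) nostr.PubKey {
	sk := make([]byte, 32)
	binary.BigEndian.PutUint32(sk, i+1) // +1 keeps the key nonzero
	// old: pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk))
	return nostr.GetPublicKey([32]byte(sk))
}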
@@ -56,12 +55,11 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) {

 	w := eventstore.RelayWrapper{Store: db}

-	res, err := w.QuerySync(ctx, bigfilter)
+	res := slices.Collect(w.QueryEvents(bigfilter))

-	require.NoError(t, err)
 	require.Len(t, res, limit)
-	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventPtrReverse))
-	slices.SortFunc(ordered, nostr.CompareEventPtrReverse)
+	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse))
+	slices.SortFunc(ordered, nostr.CompareEventReverse)
 	require.Equal(t, ordered[0], res[0])
 	require.Equal(t, ordered[limit-1], res[limit-1])
 	require.Equal(t, ordered[0:limit], res)
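This is the other half of the migration: QuerySync(ctx, filter) with its error return gives way to QueryEvents, which yields events as an iterator that slices.Collect can materialize, and the value-based CompareEventReverse replaces the pointer-based CompareEventPtrReverse. A sketch assuming QueryEvents returns an iter.Seq[nostr.Event] (consistent with the slices.Collect call above, but an inference):

// Sketch: collect and order query results under the inferred iterator API.
package sketch

import (
	"slices"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
)

func newestFirst(w eventstore.RelayWrapper, f nostr.Filter) []nostr.Event {
	res := slices.Collect(w.QueryEvents(f)) // no ctx, no error to check
	// CompareEventReverse sorts newest-first over event values, replacing
	// the pointer-based CompareEventPtrReverse
	slices.SortFunc(res, nostr.CompareEventReverse)
	return res
}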
@@ -2,16 +2,17 @@ package test

 import (
 	"context"
+	"slices"
 	"testing"
 	"time"

+	"fiatjaf.com/nostr"
 	"fiatjaf.com/nostr/eventstore"
 	"fiatjaf.com/nostr/eventstore/slicestore"
-	"fiatjaf.com/nostr"
 	"github.com/stretchr/testify/require"
 )

-var sk = "486d5f6d4891f4ce3cd5f4d6b62d184ec8ea10db455830ab7918ca43d4d7ad24"
+var sk = nostr.MustSecretKeyFromHex("486d5f6d4891f4ce3cd5f4d6b62d184ec8ea10db455830ab7918ca43d4d7ad24")

 func TestRelayWrapper(t *testing.T) {
 	ctx := context.Background()
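Fixture keys also stop being bare hex strings: MustSecretKeyFromHex parses the hex fixture into the typed secret key (and, presumably, panics on invalid input, per the usual Must naming convention). A sketch of the fixture pattern, grounded only in the lines above:

// Sketch: typed secret-key fixture (MustSecretKeyFromHex as used above).
package sketch

import "fiatjaf.com/nostr"

var sk = nostr.MustSecretKeyFromHex(
	"486d5f6d4891f4ce3cd5f4d6b62d184ec8ea10db455830ab7918ca43d4d7ad24",
)

// the matching pubkey can then be derived directly:
var pk = nostr.GetPublicKey(sk)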
@@ -44,6 +45,6 @@ func TestRelayWrapper(t *testing.T) {
 	}
 	time.Sleep(time.Millisecond * 200)

-	evts, _ := w.QuerySync(ctx, nostr.Filter{Kinds: []int{3}})
+	evts := slices.Collect(w.QueryEvents(nostr.Filter{Kinds: []uint16{3}}))
 	require.Len(t, evts, 1)
 }
@@ -4,10 +4,11 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"slices"
 	"testing"

-	"fiatjaf.com/nostr/eventstore"
+	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore"
 	"github.com/stretchr/testify/require"
 )
@@ -18,33 +19,33 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
 		eTag := make([]byte, 32)
 		binary.BigEndian.PutUint16(eTag, uint16(i))

-		ref, _ := nostr.GetPublicKey(sk3)
+		ref := nostr.GetPublicKey(sk3)
 		if i%3 == 0 {
-			ref, _ = nostr.GetPublicKey(sk4)
+			ref = nostr.GetPublicKey(sk4)
 		}

-		evt := &nostr.Event{
+		evt := nostr.Event{
 			CreatedAt: nostr.Timestamp(i*10 + 2),
 			Content:   fmt.Sprintf("hello %d", i),
 			Tags: nostr.Tags{
 				{"t", fmt.Sprintf("t%d", i)},
 				{"e", hex.EncodeToString(eTag)},
-				{"p", ref},
+				{"p", ref.Hex()},
 			},
-			Kind: i % 10,
+			Kind: uint16(i % 10),
 		}
 		sk := sk3
 		if i%3 == 0 {
 			sk = sk4
 		}
 		evt.Sign(sk)
-		err := db.SaveEvent(ctx, evt)
+		err := db.SaveEvent(evt)
 		require.NoError(t, err)
 	}

 	w := eventstore.RelayWrapper{Store: db}
-	pk3, _ := nostr.GetPublicKey(sk3)
-	pk4, _ := nostr.GetPublicKey(sk4)
+	pk3 := nostr.GetPublicKey(sk3)
+	pk4 := nostr.GetPublicKey(sk4)
 	eTags := make([]string, 20)
 	for i := 0; i < 20; i++ {
 		eTag := make([]byte, 32)
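One asymmetry worth noting in the hunk above: event tags remain plain strings, so a typed PubKey has to be rendered with .Hex() when it goes into a tag. A sketch of that tag-building step (mention is a hypothetical helper):

// Sketch: pubkeys must be hex-encoded when embedded in string tags.
package sketch

import "fiatjaf.com/nostr"

func mention(ref nostr.PubKey) nostr.Tag {
	// old: nostr.Tag{"p", ref} worked because ref was already a hex string
	return nostr.Tag{"p", ref.Hex()}
}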
@@ -53,16 +54,16 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
 	}

 	filters := make([]nostr.Filter, 0, 10)
-	filters = append(filters, nostr.Filter{Kinds: []int{1, 4, 8, 16}})
-	filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}})
-	filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}, Kinds: []int{3, 4}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{1, 4, 8, 16}})
+	filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}})
+	filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []uint16{3, 4}})
 	filters = append(filters, nostr.Filter{})
 	filters = append(filters, nostr.Filter{Limit: 20})
-	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3}}})
-	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
-	filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}})
-	filters = append(filters, nostr.Filter{Kinds: []int{9}, Tags: nostr.TagMap{"e": eTags}})
-	filters = append(filters, nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{9}, Tags: nostr.TagMap{"e": eTags}})
+	filters = append(filters, nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
 	filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}})
 	filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50})
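The same asymmetry shows up in filters: Authors takes typed pubkeys while TagMap values stay hex strings. A sketch contrasting the two, with both shapes taken from the appends above (twoFilters is a hypothetical helper; nostr.Generate().Public() is how the diff derives a throwaway pubkey):

// Sketch: typed Authors vs. string-valued TagMap in the migrated Filter.
package sketch

import "fiatjaf.com/nostr"

func twoFilters(pk3, pk4 nostr.PubKey) []nostr.Filter {
	return []nostr.Filter{
		{Kinds: []uint16{3, 4}, Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}},
		{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}},
	}
}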
@@ -73,8 +74,7 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
 		label := fmt.Sprintf("filter %d: %s", q, filter)

 		t.Run(fmt.Sprintf("q-%d", q), func(t *testing.T) {
-			results, err := w.QuerySync(ctx, filter)
-			require.NoError(t, err, filter)
+			results := slices.Collect(w.QueryEvents(filter))
 			require.NotEmpty(t, results, label)
 		})
 	}
@@ -2,13 +2,12 @@ package test

 import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"slices"
 	"testing"

-	"fiatjaf.com/nostr/eventstore"
+	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore"
 	"github.com/stretchr/testify/require"
 )
@@ -21,33 +20,33 @@ func unbalancedTest(t *testing.T, db eventstore.Store) {
 	const authors = 1400

 	bigfilter := nostr.Filter{
-		Authors: make([]string, authors),
+		Authors: make([]nostr.PubKey, authors),
 		Limit:   limit,
 	}
 	for i := 0; i < authors; i++ {
 		sk := make([]byte, 32)
 		binary.BigEndian.PutUint32(sk, uint32(i%(authors*2))+1)
-		pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk))
+		pk := nostr.GetPublicKey([32]byte(sk))
 		bigfilter.Authors[i] = pk
 	}
 	// fmt.Println("filter", bigfilter)

-	expected := make([]*nostr.Event, 0, total)
+	expected := make([]nostr.Event, 0, total)
 	for i := 0; i < total; i++ {
 		skseed := uint32(i%(authors*2)) + 1
 		sk := make([]byte, 32)
 		binary.BigEndian.PutUint32(sk, skseed)

-		evt := &nostr.Event{
+		evt := nostr.Event{
 			CreatedAt: nostr.Timestamp(skseed)*1000 + nostr.Timestamp(i),
 			Content:   fmt.Sprintf("unbalanced %d", i),
 			Tags:      nostr.Tags{},
 			Kind:      1,
 		}
-		err := evt.Sign(hex.EncodeToString(sk))
+		err := evt.Sign([32]byte(sk))
 		require.NoError(t, err)

-		err = db.SaveEvent(ctx, evt)
+		err = db.SaveEvent(evt)
 		require.NoError(t, err)

 		if bigfilter.Matches(evt) {
@@ -55,7 +54,7 @@ func unbalancedTest(t *testing.T, db eventstore.Store) {
 		}
 	}

-	slices.SortFunc(expected, nostr.CompareEventPtrReverse)
+	slices.SortFunc(expected, nostr.CompareEventReverse)
 	if len(expected) > limit {
 		expected = expected[0:limit]
 	}
@@ -63,11 +62,10 @@ func unbalancedTest(t *testing.T, db eventstore.Store) {

 	w := eventstore.RelayWrapper{Store: db}

-	res, err := w.QuerySync(ctx, bigfilter)
+	res := slices.Collect(w.QueryEvents(bigfilter))

-	require.NoError(t, err)
 	require.Equal(t, limit, len(res))
-	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventPtrReverse))
+	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse))
 	require.Equal(t, expected[0], res[0])

 	// fmt.Println(" expected result")