diff --git a/eventstore/badger/badger_test.go b/eventstore/badger/badger_test.go new file mode 100644 index 0000000..bf444a7 --- /dev/null +++ b/eventstore/badger/badger_test.go @@ -0,0 +1,61 @@ +package badger + +import ( + "os" + "testing" + + "fiatjaf.com/nostr" + "fiatjaf.com/nostr/eventstore" + "github.com/stretchr/testify/require" +) + +func TestBasicStoreAndQuery(t *testing.T) { + // create a temporary directory for the test database + dir, err := os.MkdirTemp("", "badger-test-*") + require.NoError(t, err) + defer os.RemoveAll(dir) + + // initialize the store + db := &BadgerBackend{Path: dir} + err = db.Init() + require.NoError(t, err) + defer db.Close() + + // create a test event + evt := nostr.Event{ + Content: "hello world", + CreatedAt: 1000, + Kind: 1, + Tags: nostr.Tags{}, + } + err = evt.Sign(nostr.Generate()) + require.NoError(t, err) + + // save the event + err = db.SaveEvent(evt) + require.NoError(t, err) + + // try to save it again, should fail with ErrDupEvent + err = db.SaveEvent(evt) + require.Error(t, err) + require.Equal(t, eventstore.ErrDupEvent, err) + + // query the event by its ID + filter := nostr.Filter{ + IDs: []nostr.ID{evt.ID}, + } + + // collect results + results := make([]nostr.Event, 0) + for event := range db.QueryEvents(filter) { + results = append(results, event) + } + + // verify we got exactly one event and it matches + require.Len(t, results, 1) + require.Equal(t, evt.ID, results[0].ID) + require.Equal(t, evt.Content, results[0].Content) + require.Equal(t, evt.CreatedAt, results[0].CreatedAt) + require.Equal(t, evt.Kind, results[0].Kind) + require.Equal(t, evt.PubKey, results[0].PubKey) +} diff --git a/eventstore/badger/fuzz_test.go b/eventstore/badger/fuzz_test.go index 1e56ae6..ee613ac 100644 --- a/eventstore/badger/fuzz_test.go +++ b/eventstore/badger/fuzz_test.go @@ -2,23 +2,18 @@ package badger import ( "cmp" - "context" "encoding/binary" - "encoding/hex" "fmt" + "slices" "testing" "time" - 
"github.com/dgraph-io/badger/v4" - "fiatjaf.com/nostr/eventstore" "fiatjaf.com/nostr" + "github.com/dgraph-io/badger/v4" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) func FuzzQuery(f *testing.F) { - ctx := context.Background() - f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1)) f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) { total++ @@ -72,41 +67,41 @@ func FuzzQuery(f *testing.F) { // ~ start actual test filter := nostr.Filter{ - Authors: make([]string, authors), + Authors: make([]nostr.PubKey, authors), Limit: int(limit), } - maxKind := 1 + var maxKind uint16 = 1 if kinds > 0 { - filter.Kinds = make([]int, kinds) + filter.Kinds = make([]uint16, kinds) for i := range filter.Kinds { - filter.Kinds[i] = int(kindFactor) * i + filter.Kinds[i] = uint16(kindFactor) * uint16(i) } maxKind = filter.Kinds[len(filter.Kinds)-1] } for i := 0; i < int(authors); i++ { - sk := make([]byte, 32) - binary.BigEndian.PutUint32(sk, uint32(i%int(authors*seedFactor))+1) - pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + sk := nostr.SecretKey{} + binary.BigEndian.PutUint32(sk[:], uint32(i%int(authors*seedFactor))+1) + pk := nostr.GetPublicKey(sk) filter.Authors[i] = pk } - expected := make([]*nostr.Event, 0, total) + expected := make([]nostr.Event, 0, total) for i := 0; i < int(total); i++ { skseed := uint32(i%int(authors*seedFactor)) + 1 - sk := make([]byte, 32) - binary.BigEndian.PutUint32(sk, skseed) + sk := nostr.SecretKey{} + binary.BigEndian.PutUint32(sk[:], skseed) - evt := &nostr.Event{ + evt := nostr.Event{ CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i), Content: fmt.Sprintf("unbalanced %d", i), Tags: nostr.Tags{}, - Kind: i % maxKind, + Kind: uint16(i) % maxKind, } - err := evt.Sign(hex.EncodeToString(sk)) + err := evt.Sign(sk) require.NoError(t, err) - err = db.SaveEvent(ctx, evt) + err = db.SaveEvent(evt) 
require.NoError(t, err) if filter.Matches(evt) { @@ -114,27 +109,25 @@ func FuzzQuery(f *testing.F) { } } - slices.SortFunc(expected, nostr.CompareEventPtrReverse) + slices.SortFunc(expected, nostr.CompareEventReverse) if len(expected) > int(limit) { expected = expected[0:limit] } - w := eventstore.RelayWrapper{Store: db} - start := time.Now() // fmt.Println(filter) - res, err := w.QuerySync(ctx, filter) + res := slices.Collect(db.QueryEvents(filter)) end := time.Now() require.NoError(t, err) require.Equal(t, len(expected), len(res), "number of results is different than expected") require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long") - require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") + require.True(t, slices.IsSortedFunc(res, func(a, b nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") nresults := len(expected) - getTimestamps := func(events []*nostr.Event) []nostr.Timestamp { + getTimestamps := func(events []nostr.Event) []nostr.Timestamp { res := make([]nostr.Timestamp, len(events)) for i, evt := range events { res[i] = evt.CreatedAt diff --git a/eventstore/badger/helpers.go b/eventstore/badger/helpers.go index 2ebb559..c96afe5 100644 --- a/eventstore/badger/helpers.go +++ b/eventstore/badger/helpers.go @@ -4,11 +4,11 @@ import ( "encoding/binary" "encoding/hex" "iter" + "slices" "strconv" "strings" "fiatjaf.com/nostr" - "golang.org/x/exp/slices" ) func getTagIndexPrefix(tagValue string) ([]byte, int) { diff --git a/eventstore/badger/query.go b/eventstore/badger/query.go index 04fb2cb..7611e49 100644 --- a/eventstore/badger/query.go +++ b/eventstore/badger/query.go @@ -6,12 +6,12 @@ import ( "fmt" "iter" "log" + "slices" "fiatjaf.com/nostr" "fiatjaf.com/nostr/eventstore/codec/betterbinary" "fiatjaf.com/nostr/eventstore/internal" "github.com/dgraph-io/badger/v4" - "golang.org/x/exp/slices" ) var 
batchFilled = errors.New("batch-filled") diff --git a/eventstore/badger/query_planner.go b/eventstore/badger/query_planner.go index 1efe6fe..0d05e9b 100644 --- a/eventstore/badger/query_planner.go +++ b/eventstore/badger/query_planner.go @@ -2,11 +2,9 @@ package badger import ( "encoding/binary" - "encoding/hex" - "fmt" - "fiatjaf.com/nostr/eventstore/internal" "fiatjaf.com/nostr" + "fiatjaf.com/nostr/eventstore/internal" ) type query struct { @@ -51,13 +49,10 @@ func prepareQueries(filter nostr.Filter) ( if len(filter.IDs) > 0 { queries = make([]query, len(filter.IDs)) - for i, idHex := range filter.IDs { + for i, id := range filter.IDs { prefix := make([]byte, 1+8) prefix[0] = indexIdPrefix - if len(idHex) != 64 { - return nil, nil, 0, fmt.Errorf("invalid id '%s'", idHex) - } - hex.Decode(prefix[1:], []byte(idHex[0:8*2])) + copy(prefix[1:1+8], id[0:8]) queries[i] = query{i: i, prefix: prefix, skipTimestamp: true} } @@ -96,27 +91,20 @@ pubkeyMatching: if len(filter.Authors) > 0 { if len(filter.Kinds) == 0 { queries = make([]query, len(filter.Authors)) - for i, pubkeyHex := range filter.Authors { - if len(pubkeyHex) != 64 { - return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex) - } + for i, pk := range filter.Authors { prefix := make([]byte, 1+8) prefix[0] = indexPubkeyPrefix - hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2])) + copy(prefix[1:1+8], pk[0:8]) queries[i] = query{i: i, prefix: prefix} } } else { queries = make([]query, len(filter.Authors)*len(filter.Kinds)) i := 0 - for _, pubkeyHex := range filter.Authors { + for _, pk := range filter.Authors { for _, kind := range filter.Kinds { - if len(pubkeyHex) != 64 { - return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex) - } - prefix := make([]byte, 1+8+2) prefix[0] = indexPubkeyKindPrefix - hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2])) + copy(prefix[1:1+8], pk[0:8]) binary.BigEndian.PutUint16(prefix[1+8:], uint16(kind)) queries[i] = query{i: i, prefix: prefix} i++ diff --git 
a/eventstore/bluge/bluge_test.go b/eventstore/bluge/bluge_test.go index 26298fc..9a6eed3 100644 --- a/eventstore/bluge/bluge_test.go +++ b/eventstore/bluge/bluge_test.go @@ -1,12 +1,11 @@ package bluge import ( - "context" "os" "testing" - "fiatjaf.com/nostr/eventstore/badger" "fiatjaf.com/nostr" + "fiatjaf.com/nostr/eventstore/badger" "github.com/stretchr/testify/assert" ) @@ -25,9 +24,7 @@ func TestBlugeFlow(t *testing.T) { bl.Init() defer bl.Close() - ctx := context.Background() - - willDelete := make([]*nostr.Event, 0, 3) + willDelete := make([]nostr.Event, 0, 3) for i, content := range []string{ "good morning mr paper maker", @@ -36,11 +33,11 @@ func TestBlugeFlow(t *testing.T) { "tonight we dine in my house", "the paper in this house if very good, mr", } { - evt := &nostr.Event{Content: content, Tags: nostr.Tags{}} - evt.Sign("0000000000000000000000000000000000000000000000000000000000000001") + evt := nostr.Event{Content: content, Tags: nostr.Tags{}} + evt.Sign(nostr.MustSecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")) - bb.SaveEvent(ctx, evt) - bl.SaveEvent(ctx, evt) + bb.SaveEvent(evt) + bl.SaveEvent(evt) if i%2 == 0 { willDelete = append(willDelete, evt) @@ -48,33 +45,26 @@ func TestBlugeFlow(t *testing.T) { } { - ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"}) - if err != nil { - t.Fatalf("QueryEvents error: %s", err) - return - } n := 0 - for range ch { + for range bl.QueryEvents(nostr.Filter{Search: "good"}) { n++ } assert.Equal(t, 3, n) } for _, evt := range willDelete { - bl.DeleteEvent(ctx, evt) + bl.DeleteEvent(evt.ID) } { - ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"}) - if err != nil { - t.Fatalf("QueryEvents error: %s", err) - return - } n := 0 - for res := range ch { + for res := range bl.QueryEvents(nostr.Filter{Search: "good"}) { n++ assert.Equal(t, res.Content, "good night") - assert.Equal(t, res.PubKey, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798") + 
assert.Equal(t, + nostr.MustPubKeyFromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"), + res.PubKey, + ) } assert.Equal(t, 1, n) } diff --git a/eventstore/bluge/helpers.go b/eventstore/bluge/helpers.go index 4468112..726be7a 100644 --- a/eventstore/bluge/helpers.go +++ b/eventstore/bluge/helpers.go @@ -1,6 +1,8 @@ package bluge import ( + "encoding/hex" + "fiatjaf.com/nostr" ) @@ -20,5 +22,7 @@ func (id eventIdentifier) Field() string { } func (id eventIdentifier) Term() []byte { - return id[:] + idhex := make([]byte, 64) + hex.Encode(idhex, id[:]) + return idhex } diff --git a/eventstore/bluge/query.go b/eventstore/bluge/query.go index 23c5175..59c6f6a 100644 --- a/eventstore/bluge/query.go +++ b/eventstore/bluge/query.go @@ -90,8 +90,11 @@ func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] { var next *search.DocumentMatch for next, err = dmi.Next(); next != nil; next, err = dmi.Next() { next.VisitStoredFields(func(field string, value []byte) bool { - for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{nostr.ID(value)}}) { - yield(evt) + id, err := nostr.IDFromHex(string(value)) + if err == nil { + for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}) { + yield(evt) + } } return false }) diff --git a/eventstore/lmdb/count.go b/eventstore/lmdb/count.go index 2aec237..d50d3c6 100644 --- a/eventstore/lmdb/count.go +++ b/eventstore/lmdb/count.go @@ -4,13 +4,13 @@ import ( "bytes" "encoding/binary" "encoding/hex" + "slices" "fiatjaf.com/nostr" "fiatjaf.com/nostr/eventstore/codec/betterbinary" "fiatjaf.com/nostr/nip45" "fiatjaf.com/nostr/nip45/hyperloglog" "github.com/PowerDNS/lmdb-go/lmdb" - "golang.org/x/exp/slices" ) func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) { diff --git a/eventstore/lmdb/delete.go b/eventstore/lmdb/delete.go index 3d85de3..c3bb348 100644 --- a/eventstore/lmdb/delete.go +++ b/eventstore/lmdb/delete.go @@ -40,13 +40,13 @@ 
func (b *LMDBBackend) delete(txn *lmdb.Txn, id nostr.ID) error { for k := range b.getIndexKeysForEvent(evt) { err := txn.Del(k.dbi, k.key, idx) if err != nil { - return fmt.Errorf("failed to delete index entry %s for %x: %w", b.keyName(k), evt.ID[0:8*2], err) + return fmt.Errorf("failed to delete index entry %s for %x: %w", b.keyName(k), evt.ID[0:8], err) } } // delete the raw event if err := txn.Del(b.rawEventStore, idx, nil); err != nil { - return fmt.Errorf("failed to delete raw event %x (idx %x): %w", evt.ID[0:8*2], idx, err) + return fmt.Errorf("failed to delete raw event %x (idx %x): %w", evt.ID[0:8], idx, err) } return nil diff --git a/eventstore/lmdb/fuzz_test.go b/eventstore/lmdb/fuzz_test.go index 6be5120..9c484e0 100644 --- a/eventstore/lmdb/fuzz_test.go +++ b/eventstore/lmdb/fuzz_test.go @@ -2,24 +2,19 @@ package lmdb import ( "cmp" - "context" "encoding/binary" - "encoding/hex" "fmt" "os" + "slices" "testing" "time" - "github.com/PowerDNS/lmdb-go/lmdb" - "fiatjaf.com/nostr/eventstore" "fiatjaf.com/nostr" + "github.com/PowerDNS/lmdb-go/lmdb" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" ) func FuzzQuery(f *testing.F) { - ctx := context.Background() - f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1)) f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) { total++ @@ -51,41 +46,41 @@ func FuzzQuery(f *testing.F) { // ~ start actual test filter := nostr.Filter{ - Authors: make([]string, authors), + Authors: make([]nostr.PubKey, authors), Limit: int(limit), } - maxKind := 1 + var maxKind uint16 = 1 if kinds > 0 { - filter.Kinds = make([]int, kinds) + filter.Kinds = make([]uint16, kinds) for i := range filter.Kinds { - filter.Kinds[i] = int(kindFactor) * i + filter.Kinds[i] = uint16(int(kindFactor) * i) } maxKind = filter.Kinds[len(filter.Kinds)-1] } for i := 0; i < int(authors); i++ { - sk := make([]byte, 32) - binary.BigEndian.PutUint32(sk, 
uint32(i%int(authors*seedFactor))+1) - pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + var sk nostr.SecretKey + binary.BigEndian.PutUint32(sk[:], uint32(i%int(authors*seedFactor))+1) + pk := nostr.GetPublicKey(sk) filter.Authors[i] = pk } - expected := make([]*nostr.Event, 0, total) + expected := make([]nostr.Event, 0, total) for i := 0; i < int(total); i++ { skseed := uint32(i%int(authors*seedFactor)) + 1 - sk := make([]byte, 32) - binary.BigEndian.PutUint32(sk, skseed) + sk := nostr.SecretKey{} + binary.BigEndian.PutUint32(sk[:], skseed) - evt := &nostr.Event{ + evt := nostr.Event{ CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i), Content: fmt.Sprintf("unbalanced %d", i), Tags: nostr.Tags{}, - Kind: i % maxKind, + Kind: uint16(i) % maxKind, } - err := evt.Sign(hex.EncodeToString(sk)) + err := evt.Sign(sk) require.NoError(t, err) - err = db.SaveEvent(ctx, evt) + err = db.SaveEvent(evt) require.NoError(t, err) if filter.Matches(evt) { @@ -93,25 +88,21 @@ func FuzzQuery(f *testing.F) { } } - slices.SortFunc(expected, nostr.CompareEventPtrReverse) + slices.SortFunc(expected, nostr.CompareEventReverse) if len(expected) > int(limit) { expected = expected[0:limit] } - w := eventstore.RelayWrapper{Store: db} - start := time.Now() - res, err := w.QuerySync(ctx, filter) + res := slices.Collect(db.QueryEvents(filter)) end := time.Now() - require.NoError(t, err) require.Equal(t, len(expected), len(res), "number of results is different than expected") - require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long") nresults := len(expected) - getTimestamps := func(events []*nostr.Event) []nostr.Timestamp { + getTimestamps := func(events []nostr.Event) []nostr.Timestamp { res := make([]nostr.Timestamp, len(events)) for i, evt := range events { res[i] = evt.CreatedAt @@ -132,6 +123,6 @@ func FuzzQuery(f *testing.F) { require.True(t, filter.Matches(evt), "event %s doesn't match filter %s", evt, filter) } - 
require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") + require.True(t, slices.IsSortedFunc(res, func(a, b nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") }) } diff --git a/eventstore/lmdb/helpers.go b/eventstore/lmdb/helpers.go index 2147f49..b9988f2 100644 --- a/eventstore/lmdb/helpers.go +++ b/eventstore/lmdb/helpers.go @@ -6,12 +6,12 @@ import ( "encoding/hex" "fmt" "iter" + "slices" "strconv" "strings" "fiatjaf.com/nostr" "github.com/PowerDNS/lmdb-go/lmdb" - "golang.org/x/exp/slices" ) // this iterator always goes backwards @@ -64,7 +64,7 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] { { // ~ by pubkey+date k := make([]byte, 8+4) - hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + copy(k[0:8], evt.PubKey[0:8]) binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt)) if !yield(key{dbi: b.indexPubkey, key: k[0 : 8+4]}) { return @@ -84,7 +84,7 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] { { // ~ by pubkey+kind+date k := make([]byte, 8+2+4) - hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + copy(k[0:8], evt.PubKey[0:8]) binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt)) if !yield(key{dbi: b.indexPubkeyKind, key: k[0 : 8+2+4]}) { diff --git a/eventstore/lmdb/query.go b/eventstore/lmdb/query.go index 8bab417..e5eda72 100644 --- a/eventstore/lmdb/query.go +++ b/eventstore/lmdb/query.go @@ -99,7 +99,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in results[q] = make([]internal.IterEvent, 0, batchSizePerQuery*2) } - // fmt.Println("queries", len(queries)) + // fmt.Println("queries", filter, len(queries)) for c := 0; ; c++ { batchSizePerQuery = internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) @@ -113,7 +113,7 @@ func (b *LMDBBackend) query(txn 
*lmdb.Txn, filter nostr.Filter, limit int) ([]in if oldest.Q == q && remainingUnexhausted > 1 { continue } - // fmt.Println(" query", q, unsafe.Pointer(&results[q]), hex.EncodeToString(query.prefix), len(results[q])) + // fmt.Println(" query", q, unsafe.Pointer(&results[q]), b.dbiName(query.dbi), hex.EncodeToString(query.prefix), len(results[q])) it := iterators[q] pulledThisIteration := 0 diff --git a/eventstore/lmdb/query_planner.go b/eventstore/lmdb/query_planner.go index 4f4e7b9..a6ac57f 100644 --- a/eventstore/lmdb/query_planner.go +++ b/eventstore/lmdb/query_planner.go @@ -53,14 +53,9 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) ( if filter.IDs != nil { // when there are ids we ignore everything else queries = make([]query, len(filter.IDs)) - for i, idHex := range filter.IDs { - if len(idHex) != 64 { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid id '%s'", idHex) - } + for i, id := range filter.IDs { prefix := make([]byte, 8) - if _, err := hex.Decode(prefix[0:8], []byte(idHex[0:8*2])); err != nil { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid id '%s'", idHex) - } + copy(prefix[0:8], id[0:8]) queries[i] = query{i: i, dbi: b.indexId, prefix: prefix[0:8], keySize: 8, timestampSize: 0} } return queries, nil, nil, "", nil, 0, nil @@ -161,29 +156,17 @@ pubkeyMatching: if len(filter.Kinds) == 0 { // will use pubkey index queries = make([]query, len(filter.Authors)) - for i, pubkeyHex := range filter.Authors { - if len(pubkeyHex) != 64 { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } - prefix := make([]byte, 8) - if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } - queries[i] = query{i: i, dbi: b.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4} + for i, pk := range filter.Authors { + queries[i] = query{i: i, dbi: b.indexPubkey, prefix: pk[0:8], keySize: 8 + 4, 
timestampSize: 4} } } else { // will use pubkeyKind index queries = make([]query, len(filter.Authors)*len(filter.Kinds)) i := 0 - for _, pubkeyHex := range filter.Authors { + for _, pk := range filter.Authors { for _, kind := range filter.Kinds { - if len(pubkeyHex) != 64 { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } prefix := make([]byte, 8+2) - if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } + copy(prefix[0:8], pk[0:8]) binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind)) queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4} i++ diff --git a/eventstore/lmdb/replace.go b/eventstore/lmdb/replace.go index daba48e..5407a38 100644 --- a/eventstore/lmdb/replace.go +++ b/eventstore/lmdb/replace.go @@ -12,7 +12,7 @@ import ( func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error { // sanity checking if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { - return fmt.Errorf("event with values out of expected boundaries") + return fmt.Errorf("event with values out of expected boundaries %d/%d", evt.CreatedAt, evt.Kind) } return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { diff --git a/eventstore/lmdb/save.go b/eventstore/lmdb/save.go index 0d06ccd..fa6a646 100644 --- a/eventstore/lmdb/save.go +++ b/eventstore/lmdb/save.go @@ -13,7 +13,7 @@ import ( func (b *LMDBBackend) SaveEvent(evt nostr.Event) error { // sanity checking if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { - return fmt.Errorf("event with values out of expected boundaries") + return fmt.Errorf("event with values out of expected boundaries %d/%d", evt.CreatedAt, evt.Kind) } return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { diff --git a/eventstore/mmm/fuzz_test.go b/eventstore/mmm/fuzz_test.go index 982a136..fb10746 100644 --- a/eventstore/mmm/fuzz_test.go +++ 
b/eventstore/mmm/fuzz_test.go @@ -1,8 +1,8 @@ package mmm import ( - "context" "fmt" + "iter" "math/rand/v2" "os" "slices" @@ -47,11 +47,10 @@ func FuzzTest(f *testing.F) { } // create test events - ctx := context.Background() - sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb" - storedIds := make([]string, nevents) - nTags := make(map[string]int) - storedByLayer := make(map[string][]string) + sk := nostr.MustSecretKeyFromHex("945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb") + storedIds := make([]nostr.ID, nevents) + nTags := make(map[nostr.ID]int) + storedByLayer := make(map[string][]nostr.ID) // create n events with random combinations of tags for i := 0; i < int(nevents); i++ { @@ -68,9 +67,9 @@ func FuzzTest(f *testing.F) { } } - evt := &nostr.Event{ + evt := nostr.Event{ CreatedAt: nostr.Timestamp(i), - Kind: i, // hack to query by serial id + Kind: uint16(i), // hack to query by serial id Tags: tags, Content: fmt.Sprintf("test content %d", i), } @@ -78,23 +77,21 @@ func FuzzTest(f *testing.F) { for _, layer := range mmm.layers { if evt.Tags.FindWithValue("t", layer.name) != nil { - err := layer.SaveEvent(ctx, evt) + err := layer.SaveEvent(evt) require.NoError(t, err) storedByLayer[layer.name] = append(storedByLayer[layer.name], evt.ID) } } - storedIds = append(storedIds, evt.ID) + storedIds[i] = evt.ID nTags[evt.ID] = len(evt.Tags) } // verify each layer has the correct events for _, layer := range mmm.layers { - results, err := layer.QueryEvents(ctx, nostr.Filter{}) - require.NoError(t, err) count := 0 - for evt := range results { + for evt := range layer.QueryEvents(nostr.Filter{}) { require.True(t, evt.Tags.ContainsAny("t", []string{layer.name})) count++ } @@ -102,7 +99,7 @@ } // randomly select n events to delete from random layers - deleted := make(map[string][]*IndexingLayer) + deleted := make(map[nostr.ID][]*IndexingLayer) for range ndeletes { id := 
storedIds[rnd.Int()%len(storedIds)] @@ -117,7 +113,7 @@ func FuzzTest(f *testing.F) { require.Contains(t, layers, layer) // delete now - layer.DeleteEvent(ctx, evt) + layer.DeleteEvent(evt.ID) deleted[id] = append(deleted[id], layer) } else { // was never saved to this in the first place @@ -152,16 +148,16 @@ func FuzzTest(f *testing.F) { for _, layer := range mmm.layers { // verify event still accessible from other layers if slices.Contains(foundlayers, layer) { - ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack - require.NoError(t, err) - fetched := <-ch - require.NotNil(t, fetched) + next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack + _, fetched := next() + require.True(t, fetched) + stop() } else { // and not accessible from this layer we just deleted - ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack - require.NoError(t, err) - fetched := <-ch - require.Nil(t, fetched) + next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack + _, fetched := next() + require.False(t, fetched) + stop() } } } @@ -169,11 +165,8 @@ func FuzzTest(f *testing.F) { // now delete a layer and events that only exist in that layer should vanish layer := mmm.layers[rnd.Int()%len(mmm.layers)] - ch, err := layer.QueryEvents(ctx, nostr.Filter{}) - require.NoError(t, err) - - eventsThatShouldVanish := make([]string, 0, nevents/2) - for evt := range ch { + eventsThatShouldVanish := make([]nostr.ID, 0, nevents/2) + for evt := range layer.QueryEvents(nostr.Filter{}) { if len(evt.Tags) == 1+len(deleted[evt.ID]) { eventsThatShouldVanish = append(eventsThatShouldVanish, evt.ID) } } diff --git a/eventstore/mmm/helpers.go b/eventstore/mmm/helpers.go index d0b136c..5c10894 100644 --- a/eventstore/mmm/helpers.go +++ b/eventstore/mmm/helpers.go @@ -51,7 +51,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] { { // ~ by pubkey+date k := 
make([]byte, 8+4) - hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + copy(k[0:8], evt.PubKey[0:8]) binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt)) if !yield(key{dbi: il.indexPubkey, key: k[0 : 8+4]}) { return @@ -71,7 +71,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] { { // ~ by pubkey+kind+date k := make([]byte, 8+2+4) - hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + copy(k[0:8], evt.PubKey[0:8]) binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt)) if !yield(key{dbi: il.indexPubkeyKind, key: k[0 : 8+2+4]}) { diff --git a/eventstore/mmm/mmmm_test.go b/eventstore/mmm/mmmm_test.go deleted file mode 100644 index bf116c6..0000000 --- a/eventstore/mmm/mmmm_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package mmm - -import ( - "context" - "encoding/binary" - "encoding/hex" - "fmt" - "os" - "testing" - - "github.com/PowerDNS/lmdb-go/lmdb" - "fiatjaf.com/nostr" - "github.com/rs/zerolog" - "github.com/stretchr/testify/require" -) - -func TestMultiLayerIndexing(t *testing.T) { - // Create a temporary directory for the test - tmpDir := "/tmp/eventstore-mmm-test" - os.RemoveAll(tmpDir) - - logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}) - - // initialize MMM with three layers: - // 1. odd timestamps layer - // 2. even timestamps layer - // 3. 
all events layer - mmm := &MultiMmapManager{ - Dir: tmpDir, - Logger: &logger, - } - - err := mmm.Init() - require.NoError(t, err) - defer mmm.Close() - - // create layers - err = mmm.EnsureLayer("odd", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { - return evt.CreatedAt%2 == 1 - }, - }) - require.NoError(t, err) - err = mmm.EnsureLayer("even", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { - return evt.CreatedAt%2 == 0 - }, - }) - require.NoError(t, err) - err = mmm.EnsureLayer("all", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { - return true - }, - }) - require.NoError(t, err) - - // create test events - ctx := context.Background() - baseTime := nostr.Timestamp(0) - sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb" - events := make([]*nostr.Event, 10) - for i := 0; i < 10; i++ { - evt := &nostr.Event{ - CreatedAt: baseTime + nostr.Timestamp(i), - Kind: 1, - Tags: nostr.Tags{}, - Content: "test content", - } - evt.Sign(sk) - events[i] = evt - stored, err := mmm.StoreGlobal(ctx, evt) - require.NoError(t, err) - require.True(t, stored) - } - - { - // query odd layer - oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - - oddCount := 0 - for evt := range oddResults { - require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(1)) - oddCount++ - } - require.Equal(t, 5, oddCount) - } - - { - // query even layer - evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - - evenCount := 0 - for evt := range evenResults { - require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(0)) - evenCount++ - } - require.Equal(t, 5, evenCount) - } - - { - // query all layer - allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - - 
allCount := 0 - for range allResults { - allCount++ - } - require.Equal(t, 10, allCount) - } - - // delete some events - err = mmm.layers[0].DeleteEvent(ctx, events[1]) // odd timestamp - require.NoError(t, err) - err = mmm.layers[1].DeleteEvent(ctx, events[2]) // even timestamp - - // verify deletions - { - oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - oddCount := 0 - for range oddResults { - oddCount++ - } - require.Equal(t, 4, oddCount) - } - - { - evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - evenCount := 0 - for range evenResults { - evenCount++ - } - require.Equal(t, 4, evenCount) - } - - { - allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - allCount := 0 - for range allResults { - allCount++ - } - require.Equal(t, 10, allCount) - } - - // save events directly to layers regardless of timestamp - { - oddEvent := &nostr.Event{ - CreatedAt: baseTime + 100, // even timestamp - Kind: 1, - Content: "forced odd", - } - oddEvent.Sign(sk) - err = mmm.layers[0].SaveEvent(ctx, oddEvent) // save even timestamp to odd layer - require.NoError(t, err) - - // it is added to the odd il - oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - oddCount := 0 - for range oddResults { - oddCount++ - } - require.Equal(t, 5, oddCount) - - // it doesn't affect the event il - evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{1}, - }) - require.NoError(t, err) - evenCount := 0 - for range evenResults { - evenCount++ - } - require.Equal(t, 4, evenCount) - } - - // test replaceable events - for _, layer := range mmm.layers { - replaceable := &nostr.Event{ - CreatedAt: baseTime + 0, - Kind: 0, - Content: fmt.Sprintf("first"), - } - replaceable.Sign(sk) - err := layer.ReplaceEvent(ctx, 
replaceable) - require.NoError(t, err) - } - - // replace events alternating between layers - for i := range mmm.layers { - content := fmt.Sprintf("last %d", i) - - newEvt := &nostr.Event{ - CreatedAt: baseTime + 1000, - Kind: 0, - Content: content, - } - newEvt.Sign(sk) - - layer := mmm.layers[i] - err = layer.ReplaceEvent(ctx, newEvt) - require.NoError(t, err) - - // verify replacement in the layer that did it - results, err := layer.QueryEvents(ctx, nostr.Filter{ - Kinds: []int{0}, - }) - require.NoError(t, err) - - count := 0 - for evt := range results { - require.Equal(t, content, evt.Content) - count++ - } - require.Equal(t, 1, count) - - // verify other layers still have the old version - for j := 0; j < 3; j++ { - if mmm.layers[j] == layer { - continue - } - results, err := mmm.layers[j].QueryEvents(ctx, nostr.Filter{ - Kinds: []int{0}, - }) - require.NoError(t, err) - - count := 0 - for evt := range results { - if i < j { - require.Equal(t, "first", evt.Content) - } else { - require.Equal(t, evt.Content, fmt.Sprintf("last %d", j)) - } - count++ - } - - require.Equal(t, 1, count, "%d/%d", i, j) - } - } -} - -func TestLayerReferenceTracking(t *testing.T) { - // Create a temporary directory for the test - tmpDir, err := os.MkdirTemp("", "mmm-test-*") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}) - - // initialize MMM with three layers - mmm := &MultiMmapManager{ - Dir: tmpDir, - Logger: &logger, - } - - err = mmm.Init() - require.NoError(t, err) - defer mmm.Close() - - // create three layers - err = mmm.EnsureLayer("layer1", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true }, - }) - require.NoError(t, err) - err = mmm.EnsureLayer("layer2", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true }, - }) - require.NoError(t, err) - err = mmm.EnsureLayer("layer3", 
&IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true }, - }) - require.NoError(t, err) - err = mmm.EnsureLayer("layer4", &IndexingLayer{ - MaxLimit: 100, - ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true }, - }) - require.NoError(t, err) - - // create test events - ctx := context.Background() - sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb" - evt1 := &nostr.Event{ - CreatedAt: 1000, - Kind: 1, - Tags: nostr.Tags{}, - Content: "event 1", - } - evt1.Sign(sk) - - evt2 := &nostr.Event{ - CreatedAt: 2000, - Kind: 1, - Tags: nostr.Tags{}, - Content: "event 2", - } - evt2.Sign(sk) - - // save evt1 to layer1 - err = mmm.layers[0].SaveEvent(ctx, evt1) - require.NoError(t, err) - - // save evt1 to layer2 - err = mmm.layers[1].SaveEvent(ctx, evt1) - require.NoError(t, err) - - // save evt1 to layer4 - err = mmm.layers[0].SaveEvent(ctx, evt1) - require.NoError(t, err) - - // delete evt1 from layer1 - err = mmm.layers[0].DeleteEvent(ctx, evt1) - require.NoError(t, err) - - // save evt2 to layer3 - err = mmm.layers[2].SaveEvent(ctx, evt2) - require.NoError(t, err) - - // save evt2 to layer4 - err = mmm.layers[3].SaveEvent(ctx, evt2) - require.NoError(t, err) - - // save evt2 to layer3 again - err = mmm.layers[2].SaveEvent(ctx, evt2) - require.NoError(t, err) - - // delete evt1 from layer4 - err = mmm.layers[3].DeleteEvent(ctx, evt1) - require.NoError(t, err) - - // verify the state of the indexId database - err = mmm.lmdbEnv.View(func(txn *lmdb.Txn) error { - cursor, err := txn.OpenCursor(mmm.indexId) - if err != nil { - return err - } - defer cursor.Close() - - count := 0 - for k, v, err := cursor.Get(nil, nil, lmdb.First); err == nil; k, v, err = cursor.Get(nil, nil, lmdb.Next) { - count++ - if hex.EncodeToString(k) == evt1.ID[:16] { - // evt1 should only reference layer2 - require.Equal(t, 14, len(v), "evt1 should have one layer reference") - layerRef := 
binary.BigEndian.Uint16(v[12:14]) - require.Equal(t, mmm.layers[1].id, layerRef, "evt1 should reference layer2") - } else if hex.EncodeToString(k) == evt2.ID[:16] { - // evt2 should references to layer3 and layer4 - require.Equal(t, 16, len(v), "evt2 should have two layer references") - layer3Ref := binary.BigEndian.Uint16(v[12:14]) - require.Equal(t, mmm.layers[2].id, layer3Ref, "evt2 should reference layer3") - layer4Ref := binary.BigEndian.Uint16(v[14:16]) - require.Equal(t, mmm.layers[3].id, layer4Ref, "evt2 should reference layer4") - } else { - t.Errorf("unexpected event in indexId: %x", k) - } - } - require.Equal(t, 2, count, "should have exactly two events in indexId") - return nil - }) - require.NoError(t, err) -} diff --git a/eventstore/mmm/query_planner.go b/eventstore/mmm/query_planner.go index 34f3c6f..f792460 100644 --- a/eventstore/mmm/query_planner.go +++ b/eventstore/mmm/query_planner.go @@ -145,29 +145,19 @@ pubkeyMatching: if len(filter.Kinds) == 0 { // will use pubkey index queries = make([]query, len(filter.Authors)) - for i, pubkeyHex := range filter.Authors { - if len(pubkeyHex) != 64 { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } + for i, pk := range filter.Authors { prefix := make([]byte, 8) - if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } + copy(prefix[0:8], pk[0:8]) queries[i] = query{i: i, dbi: il.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4} } } else { // will use pubkeyKind index queries = make([]query, len(filter.Authors)*len(filter.Kinds)) i := 0 - for _, pubkeyHex := range filter.Authors { + for _, pk := range filter.Authors { for _, kind := range filter.Kinds { - if len(pubkeyHex) != 64 { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } prefix := make([]byte, 8+2) - if _, err := hex.Decode(prefix[0:8], 
[]byte(pubkeyHex[0:8*2])); err != nil { - return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) - } + copy(prefix[0:8], pk[0:8]) binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind)) queries[i] = query{i: i, dbi: il.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4} i++ diff --git a/eventstore/slicestore/lib.go b/eventstore/slicestore/lib.go index 45d7222..a0b5adc 100644 --- a/eventstore/slicestore/lib.go +++ b/eventstore/slicestore/lib.go @@ -5,12 +5,12 @@ import ( "cmp" "fmt" "iter" + "slices" "sync" "fiatjaf.com/nostr" "fiatjaf.com/nostr/eventstore" "fiatjaf.com/nostr/eventstore/internal" - "golang.org/x/exp/slices" ) var _ eventstore.Store = (*SliceStore)(nil) @@ -80,6 +80,13 @@ func (b *SliceStore) CountEvents(filter nostr.Filter) (uint32, error) { } func (b *SliceStore) SaveEvent(evt nostr.Event) error { + b.Lock() + defer b.Unlock() + + return b.save(evt) +} + +func (b *SliceStore) save(evt nostr.Event) error { idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator) if found { return eventstore.ErrDupEvent @@ -93,8 +100,22 @@ func (b *SliceStore) SaveEvent(evt nostr.Event) error { } func (b *SliceStore) DeleteEvent(id nostr.ID) error { - idx, found := slices.BinarySearchFunc(b.internal, id, eventIDComparator) - if !found { + b.Lock() + defer b.Unlock() + + return b.delete(id) +} + +func (b *SliceStore) delete(id nostr.ID) error { + var idx int = -1 + for i, event := range b.internal { + if event.ID == id { + idx = i + break + } + } + + if idx == -1 { // we don't have this event return nil } @@ -117,7 +138,7 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error { shouldStore := true for previous := range b.QueryEvents(filter) { if internal.IsOlder(previous, evt) { - if err := b.DeleteEvent(previous.ID); err != nil { + if err := b.delete(previous.ID); err != nil { return fmt.Errorf("failed to delete event for replacing: %w", err) } } else { @@ -126,7 +147,7 @@ func (b *SliceStore) 
ReplaceEvent(evt nostr.Event) error { } if shouldStore { - if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent { + if err := b.save(evt); err != nil && err != eventstore.ErrDupEvent { return fmt.Errorf("failed to save: %w", err) } } @@ -135,17 +156,13 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error { } func eventTimestampComparator(e nostr.Event, t nostr.Timestamp) int { - return int(t) - int(e.CreatedAt) -} - -func eventIDComparator(e nostr.Event, i nostr.ID) int { - return bytes.Compare(i[:], e.ID[:]) + return cmp.Compare(t, e.CreatedAt) } func eventComparator(a nostr.Event, b nostr.Event) int { - c := cmp.Compare(b.CreatedAt, a.CreatedAt) - if c != 0 { - return c + v := cmp.Compare(b.CreatedAt, a.CreatedAt) + if v == 0 { + v = bytes.Compare(b.ID[:], a.ID[:]) } - return bytes.Compare(b.ID[:], a.ID[:]) + return v } diff --git a/eventstore/slicestore/slicestore_test.go b/eventstore/slicestore/slicestore_test.go index d89741d..4e0eaf2 100644 --- a/eventstore/slicestore/slicestore_test.go +++ b/eventstore/slicestore/slicestore_test.go @@ -4,6 +4,7 @@ import ( "testing" "fiatjaf.com/nostr" + "github.com/stretchr/testify/require" ) func TestBasicStuff(t *testing.T) { @@ -20,17 +21,17 @@ func TestBasicStuff(t *testing.T) { if i%3 == 0 { kind = 12 } - ss.SaveEvent(nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)}) + evt := nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)} + evt.Sign(nostr.Generate()) + ss.SaveEvent(evt) } list := make([]nostr.Event, 0, 20) for event := range ss.QueryEvents(nostr.Filter{}) { list = append(list, event) } + require.Len(t, list, 20) - if len(list) != 20 { - t.Fatalf("failed to load 20 events") - } if list[0].CreatedAt != 10018 || list[1].CreatedAt != 10016 || list[18].CreatedAt != 3 || list[19].CreatedAt != 1 { t.Fatalf("order is incorrect") } @@ -49,7 +50,5 @@ func TestBasicStuff(t *testing.T) { for event := range ss.QueryEvents(nostr.Filter{Since: &since}) { list = append(list, 
event) } - if len(list) != 5 { - t.Fatalf("should have gotten 5, not %d", len(list)) - } + require.Len(t, list, 5) } diff --git a/eventstore/test/first_test.go b/eventstore/test/first_test.go index 1240a75..84eebd5 100644 --- a/eventstore/test/first_test.go +++ b/eventstore/test/first_test.go @@ -43,10 +43,8 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } // query - w := eventstore.RelayWrapper{Store: db} { - results, err := w.QuerySync(ctx, nostr.Filter{}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{})) require.Len(t, results, len(allEvents)) require.ElementsMatch(t, allEvents, @@ -57,8 +55,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { { for i := 0; i < 10; i++ { since := nostr.Timestamp(i*10 + 1) - results, err := w.QuerySync(ctx, nostr.Filter{Since: &since}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Since: &since})) require.ElementsMatch(t, allEvents[i:], results, @@ -67,8 +64,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{IDs: []nostr.ID{allEvents[7].ID, allEvents[9].ID}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{IDs: []nostr.ID{allEvents[7].ID, allEvents[9].ID}})) require.Len(t, results, 2) require.ElementsMatch(t, []nostr.Event{allEvents[7], allEvents[9]}, @@ -77,8 +73,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{1}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{1}})) require.ElementsMatch(t, []nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]}, results, @@ -86,8 +81,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}}) - require.NoError(t, err) + results := 
slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}})) require.ElementsMatch(t, []nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]}, results, @@ -96,8 +90,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { { pk4 := nostr.GetPublicKey(sk4) - results, err := w.QuerySync(ctx, nostr.Filter{Authors: []nostr.PubKey{pk4}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Authors: []nostr.PubKey{pk4}})) require.ElementsMatch(t, []nostr.Event{allEvents[0], allEvents[3], allEvents[6], allEvents[9]}, results, @@ -106,8 +99,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { { pk3 := nostr.GetPublicKey(sk3) - results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}})) require.ElementsMatch(t, []nostr.Event{allEvents[2], allEvents[4], allEvents[8]}, results, @@ -117,9 +109,8 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { { pk3 := nostr.GetPublicKey(sk3) pk4 := nostr.GetPublicKey(sk4) - pk4[1] = 'a' - results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}}) - require.NoError(t, err) + pk4[1] = 9 // this is so it doesn't match + results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}})) require.ElementsMatch(t, []nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]}, results, @@ -127,8 +118,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}})) require.ElementsMatch(t, []nostr.Event{allEvents[2], allEvents[4], allEvents[6]}, 
results, @@ -141,8 +131,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { // query again { - results, err := w.QuerySync(ctx, nostr.Filter{}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{})) require.ElementsMatch(t, slices.Concat(allEvents[0:4], allEvents[6:]), results, @@ -150,8 +139,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}})) require.ElementsMatch(t, []nostr.Event{allEvents[2], allEvents[6]}, results, @@ -159,8 +147,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}})) require.ElementsMatch(t, []nostr.Event{allEvents[3]}, results, @@ -170,8 +157,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { { for i := 0; i < 4; i++ { until := nostr.Timestamp(i*10 + 1) - results, err := w.QuerySync(ctx, nostr.Filter{Until: &until}) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Until: &until})) require.ElementsMatch(t, allEvents[:i], @@ -203,12 +189,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{ - Tags: nostr.TagMap{"p": []string{p}}, - Kinds: []uint16{1984}, - Limit: 2, - }) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}}, Kinds: []uint16{1984}, Limit: 2})) require.ElementsMatch(t, []nostr.Event{newEvents[2], newEvents[1]}, results, @@ -216,11 +197,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := 
w.QuerySync(ctx, nostr.Filter{ - Tags: nostr.TagMap{"p": []string{p}, "t": []string{"x"}}, - Limit: 4, - }) - require.NoError(t, err) + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}, "t": []string{"x"}}, Limit: 4})) require.ElementsMatch(t, // the results won't be in canonical time order because this query is too awful, needs a kind []nostr.Event{newEvents[1]}, @@ -229,18 +206,12 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) { } { - results, err := w.QuerySync(ctx, nostr.Filter{ - Tags: nostr.TagMap{"p": []string{p, p2}}, - Kinds: []uint16{1}, - Limit: 4, - }) - require.NoError(t, err) - + results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p, p2}}, Kinds: []uint16{1}, Limit: 4})) for _, idx := range []int{5, 6, 7} { require.True(t, slices.ContainsFunc( results, - func(evt *nostr.Event) bool { return evt.ID == newEvents[idx].ID }, + func(evt nostr.Event) bool { return evt.ID == newEvents[idx].ID }, ), "'p' tag 3 query error") } diff --git a/eventstore/test/manyauthors_test.go b/eventstore/test/manyauthors_test.go index d543732..cb84cb2 100644 --- a/eventstore/test/manyauthors_test.go +++ b/eventstore/test/manyauthors_test.go @@ -53,10 +53,7 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) { } } - w := eventstore.RelayWrapper{Store: db} - - res := slices.Collect(w.QueryEvents(bigfilter)) - + res := slices.Collect(db.QueryEvents(bigfilter)) require.Len(t, res, limit) require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse)) slices.SortFunc(ordered, nostr.CompareEventReverse) diff --git a/eventstore/test/second_test.go b/eventstore/test/second_test.go index df73e99..d5c824a 100644 --- a/eventstore/test/second_test.go +++ b/eventstore/test/second_test.go @@ -43,7 +43,6 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) { require.NoError(t, err) } - w := eventstore.RelayWrapper{Store: db} pk3 := nostr.GetPublicKey(sk3) pk4 := nostr.GetPublicKey(sk4) 
eTags := make([]string, 20) @@ -69,12 +68,9 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) { t.Run("filter", func(t *testing.T) { for q, filter := range filters { - q := q - filter := filter label := fmt.Sprintf("filter %d: %s", q, filter) - t.Run(fmt.Sprintf("q-%d", q), func(t *testing.T) { - results := slices.Collect(w.QueryEvents(filter)) + results := slices.Collect(db.QueryEvents(filter)) require.NotEmpty(t, results, label) }) } diff --git a/eventstore/test/unbalanced_test.go b/eventstore/test/unbalanced_test.go index 55d97a6..3a3ef69 100644 --- a/eventstore/test/unbalanced_test.go +++ b/eventstore/test/unbalanced_test.go @@ -60,9 +60,7 @@ func unbalancedTest(t *testing.T, db eventstore.Store) { } require.Len(t, expected, limit) - w := eventstore.RelayWrapper{Store: db} - - res := slices.Collect(w.QueryEvents(bigfilter)) + res := slices.Collect(db.QueryEvents(bigfilter)) require.Equal(t, limit, len(res)) require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse)) diff --git a/keys.go b/keys.go index ab8e413..9bc1c16 100644 --- a/keys.go +++ b/keys.go @@ -21,7 +21,7 @@ func Generate() SecretKey { type SecretKey [32]byte -func (sk SecretKey) String() string { return hex.EncodeToString(sk[:]) } +func (sk SecretKey) String() string { return "sk::" + sk.Hex() } func (sk SecretKey) Hex() string { return hex.EncodeToString(sk[:]) } func (sk SecretKey) Public() PubKey { return GetPublicKey(sk) } @@ -53,7 +53,7 @@ var ZeroPK = [32]byte{} type PubKey [32]byte -func (pk PubKey) String() string { return hex.EncodeToString(pk[:]) } +func (pk PubKey) String() string { return "pk::" + pk.Hex() } func (pk PubKey) Hex() string { return hex.EncodeToString(pk[:]) } func PubKeyFromHex(pkh string) (PubKey, error) { diff --git a/khatru/adding.go b/khatru/adding.go index 43b3f15..74d2b7a 100644 --- a/khatru/adding.go +++ b/khatru/adding.go @@ -45,7 +45,6 @@ func (rl *Relay) handleNormal(ctx context.Context, evt nostr.Event) (skipBroadca } else { // otherwise it's 
a replaceable if nil != rl.ReplaceEvent { - fmt.Print("\nREPLACING .", evt.CreatedAt, "\n\n") if err := rl.ReplaceEvent(ctx, evt); err != nil { switch err { case eventstore.ErrDupEvent: diff --git a/khatru/relay_fuzz_test.go b/khatru/relay_fuzz_test.go new file mode 100644 index 0000000..7963225 --- /dev/null +++ b/khatru/relay_fuzz_test.go @@ -0,0 +1,113 @@ +package khatru + +import ( + "context" + "math" + "math/rand/v2" + "net/http/httptest" + "testing" + "time" + + "fiatjaf.com/nostr" + "fiatjaf.com/nostr/eventstore/lmdb" + "github.com/stretchr/testify/require" +) + +func FuzzReplaceableEvents(f *testing.F) { + f.Add(uint(1), uint(2)) + + f.Fuzz(func(t *testing.T, seed uint, nevents uint) { + if nevents == 0 { + return + } + + relay := NewRelay() + store := &lmdb.LMDBBackend{Path: "/tmp/fuzz"} + store.Init() + relay.UseEventstore(store) + + defer store.Close() + + // start test server + server := httptest.NewServer(relay) + defer server.Close() + + // create test keys + sk1 := nostr.Generate() + pk1 := nostr.GetPublicKey(sk1) + + // helper to create signed events + createEvent := func(sk nostr.SecretKey, kind uint16, content string, tags nostr.Tags) nostr.Event { + pk := nostr.GetPublicKey(sk) + evt := nostr.Event{ + PubKey: pk, + CreatedAt: nostr.Now(), + Kind: kind, + Tags: tags, + Content: content, + } + evt.Sign(sk) + return evt + } + + url := "ws" + server.URL[4:] + client1, err := nostr.RelayConnect(context.Background(), url, nostr.RelayOptions{}) + if err != nil { + t.Skip("failed to connect client1") + } + defer client1.Close() + + client2, err := nostr.RelayConnect(context.Background(), url, nostr.RelayOptions{}) + if err != nil { + t.Skip("failed to connect client2") + } + defer client2.Close() + + t.Run("replaceable events", func(t *testing.T) { + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + rnd := rand.New(rand.NewPCG(uint64(seed), 0)) + + newest := nostr.Timestamp(0) + for range nevents { + evt := 
createEvent(sk1, 0, `{"name":"blblbl"}`, nil) + evt.CreatedAt = nostr.Timestamp(rnd.Int64() % math.MaxUint32) + evt.Sign(sk1) + err = client1.Publish(ctx, evt) + if err != nil { + t.Fatalf("failed to publish event: %v", err) + } + + if evt.CreatedAt > newest { + newest = evt.CreatedAt + } + } + + // query to verify only the newest event exists + sub, err := client2.Subscribe(ctx, nostr.Filter{ + Authors: []nostr.PubKey{pk1}, + Kinds: []uint16{0}, + }, nostr.SubscriptionOptions{}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // should only get one event back (the newest one) + var receivedEvents []nostr.Event + for { + select { + case evt := <-sub.Events: + receivedEvents = append(receivedEvents, evt) + case <-sub.EndOfStoredEvents: + require.Len(t, receivedEvents, 1) + require.Equal(t, newest, receivedEvents[0].CreatedAt) + return + case <-ctx.Done(): + t.Fatal("timeout waiting for events") + } + } + }) + }) +} diff --git a/khatru/relay_test.go b/khatru/relay_test.go index 4cb497a..2dd8830 100644 --- a/khatru/relay_test.go +++ b/khatru/relay_test.go @@ -217,7 +217,7 @@ func TestBasicRelayFunctionality(t *testing.T) { receivedEvents = append(receivedEvents, evt) case <-sub.EndOfStoredEvents: if len(receivedEvents) != 1 { - t.Errorf("expected exactly 1 event, got %d", len(receivedEvents)) + t.Errorf("expected exactly 1 event, got %v", receivedEvents) } if len(receivedEvents) > 0 && receivedEvents[0].Content != `{"name":"newer"}` { t.Errorf("expected newest event content, got %s", receivedEvents[0].Content) diff --git a/khatru/testdata/fuzz/FuzzReplaceableEvents/00ff79377dab077d b/khatru/testdata/fuzz/FuzzReplaceableEvents/00ff79377dab077d new file mode 100644 index 0000000..59ed458 --- /dev/null +++ b/khatru/testdata/fuzz/FuzzReplaceableEvents/00ff79377dab077d @@ -0,0 +1,3 @@ +go test fuzz v1 +uint(25) +uint(223) diff --git a/types.go b/types.go index f2d120f..928add4 100644 --- a/types.go +++ b/types.go @@ -17,7 +17,7 @@ 
var ZeroID = [32]byte{} // ID represents an event id type ID [32]byte -func (id ID) String() string { return hex.EncodeToString(id[:]) } +func (id ID) String() string { return "id::" + id.Hex() } func (id ID) Hex() string { return hex.EncodeToString(id[:]) } func IDFromHex(idh string) (ID, error) {