eventstore tests.

fiatjaf
2025-04-18 11:27:22 -03:00
parent 32efaa7c58
commit 92c2de6294
32 changed files with 355 additions and 652 deletions

View File

@@ -0,0 +1,61 @@
package badger

import (
	"os"
	"testing"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
	"github.com/stretchr/testify/require"
)

func TestBasicStoreAndQuery(t *testing.T) {
	// create a temporary directory for the test database
	dir, err := os.MkdirTemp("", "badger-test-*")
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	// initialize the store
	db := &BadgerBackend{Path: dir}
	err = db.Init()
	require.NoError(t, err)
	defer db.Close()

	// create a test event
	evt := nostr.Event{
		Content:   "hello world",
		CreatedAt: 1000,
		Kind:      1,
		Tags:      nostr.Tags{},
	}
	err = evt.Sign(nostr.Generate())
	require.NoError(t, err)

	// save the event
	err = db.SaveEvent(evt)
	require.NoError(t, err)

	// try to save it again, should fail with ErrDupEvent
	err = db.SaveEvent(evt)
	require.Error(t, err)
	require.Equal(t, eventstore.ErrDupEvent, err)

	// query the event by its ID
	filter := nostr.Filter{
		IDs: []nostr.ID{evt.ID},
	}

	// collect results
	results := make([]nostr.Event, 0)
	for event := range db.QueryEvents(filter) {
		results = append(results, event)
	}

	// verify we got exactly one event and it matches
	require.Len(t, results, 1)
	require.Equal(t, evt.ID, results[0].ID)
	require.Equal(t, evt.Content, results[0].Content)
	require.Equal(t, evt.CreatedAt, results[0].CreatedAt)
	require.Equal(t, evt.Kind, results[0].Kind)
	require.Equal(t, evt.PubKey, results[0].PubKey)
}

View File

@@ -2,23 +2,18 @@ package badger
 import (
 	"cmp"
-	"context"
 	"encoding/binary"
-	"encoding/hex"
 	"fmt"
+	"slices"
 	"testing"
 	"time"

-	"github.com/dgraph-io/badger/v4"
-	"fiatjaf.com/nostr/eventstore"
 	"fiatjaf.com/nostr"
+	"github.com/dgraph-io/badger/v4"
 	"github.com/stretchr/testify/require"
-	"golang.org/x/exp/slices"
 )

func FuzzQuery(f *testing.F) {
-	ctx := context.Background()
 	f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1))
 	f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) {
 		total++
@@ -72,41 +67,41 @@ func FuzzQuery(f *testing.F) {
 		// ~ start actual test
 		filter := nostr.Filter{
-			Authors: make([]string, authors),
+			Authors: make([]nostr.PubKey, authors),
 			Limit:   int(limit),
 		}
-		maxKind := 1
+		var maxKind uint16 = 1
 		if kinds > 0 {
-			filter.Kinds = make([]int, kinds)
+			filter.Kinds = make([]uint16, kinds)
 			for i := range filter.Kinds {
-				filter.Kinds[i] = int(kindFactor) * i
+				filter.Kinds[i] = uint16(kindFactor) * uint16(i)
 			}
 			maxKind = filter.Kinds[len(filter.Kinds)-1]
 		}
 		for i := 0; i < int(authors); i++ {
-			sk := make([]byte, 32)
-			binary.BigEndian.PutUint32(sk, uint32(i%int(authors*seedFactor))+1)
-			pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk))
+			sk := nostr.SecretKey{}
+			binary.BigEndian.PutUint32(sk[:], uint32(i%int(authors*seedFactor))+1)
+			pk := nostr.GetPublicKey(sk)
 			filter.Authors[i] = pk
 		}
-		expected := make([]*nostr.Event, 0, total)
+		expected := make([]nostr.Event, 0, total)
 		for i := 0; i < int(total); i++ {
 			skseed := uint32(i%int(authors*seedFactor)) + 1
-			sk := make([]byte, 32)
-			binary.BigEndian.PutUint32(sk, skseed)
-			evt := &nostr.Event{
+			sk := nostr.SecretKey{}
+			binary.BigEndian.PutUint32(sk[:], skseed)
+			evt := nostr.Event{
 				CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i),
 				Content:   fmt.Sprintf("unbalanced %d", i),
 				Tags:      nostr.Tags{},
-				Kind:      i % maxKind,
+				Kind:      uint16(i) % maxKind,
 			}
-			err := evt.Sign(hex.EncodeToString(sk))
+			err := evt.Sign(sk)
 			require.NoError(t, err)
-			err = db.SaveEvent(ctx, evt)
+			err = db.SaveEvent(evt)
 			require.NoError(t, err)
 			if filter.Matches(evt) {
@@ -114,27 +109,25 @@ func FuzzQuery(f *testing.F) {
 			}
 		}
-		slices.SortFunc(expected, nostr.CompareEventPtrReverse)
+		slices.SortFunc(expected, nostr.CompareEventReverse)
 		if len(expected) > int(limit) {
 			expected = expected[0:limit]
 		}
-		w := eventstore.RelayWrapper{Store: db}
 		start := time.Now()
 		// fmt.Println(filter)
-		res, err := w.QuerySync(ctx, filter)
+		res := slices.Collect(db.QueryEvents(filter))
 		end := time.Now()
 		require.NoError(t, err)
 		require.Equal(t, len(expected), len(res), "number of results is different than expected")
 		require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long")
-		require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted")
+		require.True(t, slices.IsSortedFunc(res, func(a, b nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted")
 		nresults := len(expected)
-		getTimestamps := func(events []*nostr.Event) []nostr.Timestamp {
+		getTimestamps := func(events []nostr.Event) []nostr.Timestamp {
 			res := make([]nostr.Timestamp, len(events))
 			for i, evt := range events {
 				res[i] = evt.CreatedAt
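Everything below follows the same migration pattern as this file: the channel-and-context API (QueryEvents(ctx, filter) returning a channel, plus eventstore.RelayWrapper.QuerySync for collecting) gives way to a QueryEvents(filter) that returns an iter.Seq[nostr.Event] (Go 1.23 iterators). A minimal sketch of the three consumption styles these tests rely on; the consume function and its anonymous store interface are hypothetical, standing in for any of the backends touched by this commit:

package sketch

import (
	"iter"
	"slices"

	"fiatjaf.com/nostr"
)

func consume(store interface {
	QueryEvents(nostr.Filter) iter.Seq[nostr.Event]
}) {
	filter := nostr.Filter{Kinds: []uint16{1}, Limit: 10}

	// range directly over the sequence
	for evt := range store.QueryEvents(filter) {
		_ = evt
	}

	// collect everything into a slice (what RelayWrapper.QuerySync used to do)
	results := slices.Collect(store.QueryEvents(filter))
	_ = results

	// pull a single event (what a one-shot channel receive used to do)
	next, stop := iter.Pull(store.QueryEvents(filter))
	defer stop()
	if evt, ok := next(); ok {
		_ = evt
	}
}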

View File

@@ -4,11 +4,11 @@ import (
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"iter" "iter"
"slices"
"strconv" "strconv"
"strings" "strings"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"golang.org/x/exp/slices"
) )
func getTagIndexPrefix(tagValue string) ([]byte, int) { func getTagIndexPrefix(tagValue string) ([]byte, int) {

View File

@@ -6,12 +6,12 @@ import (
"fmt" "fmt"
"iter" "iter"
"log" "log"
"slices"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary" "fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/eventstore/internal" "fiatjaf.com/nostr/eventstore/internal"
"github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4"
"golang.org/x/exp/slices"
) )
var batchFilled = errors.New("batch-filled") var batchFilled = errors.New("batch-filled")

View File

@@ -2,11 +2,9 @@ package badger
 import (
 	"encoding/binary"
-	"encoding/hex"
-	"fmt"

-	"fiatjaf.com/nostr/eventstore/internal"
 	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore/internal"
 )

type query struct {
@@ -51,13 +49,10 @@ func prepareQueries(filter nostr.Filter) (
 	if len(filter.IDs) > 0 {
 		queries = make([]query, len(filter.IDs))
-		for i, idHex := range filter.IDs {
+		for i, id := range filter.IDs {
 			prefix := make([]byte, 1+8)
 			prefix[0] = indexIdPrefix
-			if len(idHex) != 64 {
-				return nil, nil, 0, fmt.Errorf("invalid id '%s'", idHex)
-			}
-			hex.Decode(prefix[1:], []byte(idHex[0:8*2]))
+			copy(prefix[1:1+8], id[0:8])
 			queries[i] = query{i: i, prefix: prefix, skipTimestamp: true}
 		}
@@ -96,27 +91,20 @@ pubkeyMatching:
 	if len(filter.Authors) > 0 {
 		if len(filter.Kinds) == 0 {
 			queries = make([]query, len(filter.Authors))
-			for i, pubkeyHex := range filter.Authors {
-				if len(pubkeyHex) != 64 {
-					return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex)
-				}
+			for i, pk := range filter.Authors {
 				prefix := make([]byte, 1+8)
 				prefix[0] = indexPubkeyPrefix
-				hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2]))
+				copy(prefix[1:1+8], pk[0:8])
 				queries[i] = query{i: i, prefix: prefix}
 			}
 		} else {
 			queries = make([]query, len(filter.Authors)*len(filter.Kinds))
 			i := 0
-			for _, pubkeyHex := range filter.Authors {
+			for _, pk := range filter.Authors {
 				for _, kind := range filter.Kinds {
-					if len(pubkeyHex) != 64 {
-						return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex)
-					}
 					prefix := make([]byte, 1+8+2)
 					prefix[0] = indexPubkeyKindPrefix
-					hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2]))
+					copy(prefix[1:1+8], pk[0:8])
 					binary.BigEndian.PutUint16(prefix[1+8:], uint16(kind))
 					queries[i] = query{i: i, prefix: prefix}
 					i++

View File

@@ -1,12 +1,11 @@
 package bluge

 import (
-	"context"
 	"os"
 	"testing"

-	"fiatjaf.com/nostr/eventstore/badger"
 	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore/badger"
 	"github.com/stretchr/testify/assert"
 )
@@ -25,9 +24,7 @@ func TestBlugeFlow(t *testing.T) {
 	bl.Init()
 	defer bl.Close()

-	ctx := context.Background()
-	willDelete := make([]*nostr.Event, 0, 3)
+	willDelete := make([]nostr.Event, 0, 3)

 	for i, content := range []string{
 		"good morning mr paper maker",
@@ -36,11 +33,11 @@ func TestBlugeFlow(t *testing.T) {
"tonight we dine in my house", "tonight we dine in my house",
"the paper in this house if very good, mr", "the paper in this house if very good, mr",
} { } {
evt := &nostr.Event{Content: content, Tags: nostr.Tags{}} evt := nostr.Event{Content: content, Tags: nostr.Tags{}}
evt.Sign("0000000000000000000000000000000000000000000000000000000000000001") evt.Sign(nostr.MustSecretKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001"))
bb.SaveEvent(ctx, evt) bb.SaveEvent(evt)
bl.SaveEvent(ctx, evt) bl.SaveEvent(evt)
if i%2 == 0 { if i%2 == 0 {
willDelete = append(willDelete, evt) willDelete = append(willDelete, evt)
@@ -48,33 +45,26 @@ func TestBlugeFlow(t *testing.T) {
 	}

 	{
-		ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"})
-		if err != nil {
-			t.Fatalf("QueryEvents error: %s", err)
-			return
-		}
 		n := 0
-		for range ch {
+		for range bl.QueryEvents(nostr.Filter{Search: "good"}) {
 			n++
 		}
 		assert.Equal(t, 3, n)
 	}

 	for _, evt := range willDelete {
-		bl.DeleteEvent(ctx, evt)
+		bl.DeleteEvent(evt.ID)
 	}

 	{
-		ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"})
-		if err != nil {
-			t.Fatalf("QueryEvents error: %s", err)
-			return
-		}
 		n := 0
-		for res := range ch {
+		for res := range bl.QueryEvents(nostr.Filter{Search: "good"}) {
 			n++
 			assert.Equal(t, res.Content, "good night")
-			assert.Equal(t, res.PubKey, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
+			assert.Equal(t,
+				nostr.MustPubKeyFromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"),
+				res.PubKey,
+			)
 		}
 		assert.Equal(t, 1, n)
 	}

View File

@@ -1,6 +1,8 @@
 package bluge

 import (
+	"encoding/hex"

 	"fiatjaf.com/nostr"
 )
@@ -20,5 +22,7 @@ func (id eventIdentifier) Field() string {
 }

 func (id eventIdentifier) Term() []byte {
-	return id[:]
+	idhex := make([]byte, 64)
+	hex.Encode(idhex, id[:])
+	return idhex
 }

View File

@@ -90,9 +90,12 @@ func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 		var next *search.DocumentMatch
 		for next, err = dmi.Next(); next != nil; next, err = dmi.Next() {
 			next.VisitStoredFields(func(field string, value []byte) bool {
-				for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{nostr.ID(value)}}) {
+				id, err := nostr.IDFromHex(string(value))
+				if err == nil {
+					for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}) {
 						yield(evt)
 					}
+				}
 				return false
 			})
 		}
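Together with the eventIdentifier change above, this makes the stored term and the query side two halves of one hex roundtrip: Term() writes the id as 64 hex characters into the bluge index, and QueryEvents() decodes it back with nostr.IDFromHex before hitting the raw event store. A small sketch of that invariant (the hexRoundtrip helper is illustrative, not part of the diff):

package sketch

import (
	"encoding/hex"

	"fiatjaf.com/nostr"
)

// hexRoundtrip: encoding an id the way Term() now does and decoding it
// the way QueryEvents() does must return the original id
func hexRoundtrip(id nostr.ID) (nostr.ID, error) {
	idhex := make([]byte, 64) // 32 raw bytes -> 64 hex characters
	hex.Encode(idhex, id[:])
	return nostr.IDFromHex(string(idhex))
}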

View File

@@ -4,13 +4,13 @@ import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"encoding/hex" "encoding/hex"
"slices"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary" "fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/nip45" "fiatjaf.com/nostr/nip45"
"fiatjaf.com/nostr/nip45/hyperloglog" "fiatjaf.com/nostr/nip45/hyperloglog"
"github.com/PowerDNS/lmdb-go/lmdb" "github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
) )
func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) { func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {

View File

@@ -40,13 +40,13 @@ func (b *LMDBBackend) delete(txn *lmdb.Txn, id nostr.ID) error {
 	for k := range b.getIndexKeysForEvent(evt) {
 		err := txn.Del(k.dbi, k.key, idx)
 		if err != nil {
-			return fmt.Errorf("failed to delete index entry %s for %x: %w", b.keyName(k), evt.ID[0:8*2], err)
+			return fmt.Errorf("failed to delete index entry %s for %x: %w", b.keyName(k), evt.ID[0:8], err)
 		}
 	}

 	// delete the raw event
 	if err := txn.Del(b.rawEventStore, idx, nil); err != nil {
-		return fmt.Errorf("failed to delete raw event %x (idx %x): %w", evt.ID[0:8*2], idx, err)
+		return fmt.Errorf("failed to delete raw event %x (idx %x): %w", evt.ID[0:8], idx, err)
 	}

 	return nil

View File

@@ -2,24 +2,19 @@ package lmdb
 import (
 	"cmp"
-	"context"
 	"encoding/binary"
-	"encoding/hex"
 	"fmt"
 	"os"
+	"slices"
 	"testing"
 	"time"

-	"github.com/PowerDNS/lmdb-go/lmdb"
-	"fiatjaf.com/nostr/eventstore"
 	"fiatjaf.com/nostr"
+	"github.com/PowerDNS/lmdb-go/lmdb"
 	"github.com/stretchr/testify/require"
-	"golang.org/x/exp/slices"
 )

func FuzzQuery(f *testing.F) {
-	ctx := context.Background()
 	f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1))
 	f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) {
 		total++
@@ -51,41 +46,41 @@ func FuzzQuery(f *testing.F) {
 		// ~ start actual test
 		filter := nostr.Filter{
-			Authors: make([]string, authors),
+			Authors: make([]nostr.PubKey, authors),
 			Limit:   int(limit),
 		}
-		maxKind := 1
+		var maxKind uint16 = 1
 		if kinds > 0 {
-			filter.Kinds = make([]int, kinds)
+			filter.Kinds = make([]uint16, kinds)
 			for i := range filter.Kinds {
-				filter.Kinds[i] = int(kindFactor) * i
+				filter.Kinds[i] = uint16(int(kindFactor) * i)
 			}
 			maxKind = filter.Kinds[len(filter.Kinds)-1]
 		}
 		for i := 0; i < int(authors); i++ {
-			sk := make([]byte, 32)
-			binary.BigEndian.PutUint32(sk, uint32(i%int(authors*seedFactor))+1)
-			pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk))
+			var sk nostr.SecretKey
+			binary.BigEndian.PutUint32(sk[:], uint32(i%int(authors*seedFactor))+1)
+			pk := nostr.GetPublicKey(sk)
 			filter.Authors[i] = pk
 		}
-		expected := make([]*nostr.Event, 0, total)
+		expected := make([]nostr.Event, 0, total)
 		for i := 0; i < int(total); i++ {
 			skseed := uint32(i%int(authors*seedFactor)) + 1
-			sk := make([]byte, 32)
-			binary.BigEndian.PutUint32(sk, skseed)
-			evt := &nostr.Event{
+			sk := nostr.SecretKey{}
+			binary.BigEndian.PutUint32(sk[:], skseed)
+			evt := nostr.Event{
 				CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i),
 				Content:   fmt.Sprintf("unbalanced %d", i),
 				Tags:      nostr.Tags{},
-				Kind:      i % maxKind,
+				Kind:      uint16(i) % maxKind,
 			}
-			err := evt.Sign(hex.EncodeToString(sk))
+			err := evt.Sign(sk)
 			require.NoError(t, err)
-			err = db.SaveEvent(ctx, evt)
+			err = db.SaveEvent(evt)
 			require.NoError(t, err)
 			if filter.Matches(evt) {
@@ -93,25 +88,21 @@ func FuzzQuery(f *testing.F) {
 			}
 		}
-		slices.SortFunc(expected, nostr.CompareEventPtrReverse)
+		slices.SortFunc(expected, nostr.CompareEventReverse)
 		if len(expected) > int(limit) {
 			expected = expected[0:limit]
 		}
-		w := eventstore.RelayWrapper{Store: db}
 		start := time.Now()
-		res, err := w.QuerySync(ctx, filter)
+		res := slices.Collect(db.QueryEvents(filter))
 		end := time.Now()
-		require.NoError(t, err)
 		require.Equal(t, len(expected), len(res), "number of results is different than expected")
 		require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long")
 		nresults := len(expected)
-		getTimestamps := func(events []*nostr.Event) []nostr.Timestamp {
+		getTimestamps := func(events []nostr.Event) []nostr.Timestamp {
 			res := make([]nostr.Timestamp, len(events))
 			for i, evt := range events {
 				res[i] = evt.CreatedAt
@@ -132,6 +123,6 @@ func FuzzQuery(f *testing.F) {
 			require.True(t, filter.Matches(evt), "event %s doesn't match filter %s", evt, filter)
 		}
-		require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted")
+		require.True(t, slices.IsSortedFunc(res, func(a, b nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted")
 	})
}

View File

@@ -6,12 +6,12 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"iter" "iter"
"slices"
"strconv" "strconv"
"strings" "strings"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"github.com/PowerDNS/lmdb-go/lmdb" "github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
) )
// this iterator always goes backwards // this iterator always goes backwards
@@ -64,7 +64,7 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
 	{
 		// ~ by pubkey+date
 		k := make([]byte, 8+4)
-		hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2]))
+		copy(k[0:8], evt.PubKey[0:8])
 		binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt))
 		if !yield(key{dbi: b.indexPubkey, key: k[0 : 8+4]}) {
 			return
@@ -84,7 +84,7 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
 	{
 		// ~ by pubkey+kind+date
 		k := make([]byte, 8+2+4)
-		hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2]))
+		copy(k[0:8], evt.PubKey[0:8])
 		binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind))
 		binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt))
 		if !yield(key{dbi: b.indexPubkeyKind, key: k[0 : 8+2+4]}) {

View File

@@ -99,7 +99,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
 		results[q] = make([]internal.IterEvent, 0, batchSizePerQuery*2)
 	}

-	// fmt.Println("queries", len(queries))
+	// fmt.Println("queries", filter, len(queries))
 	for c := 0; ; c++ {
 		batchSizePerQuery = internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -113,7 +113,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
 			if oldest.Q == q && remainingUnexhausted > 1 {
 				continue
 			}
-			// fmt.Println("  query", q, unsafe.Pointer(&results[q]), hex.EncodeToString(query.prefix), len(results[q]))
+			// fmt.Println("  query", q, unsafe.Pointer(&results[q]), b.dbiName(query.dbi), hex.EncodeToString(query.prefix), len(results[q]))
 			it := iterators[q]
 			pulledThisIteration := 0

View File

@@ -53,14 +53,9 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 	if filter.IDs != nil {
 		// when there are ids we ignore everything else
 		queries = make([]query, len(filter.IDs))
-		for i, idHex := range filter.IDs {
-			if len(idHex) != 64 {
-				return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid id '%s'", idHex)
-			}
+		for i, id := range filter.IDs {
 			prefix := make([]byte, 8)
-			if _, err := hex.Decode(prefix[0:8], []byte(idHex[0:8*2])); err != nil {
-				return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid id '%s'", idHex)
-			}
+			copy(prefix[0:8], id[0:8])
 			queries[i] = query{i: i, dbi: b.indexId, prefix: prefix[0:8], keySize: 8, timestampSize: 0}
 		}
 		return queries, nil, nil, "", nil, 0, nil
@@ -161,29 +156,17 @@ pubkeyMatching:
 		if len(filter.Kinds) == 0 {
 			// will use pubkey index
 			queries = make([]query, len(filter.Authors))
-			for i, pubkeyHex := range filter.Authors {
-				if len(pubkeyHex) != 64 {
-					return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-				}
-				prefix := make([]byte, 8)
-				if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil {
-					return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-				}
-				queries[i] = query{i: i, dbi: b.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4}
+			for i, pk := range filter.Authors {
+				queries[i] = query{i: i, dbi: b.indexPubkey, prefix: pk[0:8], keySize: 8 + 4, timestampSize: 4}
 			}
 		} else {
 			// will use pubkeyKind index
 			queries = make([]query, len(filter.Authors)*len(filter.Kinds))
 			i := 0
-			for _, pubkeyHex := range filter.Authors {
+			for _, pk := range filter.Authors {
 				for _, kind := range filter.Kinds {
-					if len(pubkeyHex) != 64 {
-						return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-					}
 					prefix := make([]byte, 8+2)
-					if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil {
-						return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-					}
+					copy(prefix[0:8], pk[0:8])
 					binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
 					queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4}
 					i++

View File

@@ -12,7 +12,7 @@ import (
 func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
 	// sanity checking
 	if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
-		return fmt.Errorf("event with values out of expected boundaries")
+		return fmt.Errorf("event with values out of expected boundaries %d/%d", evt.CreatedAt, evt.Kind)
 	}

 	return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {

View File

@@ -13,7 +13,7 @@ import (
 func (b *LMDBBackend) SaveEvent(evt nostr.Event) error {
 	// sanity checking
 	if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
-		return fmt.Errorf("event with values out of expected boundaries")
+		return fmt.Errorf("event with values out of expected boundaries %d/%d", evt.CreatedAt, evt.Kind)
 	}

 	return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {

View File

@@ -1,8 +1,8 @@
 package mmm

 import (
-	"context"
 	"fmt"
+	"iter"
 	"math/rand/v2"
 	"os"
 	"slices"
@@ -47,11 +47,10 @@ func FuzzTest(f *testing.F) {
 	}

 	// create test events
-	ctx := context.Background()
-	sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb"
-	storedIds := make([]string, nevents)
-	nTags := make(map[string]int)
-	storedByLayer := make(map[string][]string)
+	sk := nostr.MustSecretKeyFromHex("945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb")
+	storedIds := make([]nostr.ID, nevents)
+	nTags := make(map[nostr.ID]int)
+	storedByLayer := make(map[string][]nostr.ID)

 	// create n events with random combinations of tags
 	for i := 0; i < int(nevents); i++ {
@@ -68,9 +67,9 @@ func FuzzTest(f *testing.F) {
 			}
 		}

-		evt := &nostr.Event{
+		evt := nostr.Event{
 			CreatedAt: nostr.Timestamp(i),
-			Kind:      i, // hack to query by serial id
+			Kind:      uint16(i), // hack to query by serial id
 			Tags:      tags,
 			Content:   fmt.Sprintf("test content %d", i),
 		}
@@ -78,23 +77,20 @@ func FuzzTest(f *testing.F) {
 		for _, layer := range mmm.layers {
 			if evt.Tags.FindWithValue("t", layer.name) != nil {
-				err := layer.SaveEvent(ctx, evt)
+				err := layer.SaveEvent(evt)
 				require.NoError(t, err)
 				storedByLayer[layer.name] = append(storedByLayer[layer.name], evt.ID)
 			}
 		}
-		storedIds = append(storedIds, evt.ID)
+		storedIds[i] = evt.ID
 		nTags[evt.ID] = len(evt.Tags)
 	}

 	// verify each layer has the correct events
 	for _, layer := range mmm.layers {
-		results, err := layer.QueryEvents(ctx, nostr.Filter{})
-		require.NoError(t, err)
 		count := 0
-		for evt := range results {
+		for evt := range layer.QueryEvents(nostr.Filter{}) {
 			require.True(t, evt.Tags.ContainsAny("t", []string{layer.name}))
 			count++
 		}
@@ -102,7 +98,7 @@ func FuzzTest(f *testing.F) {
 	}

 	// randomly select n events to delete from random layers
-	deleted := make(map[string][]*IndexingLayer)
+	deleted := make(map[nostr.ID][]*IndexingLayer)
 	for range ndeletes {
 		id := storedIds[rnd.Int()%len(storedIds)]
@@ -117,7 +113,7 @@ func FuzzTest(f *testing.F) {
 			require.Contains(t, layers, layer)

 			// delete now
-			layer.DeleteEvent(ctx, evt)
+			layer.DeleteEvent(evt.ID)
 			deleted[id] = append(deleted[id], layer)
 		} else {
 			// was never saved to this in the first place
@@ -152,16 +148,16 @@ func FuzzTest(f *testing.F) {
 		for _, layer := range mmm.layers {
 			// verify event still accessible from other layers
 			if slices.Contains(foundlayers, layer) {
-				ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack
-				require.NoError(t, err)
-				fetched := <-ch
-				require.NotNil(t, fetched)
+				next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack
+				_, fetched := next()
+				require.True(t, fetched)
+				stop()
 			} else {
 				// and not accessible from this layer we just deleted
-				ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack
-				require.NoError(t, err)
-				fetched := <-ch
-				require.Nil(t, fetched)
+				next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack
+				_, fetched := next()
+				require.False(t, fetched)
+				stop()
 			}
 		}
 	}
@@ -169,11 +165,8 @@ func FuzzTest(f *testing.F) {
 	// now delete a layer and events that only exist in that layer should vanish
 	layer := mmm.layers[rnd.Int()%len(mmm.layers)]
-	ch, err := layer.QueryEvents(ctx, nostr.Filter{})
-	require.NoError(t, err)
-	eventsThatShouldVanish := make([]string, 0, nevents/2)
-	for evt := range ch {
+	eventsThatShouldVanish := make([]nostr.ID, 0, nevents/2)
+	for evt := range layer.QueryEvents(nostr.Filter{}) {
 		if len(evt.Tags) == 1+len(deleted[evt.ID]) {
 			eventsThatShouldVanish = append(eventsThatShouldVanish, evt.ID)
 		}

View File

@@ -51,7 +51,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
 	{
 		// ~ by pubkey+date
 		k := make([]byte, 8+4)
-		hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2]))
+		copy(k[0:8], evt.PubKey[0:8])
 		binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt))
 		if !yield(key{dbi: il.indexPubkey, key: k[0 : 8+4]}) {
 			return
@@ -71,7 +71,7 @@ func (il *IndexingLayer) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
 	{
 		// ~ by pubkey+kind+date
 		k := make([]byte, 8+2+4)
-		hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2]))
+		copy(k[0:8], evt.PubKey[0:8])
 		binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind))
 		binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt))
 		if !yield(key{dbi: il.indexPubkeyKind, key: k[0 : 8+2+4]}) {

View File

@@ -1,386 +0,0 @@
package mmm
import (
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"os"
"testing"
"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr"
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
)
func TestMultiLayerIndexing(t *testing.T) {
// Create a temporary directory for the test
tmpDir := "/tmp/eventstore-mmm-test"
os.RemoveAll(tmpDir)
logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr})
// initialize MMM with three layers:
// 1. odd timestamps layer
// 2. even timestamps layer
// 3. all events layer
mmm := &MultiMmapManager{
Dir: tmpDir,
Logger: &logger,
}
err := mmm.Init()
require.NoError(t, err)
defer mmm.Close()
// create layers
err = mmm.EnsureLayer("odd", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool {
return evt.CreatedAt%2 == 1
},
})
require.NoError(t, err)
err = mmm.EnsureLayer("even", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool {
return evt.CreatedAt%2 == 0
},
})
require.NoError(t, err)
err = mmm.EnsureLayer("all", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool {
return true
},
})
require.NoError(t, err)
// create test events
ctx := context.Background()
baseTime := nostr.Timestamp(0)
sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb"
events := make([]*nostr.Event, 10)
for i := 0; i < 10; i++ {
evt := &nostr.Event{
CreatedAt: baseTime + nostr.Timestamp(i),
Kind: 1,
Tags: nostr.Tags{},
Content: "test content",
}
evt.Sign(sk)
events[i] = evt
stored, err := mmm.StoreGlobal(ctx, evt)
require.NoError(t, err)
require.True(t, stored)
}
{
// query odd layer
oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
oddCount := 0
for evt := range oddResults {
require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(1))
oddCount++
}
require.Equal(t, 5, oddCount)
}
{
// query even layer
evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
evenCount := 0
for evt := range evenResults {
require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(0))
evenCount++
}
require.Equal(t, 5, evenCount)
}
{
// query all layer
allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
allCount := 0
for range allResults {
allCount++
}
require.Equal(t, 10, allCount)
}
// delete some events
err = mmm.layers[0].DeleteEvent(ctx, events[1]) // odd timestamp
require.NoError(t, err)
err = mmm.layers[1].DeleteEvent(ctx, events[2]) // even timestamp
// verify deletions
{
oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
oddCount := 0
for range oddResults {
oddCount++
}
require.Equal(t, 4, oddCount)
}
{
evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
evenCount := 0
for range evenResults {
evenCount++
}
require.Equal(t, 4, evenCount)
}
{
allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
allCount := 0
for range allResults {
allCount++
}
require.Equal(t, 10, allCount)
}
// save events directly to layers regardless of timestamp
{
oddEvent := &nostr.Event{
CreatedAt: baseTime + 100, // even timestamp
Kind: 1,
Content: "forced odd",
}
oddEvent.Sign(sk)
err = mmm.layers[0].SaveEvent(ctx, oddEvent) // save even timestamp to odd layer
require.NoError(t, err)
// it is added to the odd il
oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
oddCount := 0
for range oddResults {
oddCount++
}
require.Equal(t, 5, oddCount)
// it doesn't affect the even il
evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{
Kinds: []int{1},
})
require.NoError(t, err)
evenCount := 0
for range evenResults {
evenCount++
}
require.Equal(t, 4, evenCount)
}
// test replaceable events
for _, layer := range mmm.layers {
replaceable := &nostr.Event{
CreatedAt: baseTime + 0,
Kind: 0,
Content: fmt.Sprintf("first"),
}
replaceable.Sign(sk)
err := layer.ReplaceEvent(ctx, replaceable)
require.NoError(t, err)
}
// replace events alternating between layers
for i := range mmm.layers {
content := fmt.Sprintf("last %d", i)
newEvt := &nostr.Event{
CreatedAt: baseTime + 1000,
Kind: 0,
Content: content,
}
newEvt.Sign(sk)
layer := mmm.layers[i]
err = layer.ReplaceEvent(ctx, newEvt)
require.NoError(t, err)
// verify replacement in the layer that did it
results, err := layer.QueryEvents(ctx, nostr.Filter{
Kinds: []int{0},
})
require.NoError(t, err)
count := 0
for evt := range results {
require.Equal(t, content, evt.Content)
count++
}
require.Equal(t, 1, count)
// verify other layers still have the old version
for j := 0; j < 3; j++ {
if mmm.layers[j] == layer {
continue
}
results, err := mmm.layers[j].QueryEvents(ctx, nostr.Filter{
Kinds: []int{0},
})
require.NoError(t, err)
count := 0
for evt := range results {
if i < j {
require.Equal(t, "first", evt.Content)
} else {
require.Equal(t, evt.Content, fmt.Sprintf("last %d", j))
}
count++
}
require.Equal(t, 1, count, "%d/%d", i, j)
}
}
}
func TestLayerReferenceTracking(t *testing.T) {
// Create a temporary directory for the test
tmpDir, err := os.MkdirTemp("", "mmm-test-*")
require.NoError(t, err)
defer os.RemoveAll(tmpDir)
logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr})
// initialize MMM with three layers
mmm := &MultiMmapManager{
Dir: tmpDir,
Logger: &logger,
}
err = mmm.Init()
require.NoError(t, err)
defer mmm.Close()
// create three layers
err = mmm.EnsureLayer("layer1", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
})
require.NoError(t, err)
err = mmm.EnsureLayer("layer2", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
})
require.NoError(t, err)
err = mmm.EnsureLayer("layer3", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
})
require.NoError(t, err)
err = mmm.EnsureLayer("layer4", &IndexingLayer{
MaxLimit: 100,
ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
})
require.NoError(t, err)
// create test events
ctx := context.Background()
sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb"
evt1 := &nostr.Event{
CreatedAt: 1000,
Kind: 1,
Tags: nostr.Tags{},
Content: "event 1",
}
evt1.Sign(sk)
evt2 := &nostr.Event{
CreatedAt: 2000,
Kind: 1,
Tags: nostr.Tags{},
Content: "event 2",
}
evt2.Sign(sk)
// save evt1 to layer1
err = mmm.layers[0].SaveEvent(ctx, evt1)
require.NoError(t, err)
// save evt1 to layer2
err = mmm.layers[1].SaveEvent(ctx, evt1)
require.NoError(t, err)
// save evt1 to layer4
err = mmm.layers[0].SaveEvent(ctx, evt1)
require.NoError(t, err)
// delete evt1 from layer1
err = mmm.layers[0].DeleteEvent(ctx, evt1)
require.NoError(t, err)
// save evt2 to layer3
err = mmm.layers[2].SaveEvent(ctx, evt2)
require.NoError(t, err)
// save evt2 to layer4
err = mmm.layers[3].SaveEvent(ctx, evt2)
require.NoError(t, err)
// save evt2 to layer3 again
err = mmm.layers[2].SaveEvent(ctx, evt2)
require.NoError(t, err)
// delete evt1 from layer4
err = mmm.layers[3].DeleteEvent(ctx, evt1)
require.NoError(t, err)
// verify the state of the indexId database
err = mmm.lmdbEnv.View(func(txn *lmdb.Txn) error {
cursor, err := txn.OpenCursor(mmm.indexId)
if err != nil {
return err
}
defer cursor.Close()
count := 0
for k, v, err := cursor.Get(nil, nil, lmdb.First); err == nil; k, v, err = cursor.Get(nil, nil, lmdb.Next) {
count++
if hex.EncodeToString(k) == evt1.ID[:16] {
// evt1 should only reference layer2
require.Equal(t, 14, len(v), "evt1 should have one layer reference")
layerRef := binary.BigEndian.Uint16(v[12:14])
require.Equal(t, mmm.layers[1].id, layerRef, "evt1 should reference layer2")
} else if hex.EncodeToString(k) == evt2.ID[:16] {
// evt2 should have references to layer3 and layer4
require.Equal(t, 16, len(v), "evt2 should have two layer references")
layer3Ref := binary.BigEndian.Uint16(v[12:14])
require.Equal(t, mmm.layers[2].id, layer3Ref, "evt2 should reference layer3")
layer4Ref := binary.BigEndian.Uint16(v[14:16])
require.Equal(t, mmm.layers[3].id, layer4Ref, "evt2 should reference layer4")
} else {
t.Errorf("unexpected event in indexId: %x", k)
}
}
require.Equal(t, 2, count, "should have exactly two events in indexId")
return nil
})
require.NoError(t, err)
}

View File

@@ -145,29 +145,19 @@ pubkeyMatching:
 		if len(filter.Kinds) == 0 {
 			// will use pubkey index
 			queries = make([]query, len(filter.Authors))
-			for i, pubkeyHex := range filter.Authors {
-				if len(pubkeyHex) != 64 {
-					return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-				}
+			for i, pk := range filter.Authors {
 				prefix := make([]byte, 8)
-				if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil {
-					return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-				}
+				copy(prefix[0:8], pk[0:8])
 				queries[i] = query{i: i, dbi: il.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4}
 			}
 		} else {
 			// will use pubkeyKind index
 			queries = make([]query, len(filter.Authors)*len(filter.Kinds))
 			i := 0
-			for _, pubkeyHex := range filter.Authors {
+			for _, pk := range filter.Authors {
 				for _, kind := range filter.Kinds {
-					if len(pubkeyHex) != 64 {
-						return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-					}
 					prefix := make([]byte, 8+2)
-					if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil {
-						return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex)
-					}
+					copy(prefix[0:8], pk[0:8])
 					binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind))
 					queries[i] = query{i: i, dbi: il.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4}
 					i++

View File

@@ -5,12 +5,12 @@ import (
"cmp" "cmp"
"fmt" "fmt"
"iter" "iter"
"slices"
"sync" "sync"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore" "fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/internal" "fiatjaf.com/nostr/eventstore/internal"
"golang.org/x/exp/slices"
) )
var _ eventstore.Store = (*SliceStore)(nil) var _ eventstore.Store = (*SliceStore)(nil)
@@ -80,6 +80,13 @@ func (b *SliceStore) CountEvents(filter nostr.Filter) (uint32, error) {
 }

 func (b *SliceStore) SaveEvent(evt nostr.Event) error {
+	b.Lock()
+	defer b.Unlock()
+	return b.save(evt)
+}
+
+func (b *SliceStore) save(evt nostr.Event) error {
 	idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
 	if found {
 		return eventstore.ErrDupEvent
@@ -93,8 +100,22 @@ func (b *SliceStore) SaveEvent(evt nostr.Event) error {
 }

 func (b *SliceStore) DeleteEvent(id nostr.ID) error {
-	idx, found := slices.BinarySearchFunc(b.internal, id, eventIDComparator)
-	if !found {
+	b.Lock()
+	defer b.Unlock()
+	return b.delete(id)
+}
+
+func (b *SliceStore) delete(id nostr.ID) error {
+	var idx int = -1
+	for i, event := range b.internal {
+		if event.ID == id {
+			idx = i
+			break
+		}
+	}
+	if idx == -1 {
 		// we don't have this event
 		return nil
 	}
@@ -117,7 +138,7 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
 	shouldStore := true
 	for previous := range b.QueryEvents(filter) {
 		if internal.IsOlder(previous, evt) {
-			if err := b.DeleteEvent(previous.ID); err != nil {
+			if err := b.delete(previous.ID); err != nil {
 				return fmt.Errorf("failed to delete event for replacing: %w", err)
 			}
 		} else {
@@ -126,7 +147,7 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
 	}
 	if shouldStore {
-		if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
+		if err := b.save(evt); err != nil && err != eventstore.ErrDupEvent {
 			return fmt.Errorf("failed to save: %w", err)
 		}
 	}
@@ -135,17 +156,13 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
 }

 func eventTimestampComparator(e nostr.Event, t nostr.Timestamp) int {
-	return int(t) - int(e.CreatedAt)
-}
-
-func eventIDComparator(e nostr.Event, i nostr.ID) int {
-	return bytes.Compare(i[:], e.ID[:])
+	return cmp.Compare(t, e.CreatedAt)
 }

 func eventComparator(a nostr.Event, b nostr.Event) int {
-	c := cmp.Compare(b.CreatedAt, a.CreatedAt)
-	if c != 0 {
-		return c
+	v := cmp.Compare(b.CreatedAt, a.CreatedAt)
+	if v == 0 {
+		v = bytes.Compare(b.ID[:], a.ID[:])
 	}
-	return bytes.Compare(b.ID[:], a.ID[:])
+	return v
 }
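The rewritten comparator keeps the internal slice ordered newest-first (descending CreatedAt, ties broken by descending id bytes); that invariant is what lets save find its insertion point with a binary search, while the new delete falls back to a linear scan by id. A standalone sketch of that ordering; the insert helper is illustrative, not part of SliceStore:

package sketch

import (
	"bytes"
	"cmp"
	"slices"

	"fiatjaf.com/nostr"
)

// same ordering as eventComparator above: newest first, ties broken
// by descending id bytes
func eventComparator(a, b nostr.Event) int {
	v := cmp.Compare(b.CreatedAt, a.CreatedAt)
	if v == 0 {
		v = bytes.Compare(b.ID[:], a.ID[:])
	}
	return v
}

// insert keeps the slice sorted, mirroring what save does; `events`
// stands in for SliceStore's internal slice
func insert(events []nostr.Event, evt nostr.Event) []nostr.Event {
	idx, found := slices.BinarySearchFunc(events, evt, eventComparator)
	if found {
		return events // exact duplicate, like eventstore.ErrDupEvent
	}
	return slices.Insert(events, idx, evt)
}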

View File

@@ -4,6 +4,7 @@ import (
"testing" "testing"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"github.com/stretchr/testify/require"
) )
func TestBasicStuff(t *testing.T) { func TestBasicStuff(t *testing.T) {
@@ -20,17 +21,17 @@ func TestBasicStuff(t *testing.T) {
 		if i%3 == 0 {
 			kind = 12
 		}
-		ss.SaveEvent(nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)})
+		evt := nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)}
+		evt.Sign(nostr.Generate())
+		ss.SaveEvent(evt)
 	}

 	list := make([]nostr.Event, 0, 20)
 	for event := range ss.QueryEvents(nostr.Filter{}) {
 		list = append(list, event)
 	}
-	if len(list) != 20 {
-		t.Fatalf("failed to load 20 events")
-	}
+	require.Len(t, list, 20)
 	if list[0].CreatedAt != 10018 || list[1].CreatedAt != 10016 || list[18].CreatedAt != 3 || list[19].CreatedAt != 1 {
 		t.Fatalf("order is incorrect")
 	}
@@ -49,7 +50,5 @@ func TestBasicStuff(t *testing.T) {
 	for event := range ss.QueryEvents(nostr.Filter{Since: &since}) {
 		list = append(list, event)
 	}
-	if len(list) != 5 {
-		t.Fatalf("should have gotten 5, not %d", len(list))
-	}
+	require.Len(t, list, 5)
 }

View File

@@ -43,10 +43,8 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	// query
-	w := eventstore.RelayWrapper{Store: db}
 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{}))
 		require.Len(t, results, len(allEvents))
 		require.ElementsMatch(t,
 			allEvents,
@@ -57,8 +55,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		for i := 0; i < 10; i++ {
 			since := nostr.Timestamp(i*10 + 1)
-			results, err := w.QuerySync(ctx, nostr.Filter{Since: &since})
-			require.NoError(t, err)
+			results := slices.Collect(db.QueryEvents(nostr.Filter{Since: &since}))
 			require.ElementsMatch(t,
 				allEvents[i:],
 				results,
@@ -67,8 +64,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{IDs: []nostr.ID{allEvents[7].ID, allEvents[9].ID}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{IDs: []nostr.ID{allEvents[7].ID, allEvents[9].ID}}))
 		require.Len(t, results, 2)
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[7], allEvents[9]},
@@ -77,8 +73,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{1}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{1}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]},
 			results,
@@ -86,8 +81,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
 			results,
@@ -96,8 +90,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		pk4 := nostr.GetPublicKey(sk4)
-		results, err := w.QuerySync(ctx, nostr.Filter{Authors: []nostr.PubKey{pk4}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Authors: []nostr.PubKey{pk4}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[0], allEvents[3], allEvents[6], allEvents[9]},
 			results,
@@ -106,8 +99,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		pk3 := nostr.GetPublicKey(sk3)
-		results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[2], allEvents[4], allEvents[8]},
 			results,
@@ -117,9 +109,8 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		pk3 := nostr.GetPublicKey(sk3)
 		pk4 := nostr.GetPublicKey(sk4)
-		pk4[1] = 'a'
-		results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}})
-		require.NoError(t, err)
+		pk4[1] = 9 // this is so it doesn't match
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
 			results,
@@ -127,8 +118,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[2], allEvents[4], allEvents[6]},
 			results,
@@ -141,8 +131,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	// query again
 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{}))
 		require.ElementsMatch(t,
 			slices.Concat(allEvents[0:4], allEvents[6:]),
 			results,
@@ -150,8 +139,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[2], allEvents[6]},
 			results,
@@ -159,8 +147,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}}))
 		require.ElementsMatch(t,
 			[]nostr.Event{allEvents[3]},
 			results,
@@ -170,8 +157,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	{
 		for i := 0; i < 4; i++ {
 			until := nostr.Timestamp(i*10 + 1)
-			results, err := w.QuerySync(ctx, nostr.Filter{Until: &until})
-			require.NoError(t, err)
+			results := slices.Collect(db.QueryEvents(nostr.Filter{Until: &until}))
 			require.ElementsMatch(t,
 				allEvents[:i],
@@ -203,12 +189,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{
-			Tags:  nostr.TagMap{"p": []string{p}},
-			Kinds: []uint16{1984},
-			Limit: 2,
-		})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}}, Kinds: []uint16{1984}, Limit: 2}))
 		require.ElementsMatch(t,
 			[]nostr.Event{newEvents[2], newEvents[1]},
 			results,
@@ -216,11 +197,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{
-			Tags:  nostr.TagMap{"p": []string{p}, "t": []string{"x"}},
-			Limit: 4,
-		})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}, "t": []string{"x"}}, Limit: 4}))
 		require.ElementsMatch(t,
 			// the results won't be in canonical time order because this query is too awful, needs a kind
 			[]nostr.Event{newEvents[1]},
@@ -229,18 +206,12 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
 	}

 	{
-		results, err := w.QuerySync(ctx, nostr.Filter{
-			Tags:  nostr.TagMap{"p": []string{p, p2}},
-			Kinds: []uint16{1},
-			Limit: 4,
-		})
-		require.NoError(t, err)
+		results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p, p2}}, Kinds: []uint16{1}, Limit: 4}))
 		for _, idx := range []int{5, 6, 7} {
 			require.True(t,
 				slices.ContainsFunc(
 					results,
-					func(evt *nostr.Event) bool { return evt.ID == newEvents[idx].ID },
+					func(evt nostr.Event) bool { return evt.ID == newEvents[idx].ID },
 				),
 				"'p' tag 3 query error")
 		}

View File

@@ -53,10 +53,7 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) {
 		}
 	}

-	w := eventstore.RelayWrapper{Store: db}
-	res := slices.Collect(w.QueryEvents(bigfilter))
+	res := slices.Collect(db.QueryEvents(bigfilter))
 	require.Len(t, res, limit)
 	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse))
 	slices.SortFunc(ordered, nostr.CompareEventReverse)

View File

@@ -43,7 +43,6 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
 		require.NoError(t, err)
 	}

-	w := eventstore.RelayWrapper{Store: db}
 	pk3 := nostr.GetPublicKey(sk3)
 	pk4 := nostr.GetPublicKey(sk4)
 	eTags := make([]string, 20)
@@ -69,12 +68,9 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
 	t.Run("filter", func(t *testing.T) {
 		for q, filter := range filters {
-			q := q
-			filter := filter
 			label := fmt.Sprintf("filter %d: %s", q, filter)
 			t.Run(fmt.Sprintf("q-%d", q), func(t *testing.T) {
-				results := slices.Collect(w.QueryEvents(filter))
+				results := slices.Collect(db.QueryEvents(filter))
 				require.NotEmpty(t, results, label)
 			})
 		}

View File

@@ -60,9 +60,7 @@ func unbalancedTest(t *testing.T, db eventstore.Store) {
 	}
 	require.Len(t, expected, limit)

-	w := eventstore.RelayWrapper{Store: db}
-	res := slices.Collect(w.QueryEvents(bigfilter))
+	res := slices.Collect(db.QueryEvents(bigfilter))
 	require.Equal(t, limit, len(res))
 	require.True(t, slices.IsSortedFunc(res, nostr.CompareEventReverse))

View File

@@ -21,7 +21,7 @@ func Generate() SecretKey {
 type SecretKey [32]byte

-func (sk SecretKey) String() string { return hex.EncodeToString(sk[:]) }
+func (sk SecretKey) String() string { return "sk::" + sk.Hex() }
 func (sk SecretKey) Hex() string    { return hex.EncodeToString(sk[:]) }
 func (sk SecretKey) Public() PubKey { return GetPublicKey(sk) }
@@ -53,7 +53,7 @@ var ZeroPK = [32]byte{}
 type PubKey [32]byte

-func (pk PubKey) String() string { return hex.EncodeToString(pk[:]) }
+func (pk PubKey) String() string { return "pk::" + pk.Hex() }
 func (pk PubKey) Hex() string    { return hex.EncodeToString(pk[:]) }

 func PubKeyFromHex(pkh string) (PubKey, error) {

View File

@@ -45,7 +45,6 @@ func (rl *Relay) handleNormal(ctx context.Context, evt nostr.Event) (skipBroadca
 	} else {
 		// otherwise it's a replaceable
 		if nil != rl.ReplaceEvent {
-			fmt.Print("\nREPLACING .", evt.CreatedAt, "\n\n")
 			if err := rl.ReplaceEvent(ctx, evt); err != nil {
 				switch err {
 				case eventstore.ErrDupEvent:

khatru/relay_fuzz_test.go Normal file
View File

@@ -0,0 +1,113 @@
package khatru

import (
	"context"
	"math"
	"math/rand/v2"
	"net/http/httptest"
	"testing"
	"time"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/lmdb"
	"github.com/stretchr/testify/require"
)

func FuzzReplaceableEvents(f *testing.F) {
	f.Add(uint(1), uint(2))
	f.Fuzz(func(t *testing.T, seed uint, nevents uint) {
		if nevents == 0 {
			return
		}

		relay := NewRelay()
		store := &lmdb.LMDBBackend{Path: "/tmp/fuzz"}
		store.Init()
		relay.UseEventstore(store)
		defer store.Close()

		// start test server
		server := httptest.NewServer(relay)
		defer server.Close()

		// create test keys
		sk1 := nostr.Generate()
		pk1 := nostr.GetPublicKey(sk1)

		// helper to create signed events
		createEvent := func(sk nostr.SecretKey, kind uint16, content string, tags nostr.Tags) nostr.Event {
			pk := nostr.GetPublicKey(sk)
			evt := nostr.Event{
				PubKey:    pk,
				CreatedAt: nostr.Now(),
				Kind:      kind,
				Tags:      tags,
				Content:   content,
			}
			evt.Sign(sk)
			return evt
		}

		url := "ws" + server.URL[4:]
		client1, err := nostr.RelayConnect(context.Background(), url, nostr.RelayOptions{})
		if err != nil {
			t.Skip("failed to connect client1")
		}
		defer client1.Close()
		client2, err := nostr.RelayConnect(context.Background(), url, nostr.RelayOptions{})
		if err != nil {
			t.Skip("failed to connect client2")
		}
		defer client2.Close()

		t.Run("replaceable events", func(t *testing.T) {
			ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
			defer cancel()

			rnd := rand.New(rand.NewPCG(uint64(seed), 0))
			newest := nostr.Timestamp(0)
			for range nevents {
				evt := createEvent(sk1, 0, `{"name":"blblbl"}`, nil)
				evt.CreatedAt = nostr.Timestamp(rnd.Int64() % math.MaxUint32)
				evt.Sign(sk1)
				err = client1.Publish(ctx, evt)
				if err != nil {
					t.Fatalf("failed to publish event: %v", err)
				}
				if evt.CreatedAt > newest {
					newest = evt.CreatedAt
				}
			}

			// query to verify only the newest event exists
			sub, err := client2.Subscribe(ctx, nostr.Filter{
				Authors: []nostr.PubKey{pk1},
				Kinds:   []uint16{0},
			}, nostr.SubscriptionOptions{})
			if err != nil {
				t.Fatalf("failed to subscribe: %v", err)
			}
			defer sub.Unsub()

			// should only get one event back (the newest one)
			var receivedEvents []nostr.Event
			for {
				select {
				case evt := <-sub.Events:
					receivedEvents = append(receivedEvents, evt)
				case <-sub.EndOfStoredEvents:
					require.Len(t, receivedEvents, 1)
					require.Equal(t, newest, receivedEvents[0].CreatedAt)
					return
				case <-ctx.Done():
					t.Fatal("timeout waiting for events")
				}
			}
		})
	})
}

View File

@@ -217,7 +217,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
 			receivedEvents = append(receivedEvents, evt)
 		case <-sub.EndOfStoredEvents:
 			if len(receivedEvents) != 1 {
-				t.Errorf("expected exactly 1 event, got %d", len(receivedEvents))
+				t.Errorf("expected exactly 1 event, got %v", receivedEvents)
 			}
 			if len(receivedEvents) > 0 && receivedEvents[0].Content != `{"name":"newer"}` {
 				t.Errorf("expected newest event content, got %s", receivedEvents[0].Content)

View File

@@ -0,0 +1,3 @@
go test fuzz v1
uint(25)
uint(223)
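The file above is a seed corpus entry in Go's `go test fuzz v1` format: one line per argument of the f.Fuzz callback, so uint(25) and uint(223) feed the (seed, nevents) parameters of FuzzReplaceableEvents. Assuming the repository layout shown here, the target can be run from the repository root with the standard fuzzing driver:

	go test -run=^$ -fuzz=FuzzReplaceableEvents ./khatru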

View File

@@ -17,7 +17,7 @@ var ZeroID = [32]byte{}
 // ID represents an event id
 type ID [32]byte

-func (id ID) String() string { return hex.EncodeToString(id[:]) }
+func (id ID) String() string { return "id::" + id.Hex() }
 func (id ID) Hex() string    { return hex.EncodeToString(id[:]) }

 func IDFromHex(idh string) (ID, error) {