define a nostr.Kind type for event kinds, adjust usages everywhere, and fix some horrible bugs in mmm, lmdb, and badger querying and deleting.

This commit is contained in:
fiatjaf
2025-04-20 11:14:39 -03:00
parent 27f40c2cf2
commit 15c6093c9b
74 changed files with 689 additions and 404 deletions

View File

@@ -60,9 +60,9 @@ func (b *BadgerBackend) CountEvents(filter nostr.Filter) (uint32, error) {
return err
}
err = item.Value(func(val []byte) error {
err = item.Value(func(bin []byte) error {
evt := nostr.Event{}
if err := betterbinary.Unmarshal(val, &evt); err != nil {
if err := betterbinary.Unmarshal(bin, &evt); err != nil {
return err
}
@@ -135,15 +135,15 @@ func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32,
return err
}
err = item.Value(func(val []byte) error {
err = item.Value(func(bin []byte) error {
if extraFilter == nil {
hll.AddBytes([32]byte(val[32:64]))
hll.AddBytes(betterbinary.GetPubKey(bin))
count++
return nil
}
evt := nostr.Event{}
if err := betterbinary.Unmarshal(val, &evt); err != nil {
if err := betterbinary.Unmarshal(bin, &evt); err != nil {
return err
}
if extraFilter.Matches(evt) {

View File

@@ -12,6 +12,8 @@ import (
var serialDelete uint32 = 0
func (b *BadgerBackend) DeleteEvent(id nostr.ID) error {
fmt.Println("...", id)
deletionHappened := false
err := b.Update(func(txn *badger.Txn) error {
@@ -52,21 +54,24 @@ func (b *BadgerBackend) delete(txn *badger.Txn, id nostr.ID) (bool, error) {
var evt nostr.Event
it := txn.NewIterator(opts)
defer it.Close()
it.Seek(prefix)
if it.ValidForPrefix(prefix) {
idx = append(idx, it.Item().Key()[1+8:]...)
if err := it.Item().Value(func(val []byte) error {
return betterbinary.Unmarshal(val, &evt)
}); err != nil {
return false, fmt.Errorf("failed to unmarshal event %x to delete: %w", id[:], err)
idx = append(idx, it.Item().Key()[1+8:1+8+4]...)
item, err := txn.Get(idx)
if err == badger.ErrKeyNotFound {
// this event doesn't exist or is already deleted
return false, nil
} else if err != nil {
return false, fmt.Errorf("failed to fetch event %x to delete: %w", id[:], err)
} else {
if err := item.Value(func(bin []byte) error {
return betterbinary.Unmarshal(bin, &evt)
}); err != nil {
return false, fmt.Errorf("failed to unmarshal event %x to delete: %w", id[:], err)
}
}
}
it.Close()
// if no idx was found, end here, this event doesn't exist
if len(idx) == 1 {
return false, nil
}
// calculate all index keys we have for this event and delete them
for k := range b.getIndexKeysForEvent(evt, idx[1:]) {

View File

@@ -70,11 +70,11 @@ func FuzzQuery(f *testing.F) {
Authors: make([]nostr.PubKey, authors),
Limit: int(limit),
}
var maxKind uint16 = 1
var maxKind nostr.Kind = 1
if kinds > 0 {
filter.Kinds = make([]uint16, kinds)
filter.Kinds = make([]nostr.Kind, kinds)
for i := range filter.Kinds {
filter.Kinds[i] = uint16(kindFactor) * uint16(i)
filter.Kinds[i] = nostr.Kind(kindFactor) * nostr.Kind(i)
}
maxKind = filter.Kinds[len(filter.Kinds)-1]
}
@@ -96,7 +96,7 @@ func FuzzQuery(f *testing.F) {
CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i),
Content: fmt.Sprintf("unbalanced %d", i),
Tags: nostr.Tags{},
Kind: uint16(i) % maxKind,
Kind: nostr.Kind(i) % maxKind,
}
err := evt.Sign(sk)
require.NoError(t, err)

View File

@@ -19,7 +19,7 @@ func getTagIndexPrefix(tagValue string) ([]byte, int) {
// store value in the new special "a" tag index
k = make([]byte, 1+2+8+len(d)+4+4)
k[0] = indexTagAddrPrefix
binary.BigEndian.PutUint16(k[1:], kind)
binary.BigEndian.PutUint16(k[1:], uint16(kind))
copy(k[1+2:], pkb[0:8])
copy(k[1+2+8:], d)
offset = 1 + 2 + 8 + len(d)
@@ -137,12 +137,12 @@ func (b *BadgerBackend) getIndexKeysForEvent(evt nostr.Event, idx []byte) iter.S
}
}
func getAddrTagElements(tagValue string) (kind uint16, pkb []byte, d string) {
func getAddrTagElements(tagValue string) (kind nostr.Kind, pkb []byte, d string) {
spl := strings.Split(tagValue, ":")
if len(spl) == 3 {
if pkb, _ := hex.DecodeString(spl[1]); len(pkb) == 32 {
if kind, err := strconv.ParseUint(spl[0], 10, 16); err == nil {
return uint16(kind), pkb, spl[2]
return nostr.Kind(kind), pkb, spl[2]
}
}
}

View File

@@ -161,26 +161,26 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
return nil, err
}
if err := item.Value(func(val []byte) error {
// fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), "author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132])))
if err := item.Value(func(bin []byte) error {
// fmt.Println(" event", betterbinary.GetID(bin ), "kind", betterbinary.GetKind(bin ).Num(), "author", betterbinary.GetPubKey(bin ), "ts", betterbinary.GetCreatedAt(bin ), hex.EncodeToString(it.key), it.valIdx)
// check it against pubkeys without decoding the entire thing
if extraFilter != nil && extraFilter.Authors != nil &&
!nostr.ContainsPubKey(extraFilter.Authors, nostr.PubKey(val[32:64])) {
!nostr.ContainsPubKey(extraFilter.Authors, betterbinary.GetPubKey(bin)) {
// fmt.Println(" skipped (authors)")
return nil
}
// check it against kinds without decoding the entire thing
if extraFilter != nil && extraFilter.Kinds != nil &&
!slices.Contains(extraFilter.Kinds, binary.BigEndian.Uint16(val[132:134])) {
!slices.Contains(extraFilter.Kinds, betterbinary.GetKind(bin)) {
// fmt.Println(" skipped (kinds)")
return nil
}
event := nostr.Event{}
if err := betterbinary.Unmarshal(val, &event); err != nil {
log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
if err := betterbinary.Unmarshal(bin, &event); err != nil {
log.Printf("badger: value read error (id %x): %s\n", betterbinary.GetID(bin), err)
return err
}

View File

@@ -16,8 +16,8 @@ func (b *BadgerBackend) ReplaceEvent(evt nostr.Event) error {
}
return b.Update(func(txn *badger.Txn) error {
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsAddressable() {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

View File

@@ -12,8 +12,8 @@ func (b *BlugeBackend) ReplaceEvent(evt nostr.Event) error {
b.Lock()
defer b.Unlock()
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsReplaceable() {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

View File

@@ -0,0 +1,23 @@
package betterbinary
import (
"encoding/binary"
"fiatjaf.com/nostr"
)
// GetKind reads the event kind straight out of a betterbinary-encoded
// event without unmarshalling the whole record. The kind is stored as a
// little-endian uint16 at bytes [1:3] of the encoding.
func GetKind(evtb []byte) nostr.Kind {
	raw := binary.LittleEndian.Uint16(evtb[1:3])
	return nostr.Kind(raw)
}
// GetID reads the 32-byte event id straight out of a betterbinary-encoded
// event without unmarshalling the whole record. The id occupies bytes
// [7:39] of the encoding.
func GetID(evtb []byte) nostr.ID {
	var id nostr.ID = nostr.ID(evtb[7:39])
	return id
}
// GetPubKey reads the 32-byte author public key straight out of a
// betterbinary-encoded event without unmarshalling the whole record.
// The pubkey occupies bytes [39:71] of the encoding.
func GetPubKey(evtb []byte) nostr.PubKey {
	var pk nostr.PubKey = nostr.PubKey(evtb[39:71])
	return pk
}
// GetCreatedAt reads the creation timestamp straight out of a
// betterbinary-encoded event without unmarshalling the whole record.
// The timestamp is stored as a little-endian uint32 at bytes [3:7].
func GetCreatedAt(evtb []byte) nostr.Timestamp {
	raw := binary.LittleEndian.Uint32(evtb[3:7])
	return nostr.Timestamp(raw)
}

View File

@@ -107,7 +107,7 @@ func Unmarshal(data []byte, evt *nostr.Event) (err error) {
}
}()
evt.Kind = uint16(binary.LittleEndian.Uint16(data[1:3]))
evt.Kind = nostr.Kind(binary.LittleEndian.Uint16(data[1:3]))
evt.CreatedAt = nostr.Timestamp(binary.LittleEndian.Uint32(data[3:7]))
evt.ID = nostr.ID(data[7:39])
evt.PubKey = nostr.PubKey(data[39:71])

View File

@@ -27,7 +27,3 @@ func TagMatches(evtb []byte, key string, vals []string) bool {
}
return false
}
func KindMatches(evtb []byte, kind uint16) bool {
return binary.LittleEndian.Uint16(evtb[1:3]) == kind
}

View File

@@ -53,25 +53,25 @@ func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {
count++
} else {
// fetch actual event
val, err := txn.Get(b.rawEventStore, it.valIdx)
bin, err := txn.Get(b.rawEventStore, it.valIdx)
if err != nil {
panic(err)
}
// check it against pubkeys without decoding the entire thing
if !slices.Contains(extraAuthors, [32]byte(val[32:64])) {
if !slices.Contains(extraAuthors, betterbinary.GetPubKey(bin)) {
it.next()
continue
}
// check it against kinds without decoding the entire thing
if !slices.Contains(extraKinds, [2]byte(val[132:134])) {
if !slices.Contains(extraKinds, betterbinary.GetKind(bin)) {
it.next()
continue
}
evt := &nostr.Event{}
if err := betterbinary.Unmarshal(val, evt); err != nil {
if err := betterbinary.Unmarshal(bin, evt); err != nil {
it.next()
continue
}
@@ -139,7 +139,7 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *
}
// fetch actual event (we need it regardless because we need the pubkey for the hll)
val, err := txn.Get(b.rawEventStore, it.valIdx)
bin, err := txn.Get(b.rawEventStore, it.valIdx)
if err != nil {
panic(err)
}
@@ -147,16 +147,16 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *
if extraKinds == nil && extraTagValues == nil {
// nothing extra to check
count++
hll.AddBytes(nostr.PubKey(val[32:64]))
hll.AddBytes(betterbinary.GetPubKey(bin))
} else {
// check it against kinds without decoding the entire thing
if !slices.Contains(extraKinds, [2]byte(val[132:134])) {
if !slices.Contains(extraKinds, betterbinary.GetKind(bin)) {
it.next()
continue
}
evt := &nostr.Event{}
if err := betterbinary.Unmarshal(val, evt); err != nil {
if err := betterbinary.Unmarshal(bin, evt); err != nil {
it.next()
continue
}

View File

@@ -26,13 +26,13 @@ func (b *LMDBBackend) delete(txn *lmdb.Txn, id nostr.ID) error {
}
// if we do, get it so we can compute the indexes
buf, err := txn.Get(b.rawEventStore, idx)
bin, err := txn.Get(b.rawEventStore, idx)
if err != nil {
return fmt.Errorf("failed to get raw event %x to delete: %w", id, err)
}
var evt nostr.Event
if err := betterbinary.Unmarshal(buf, &evt); err != nil {
if err := betterbinary.Unmarshal(bin, &evt); err != nil {
return fmt.Errorf("failed to unmarshal raw event %x to delete: %w", id, err)
}

View File

@@ -49,11 +49,11 @@ func FuzzQuery(f *testing.F) {
Authors: make([]nostr.PubKey, authors),
Limit: int(limit),
}
var maxKind uint16 = 1
var maxKind nostr.Kind = 1
if kinds > 0 {
filter.Kinds = make([]uint16, kinds)
filter.Kinds = make([]nostr.Kind, kinds)
for i := range filter.Kinds {
filter.Kinds[i] = uint16(int(kindFactor) * i)
filter.Kinds[i] = nostr.Kind(int(kindFactor) * i)
}
maxKind = filter.Kinds[len(filter.Kinds)-1]
}
@@ -75,7 +75,7 @@ func FuzzQuery(f *testing.F) {
CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i),
Content: fmt.Sprintf("unbalanced %d", i),
Tags: nostr.Tags{},
Kind: uint16(i) % maxKind,
Kind: nostr.Kind(i) % maxKind,
}
err := evt.Sign(sk)
require.NoError(t, err)

View File

@@ -6,6 +6,7 @@ import (
"os"
"sync/atomic"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"github.com/PowerDNS/lmdb-go/lmdb"
)
@@ -34,7 +35,7 @@ type LMDBBackend struct {
indexPTagKind lmdb.DBI
hllCache lmdb.DBI
EnableHLLCacheFor func(kind uint16) (useCache bool, skipSavingActualEvent bool)
EnableHLLCacheFor func(kind nostr.Kind) (useCache bool, skipSavingActualEvent bool)
lastId atomic.Uint32
}

View File

@@ -113,9 +113,10 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
if oldest.Q == q && remainingUnexhausted > 1 {
continue
}
// fmt.Println(" query", q, unsafe.Pointer(&results[q]), b.dbiName(query.dbi), hex.EncodeToString(query.prefix), len(results[q]))
// fmt.Println(" query", q, unsafe.Pointer(&results[q]), b.dbiName(query.dbi), hex.EncodeToString(query.prefix), hex.EncodeToString(query.startingPoint), len(results[q]))
it := iterators[q]
// fmt.Println(" ", q, unsafe.Pointer(iterators[q]), it.err)
pulledThisIteration := 0
for {
@@ -124,7 +125,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
len(it.key) != query.keySize ||
!bytes.HasPrefix(it.key, query.prefix) {
// either iteration has errored or we reached the end of this prefix
// fmt.Println(" reached end", it.key, query.keySize, query.prefix)
// fmt.Println(" reached end", hex.EncodeToString(it.key), query.keySize, hex.EncodeToString(query.prefix), it.err)
exhaust(q)
break
}
@@ -140,7 +141,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
}
// fetch actual event
val, err := txn.Get(b.rawEventStore, it.valIdx)
bin, err := txn.Get(b.rawEventStore, it.valIdx)
if err != nil {
log.Printf(
"lmdb: failed to get %x based on prefix %x, index key %x from raw event store: %s\n",
@@ -149,26 +150,26 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
}
// check it against pubkeys without decoding the entire thing
if extraAuthors != nil && !slices.Contains(extraAuthors, [32]byte(val[32:64])) {
if extraAuthors != nil && !slices.Contains(extraAuthors, betterbinary.GetPubKey(bin)) {
it.next()
continue
}
// check it against kinds without decoding the entire thing
if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(val[132:134])) {
if extraKinds != nil && !slices.Contains(extraKinds, betterbinary.GetKind(bin)) {
it.next()
continue
}
// decode the entire thing
event := nostr.Event{}
if err := betterbinary.Unmarshal(val, &event); err != nil {
log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", val[0:32],
if err := betterbinary.Unmarshal(bin, &event); err != nil {
log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", betterbinary.GetID(bin),
query.prefix, query.startingPoint, query.dbi, err)
return nil, fmt.Errorf("event read error: %w", err)
}
// fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), "author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132])), hex.EncodeToString(it.key), it.valIdx)
// fmt.Println(" event", betterbinary.GetID(bin), "kind", betterbinary.GetKind(bin).Num(), "author", betterbinary.GetPubKey(bin), "ts", betterbinary.GetCreatedAt(bin), hex.EncodeToString(it.key), it.valIdx)
// if there is still a tag to be checked, do it now
if extraTagValues != nil && !event.Tags.ContainsAny(extraTagKey, extraTagValues) {

View File

@@ -22,8 +22,8 @@ type query struct {
func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
queries []query,
extraAuthors [][32]byte,
extraKinds [][2]byte,
extraAuthors []nostr.PubKey,
extraKinds []nostr.Kind,
extraTagKey string,
extraTagValues []string,
since uint32,
@@ -127,16 +127,16 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
if filter.Kinds != nil {
extraKinds = make([][2]byte, len(filter.Kinds))
extraKinds = make([]nostr.Kind, len(filter.Kinds))
for i, kind := range filter.Kinds {
binary.BigEndian.PutUint16(extraKinds[i][0:2], uint16(kind))
extraKinds[i] = kind
}
}
}
// add an extra author search if possible
if filter.Authors != nil {
extraAuthors = make([][32]byte, len(filter.Authors))
extraAuthors = make([]nostr.PubKey, len(filter.Authors))
for i, pk := range filter.Authors {
extraAuthors[i] = pk
}

View File

@@ -16,8 +16,8 @@ func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
}
return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsAddressable() {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

View File

@@ -54,13 +54,13 @@ func (il *IndexingLayer) CountEvents(filter nostr.Filter) (uint32, error) {
bin := il.mmmm.mmapf[pos.start : pos.start+uint64(pos.size)]
// check it against pubkeys without decoding the entire thing
if extraAuthors != nil && !slices.Contains(extraAuthors, [32]byte(bin[39:71])) {
if extraAuthors != nil && !slices.Contains(extraAuthors, betterbinary.GetPubKey(bin)) {
it.next()
continue
}
// check it against kinds without decoding the entire thing
if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(bin[1:3])) {
if extraKinds != nil && !slices.Contains(extraKinds, betterbinary.GetKind(bin)) {
it.next()
continue
}

View File

@@ -69,7 +69,7 @@ func FuzzTest(f *testing.F) {
evt := nostr.Event{
CreatedAt: nostr.Timestamp(i),
Kind: uint16(i), // hack to query by serial id
Kind: nostr.Kind(i), // hack to query by serial id
Tags: tags,
Content: fmt.Sprintf("test content %d", i),
}
@@ -148,13 +148,13 @@ func FuzzTest(f *testing.F) {
for _, layer := range mmm.layers {
// verify event still accessible from other layers
if slices.Contains(foundlayers, layer) {
next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack
next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{evt.Kind}})) // hack
_, fetched := next()
require.True(t, fetched)
stop()
} else {
// and not accessible from this layer we just deleted
next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []uint16{evt.Kind}})) // hack
next, stop := iter.Pull(layer.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{evt.Kind}})) // hack
_, fetched := next()
require.True(t, fetched)
stop()

View File

@@ -212,13 +212,13 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
bin := il.mmmm.mmapf[pos.start : pos.start+uint64(pos.size)]
// check it against pubkeys without decoding the entire thing
if extraAuthors != nil && !slices.Contains(extraAuthors, [32]byte(bin[39:71])) {
if extraAuthors != nil && !slices.Contains(extraAuthors, betterbinary.GetPubKey(bin)) {
it.next()
continue
}
// check it against kinds without decoding the entire thing
if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(bin[1:3])) {
if extraKinds != nil && !slices.Contains(extraKinds, betterbinary.GetKind(bin)) {
it.next()
continue
}
@@ -231,7 +231,7 @@ func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([
return nil, fmt.Errorf("event read error: %w", err)
}
// fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), "author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132])), hex.EncodeToString(it.key), it.valIdx)
// fmt.Println(" event", betterbinary.GetID(bin), "kind", betterbinary.GetKind(bin).Num(), "author", betterbinary.GetPubKey(bin), "ts", betterbinary.GetCreatedAt(bin), hex.EncodeToString(it.key), it.valIdx)
// if there is still a tag to be checked, do it now
if extraTagValues != nil && !event.Tags.ContainsAny(extraTagKey, extraTagValues) {

View File

@@ -22,8 +22,8 @@ type query struct {
func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
queries []query,
extraAuthors [][32]byte,
extraKinds [][2]byte,
extraAuthors []nostr.PubKey,
extraKinds []nostr.Kind,
extraTagKey string,
extraTagValues []string,
since uint32,
@@ -116,16 +116,16 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
// add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index)
if filter.Kinds != nil {
extraKinds = make([][2]byte, len(filter.Kinds))
extraKinds = make([]nostr.Kind, len(filter.Kinds))
for i, kind := range filter.Kinds {
binary.BigEndian.PutUint16(extraKinds[i][0:2], uint16(kind))
extraKinds[i] = kind
}
}
}
// add an extra author search if possible
if filter.Authors != nil {
extraAuthors = make([][32]byte, len(filter.Authors))
extraAuthors = make([]nostr.PubKey, len(filter.Authors))
for i, pk := range filter.Authors {
copy(extraAuthors[i][:], pk[:])
}

View File

@@ -22,8 +22,8 @@ func (il *IndexingLayer) ReplaceEvent(evt nostr.Event) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsAddressable() {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

View File

@@ -130,8 +130,8 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
b.Lock()
defer b.Unlock()
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if evt.Kind.IsAddressable() {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

View File

@@ -21,7 +21,7 @@ func TestBasicStuff(t *testing.T) {
if i%3 == 0 {
kind = 12
}
evt := nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)}
evt := nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: nostr.Kind(kind)}
evt.Sign(nostr.Generate())
ss.SaveEvent(evt)
}
@@ -38,7 +38,7 @@ func TestBasicStuff(t *testing.T) {
until := nostr.Timestamp(9999)
list = make([]nostr.Event, 0, 7)
for event := range ss.QueryEvents(nostr.Filter{Limit: 15, Until: &until, Kinds: []uint16{11}}) {
for event := range ss.QueryEvents(nostr.Filter{Limit: 15, Until: &until, Kinds: []nostr.Kind{11}}) {
list = append(list, event)
}
if len(list) != 7 {

View File

@@ -52,7 +52,7 @@ func runBenchmarkOn(b *testing.B, db eventstore.Store) {
{"e", hex.EncodeToString(eTag)},
{"p", ref.Hex()},
},
Kind: uint16(i % 10),
Kind: nostr.Kind(i % 10),
}
sk := sk3
if i%3 == 0 {
@@ -63,24 +63,24 @@ func runBenchmarkOn(b *testing.B, db eventstore.Store) {
}
filters := make([]nostr.Filter, 0, 10)
filters = append(filters, nostr.Filter{Kinds: []uint16{1, 4, 8, 16}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{1, 4, 8, 16}})
pk3 := nostr.GetPublicKey(sk3)
filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}})
filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []uint16{3, 4}})
filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []nostr.Kind{3, 4}})
filters = append(filters, nostr.Filter{})
filters = append(filters, nostr.Filter{Limit: 20})
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}})
pk4 := nostr.GetPublicKey(sk4)
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
eTags := make([]string, 20)
for i := 0; i < 20; i++ {
eTag := make([]byte, 32)
binary.BigEndian.PutUint16(eTag, uint16(i))
eTags[i] = hex.EncodeToString(eTag)
}
filters = append(filters, nostr.Filter{Kinds: []uint16{9}, Tags: nostr.TagMap{"e": eTags}})
filters = append(filters, nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{9}, Tags: nostr.TagMap{"e": eTags}})
filters = append(filters, nostr.Filter{Kinds: []nostr.Kind{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}})
filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50})

View File

@@ -73,7 +73,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
}
{
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{1}}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{1}}))
require.ElementsMatch(t,
[]nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]},
results,
@@ -81,7 +81,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
}
{
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{9}}))
require.ElementsMatch(t,
[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
results,
@@ -99,7 +99,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
{
pk3 := nostr.GetPublicKey(sk3)
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9}, Authors: []nostr.PubKey{pk3}}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{9}, Authors: []nostr.PubKey{pk3}}))
require.ElementsMatch(t,
[]nostr.Event{allEvents[2], allEvents[4], allEvents[8]},
results,
@@ -109,8 +109,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
{
pk3 := nostr.GetPublicKey(sk3)
pk4 := nostr.GetPublicKey(sk4)
pk4[1] = 9 // this is so it doesn't match
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []uint16{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{9, 5, 7}, Authors: []nostr.PubKey{pk3, pk4}}))
require.ElementsMatch(t,
[]nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]},
results,
@@ -183,17 +182,18 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
}
sk := nostr.Generate()
for _, newEvent := range newEvents {
newEvent.Sign(sk)
require.NoError(t, db.SaveEvent(newEvent))
for i := range newEvents {
newEvents[i].Sign(sk)
require.NoError(t, db.SaveEvent(newEvents[i]))
}
{
results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}}, Kinds: []uint16{1984}, Limit: 2}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p}}, Kinds: []nostr.Kind{1984}, Limit: 2}))
require.ElementsMatch(t,
[]nostr.Event{newEvents[2], newEvents[1]},
results,
"'p' tag 1 query error")
"'p' tag 1 query error",
)
}
{
@@ -206,7 +206,7 @@ func runFirstTestOn(t *testing.T, db eventstore.Store) {
}
{
results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p, p2}}, Kinds: []uint16{1}, Limit: 4}))
results := slices.Collect(db.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"p": []string{p, p2}}, Kinds: []nostr.Kind{1}, Limit: 4}))
for _, idx := range []int{5, 6, 7} {
require.True(t,
slices.ContainsFunc(

View File

@@ -17,7 +17,7 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) {
const total = 10000
const limit = 500
const authors = 1700
kinds := []uint16{6, 7, 8}
kinds := []nostr.Kind{6, 7, 8}
bigfilter := nostr.Filter{
Authors: make([]nostr.PubKey, authors),
@@ -40,7 +40,7 @@ func manyAuthorsTest(t *testing.T, db eventstore.Store) {
CreatedAt: nostr.Timestamp(i*i) / 4,
Content: fmt.Sprintf("lots of stuff %d", i),
Tags: nostr.Tags{},
Kind: uint16(i % 10),
Kind: nostr.Kind(i % 10),
}
err := evt.Sign([32]byte(sk))
require.NoError(t, err)

View File

@@ -32,7 +32,7 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
{"e", hex.EncodeToString(eTag)},
{"p", ref.Hex()},
},
Kind: uint16(i % 10),
Kind: nostr.Kind(i % 10),
}
sk := sk3
if i%3 == 0 {
@@ -52,19 +52,20 @@ func runSecondTestOn(t *testing.T, db eventstore.Store) {
eTags[i] = hex.EncodeToString(eTag)
}
filters := make([]nostr.Filter, 0, 10)
filters = append(filters, nostr.Filter{Kinds: []uint16{1, 4, 8, 16}})
filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}})
filters = append(filters, nostr.Filter{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []uint16{3, 4}})
filters = append(filters, nostr.Filter{})
filters = append(filters, nostr.Filter{Limit: 20})
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []uint16{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}})
filters = append(filters, nostr.Filter{Kinds: []uint16{9}, Tags: nostr.TagMap{"e": eTags}})
filters = append(filters, nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}})
filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}})
filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50})
filters := []nostr.Filter{
{Kinds: []nostr.Kind{1, 4, 8, 16}},
{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}},
{Authors: []nostr.PubKey{pk3, nostr.Generate().Public()}, Kinds: []nostr.Kind{3, 4}},
{},
{Limit: 20},
{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex()}}},
{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}},
{Kinds: []nostr.Kind{8, 9}, Tags: nostr.TagMap{"p": []string{pk3.Hex(), pk4.Hex()}}},
{Kinds: []nostr.Kind{9}, Tags: nostr.TagMap{"e": eTags}},
{Kinds: []nostr.Kind{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}},
{Tags: nostr.TagMap{"e": eTags}},
{Tags: nostr.TagMap{"e": eTags}, Limit: 50},
}
t.Run("filter", func(t *testing.T) {
for q, filter := range filters {

View File

@@ -15,7 +15,7 @@ type StorePublisher struct {
}
func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
if nostr.IsEphemeralKind(evt.Kind) {
if evt.Kind.IsEphemeral() {
// do not store ephemeral events
return nil
}
@@ -23,7 +23,7 @@ func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if nostr.IsRegularKind(evt.Kind) {
if evt.Kind.IsRegular() {
// regular events are just saved directly
if err := w.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)

View File

@@ -44,6 +44,6 @@ func TestRelayWrapper(t *testing.T) {
}
time.Sleep(time.Millisecond * 200)
evts := slices.Collect(w.QueryEvents(nostr.Filter{Kinds: []uint16{3}}))
evts := slices.Collect(w.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{3}}))
require.Len(t, evts, 1)
}