a big bundle of conversions and other changes.
@@ -1,17 +1,16 @@
package badger

import (
"context"
"encoding/binary"
"log"

"github.com/dgraph-io/badger/v4"
bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/nip45/hyperloglog"
"github.com/dgraph-io/badger/v4"
)

func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0

queries, extraFilter, since, err := prepareQueries(filter)
@@ -62,8 +61,8 @@ func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (i
}

err = item.Value(func(val []byte) error {
evt := &nostr.Event{}
if err := bin.Unmarshal(val, evt); err != nil {
evt := nostr.Event{}
if err := betterbinary.Unmarshal(val, &evt); err != nil {
return err
}

@@ -87,7 +86,7 @@ func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (i
return count, err
}

func (b *BadgerBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
var count int64 = 0

queries, extraFilter, since, err := prepareQueries(filter)
@@ -138,13 +137,13 @@ func (b *BadgerBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter,

err = item.Value(func(val []byte) error {
if extraFilter == nil {
hll.AddBytes(val[32:64])
hll.AddBytes([32]byte(val[32:64]))
count++
return nil
}

evt := &nostr.Event{}
if err := bin.Unmarshal(val, evt); err != nil {
evt := nostr.Event{}
if err := betterbinary.Unmarshal(val, &evt); err != nil {
return err
}
if extraFilter.Matches(evt) {
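Note: the new `hll.AddBytes([32]byte(val[32:64]))` form relies on Go 1.20's slice-to-array conversion, which copies the 32 pubkey bytes out of the raw record into an array value (and panics if the slice is shorter). A minimal standalone sketch of just that conversion:

package main

import "fmt"

func main() {
	val := make([]byte, 64)        // stand-in for a raw stored event record
	pubkey := [32]byte(val[32:64]) // Go 1.20+: copies the bytes; panics if fewer than 32
	fmt.Println(len(pubkey))       // 32
}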
@@ -1,22 +1,22 @@
package badger

import (
"context"
"encoding/hex"
"fmt"
"log"

"github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"github.com/dgraph-io/badger/v4"
)

var serialDelete uint32 = 0

func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
func (b *BadgerBackend) DeleteEvent(id nostr.ID) error {
deletionHappened := false

err := b.Update(func(txn *badger.Txn) error {
var err error
deletionHappened, err = b.delete(txn, evt)
deletionHappened, err = b.delete(txn, id)
return err
})
if err != nil {
@@ -36,22 +36,30 @@ func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error
return nil
}

func (b *BadgerBackend) delete(txn *badger.Txn, evt *nostr.Event) (bool, error) {
func (b *BadgerBackend) delete(txn *badger.Txn, id nostr.ID) (bool, error) {
idx := make([]byte, 1, 5)
idx[0] = rawEventStorePrefix

// query event by id to get its idx
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
prefix := make([]byte, 1+8)
prefix[0] = indexIdPrefix
copy(prefix[1:], idPrefix8)
copy(prefix[1:], id[0:8])
opts := badger.IteratorOptions{
PrefetchValues: false,
}

// also grab the actual event so we can calculate its indexes
var evt nostr.Event

it := txn.NewIterator(opts)
it.Seek(prefix)
if it.ValidForPrefix(prefix) {
idx = append(idx, it.Item().Key()[1+8:]...)
if err := it.Item().Value(func(val []byte) error {
return betterbinary.Unmarshal(val, &evt)
}); err != nil {
return false, fmt.Errorf("failed to unmarshal event %x to delete: %w", id[:], err)
}
}
it.Close()
@@ -40,14 +40,13 @@ func getTagIndexPrefix(tagValue string) ([]byte, int) {
return k, offset
}

func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.Seq[[]byte] {
func (b *BadgerBackend) getIndexKeysForEvent(evt nostr.Event, idx []byte) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
{
// ~ by id
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
k := make([]byte, 1+8+4)
k[0] = indexIdPrefix
copy(k[1:], idPrefix8)
copy(k[1:], evt.ID[0:8])
copy(k[1+8:], idx)
if !yield(k) {
return
@@ -56,10 +55,9 @@ func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.

{
// ~ by pubkey+date
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
k := make([]byte, 1+8+4+4)
k[0] = indexPubkeyPrefix
copy(k[1:], pubkeyPrefix8)
copy(k[1:], evt.PubKey[0:8])
binary.BigEndian.PutUint32(k[1+8:], uint32(evt.CreatedAt))
copy(k[1+8+4:], idx)
if !yield(k) {
@@ -81,10 +79,9 @@ func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.

{
// ~ by pubkey+kind+date
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
k := make([]byte, 1+8+2+4+4)
k[0] = indexPubkeyKindPrefix
copy(k[1:], pubkeyPrefix8)
copy(k[1:], evt.PubKey[0:8])
binary.BigEndian.PutUint16(k[1+8:], uint16(evt.Kind))
binary.BigEndian.PutUint32(k[1+8+2:], uint32(evt.CreatedAt))
copy(k[1+8+2+4:], idx)
@@ -152,7 +149,7 @@ func getAddrTagElements(tagValue string) (kind uint16, pkb []byte, d string) {
return 0, nil, ""
}

func filterMatchesTags(ef *nostr.Filter, event *nostr.Event) bool {
func filterMatchesTags(ef nostr.Filter, event nostr.Event) bool {
for f, v := range ef.Tags {
if v != nil && !event.Tags.ContainsAny(f, v) {
return false
@@ -5,9 +5,9 @@ import (
"fmt"
"sync/atomic"

"github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"github.com/dgraph-io/badger/v4"
)

const (
@@ -32,9 +32,9 @@ type BadgerBackend struct {
BadgerOptionsModifier func(badger.Options) badger.Options

// Experimental
SkipIndexingTag func(event *nostr.Event, tagName string, tagValue string) bool
SkipIndexingTag func(event nostr.Event, tagName string, tagValue string) bool
// Experimental
IndexLongerTag func(event *nostr.Event, tagName string, tagValue string) bool
IndexLongerTag func(event nostr.Event, tagName string, tagValue string) bool

*badger.DB
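Note: with the experimental hooks now taking `nostr.Event` by value, configuring them looks roughly like this (a sketch; construction of the backend and its other fields is elided, and the skip condition is invented for illustration):

db := &badger.BadgerBackend{}
db.SkipIndexingTag = func(event nostr.Event, tagName string, tagValue string) bool {
	// e.g. don't index very long tag values
	return len(tagValue) > 100
}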
@@ -2,7 +2,6 @@ package badger

import (
"encoding/binary"
"fmt"

"github.com/dgraph-io/badger/v4"
)
@@ -26,35 +25,12 @@ func (b *BadgerBackend) runMigrations() error {
// do the migrations in increasing steps (there is no rollback)
//

// the 3 first migrations go to trash because on version 3 we need to export and import all the data anyway
if version < 3 {
// if there is any data in the relay we will stop and notify the user,
// otherwise we just set version to 3 and proceed
prefix := []byte{indexIdPrefix}
it := txn.NewIterator(badger.IteratorOptions{
PrefetchValues: true,
PrefetchSize: 100,
Prefix: prefix,
})
defer it.Close()

hasAnyEntries := false
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
hasAnyEntries = true
break
}

if hasAnyEntries {
return fmt.Errorf("your database is at version %d, but in order to migrate up to version 3 you must manually export all the events and then import again: run an old version of this software, export the data, then delete the database files, run the new version, import the data back in.", version)
}

b.bumpVersion(txn, 3)
}

if version < 4 {
if version < 1 {
// ...
}

// b.bumpVersion(txn, 1)

return nil
})
}
@@ -1,68 +1,54 @@
package badger

import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"iter"
"log"

"github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/internal"
bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/eventstore/internal"
"github.com/dgraph-io/badger/v4"
"golang.org/x/exp/slices"
)

var batchFilled = errors.New("batch-filled")

func (b *BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
ch := make(chan *nostr.Event)

if filter.Search != "" {
close(ch)
return ch, nil
}

// max number of events we'll return
maxLimit := b.MaxLimit
var limit int
if eventstore.IsNegentropySession(ctx) {
maxLimit = b.MaxLimitNegentropy
limit = maxLimit
} else {
limit = maxLimit / 4
}
if filter.Limit > 0 && filter.Limit <= maxLimit {
limit = filter.Limit
}
if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
close(ch)
return ch, nil
} else if tlimit > 0 {
limit = tlimit
}

// fmt.Println("limit", limit)

go b.View(func(txn *badger.Txn) error {
defer close(ch)

results, err := b.query(txn, filter, limit)
if err != nil {
return err
func (b *BadgerBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
return func(yield func(nostr.Event) bool) {
if filter.Search != "" {
return
}

for _, evt := range results {
ch <- evt.Event
// max number of events we'll return
limit := b.MaxLimit / 4
if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
limit = filter.Limit
}
if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
return
} else if tlimit > 0 {
limit = tlimit
}

return nil
})
// fmt.Println("limit", limit)
b.View(func(txn *badger.Txn) error {
results, err := b.query(txn, filter, limit)
if err != nil {
return err
}

return ch, nil
for _, evt := range results {
if !yield(evt.Event) {
return nil
}
}

return nil
})
}
}

func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) {
@@ -81,16 +67,16 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// we will continue to pull from it as soon as some other iterator takes the position
oldest := internal.IterEvent{Q: -1}

secondPhase := false // after we have gathered enough events we will change the way we iterate
sndPhase := false // after we have gathered enough events we will change the way we iterate
secondBatch := make([][]internal.IterEvent, 0, len(queries)+1)
secondPhaseParticipants := make([]int, 0, len(queries)+1)
sndPhaseParticipants := make([]int, 0, len(queries)+1)

// while merging results in the second phase we will alternate between these two lists
// to avoid having to create new lists all the time
var secondPhaseResultsA []internal.IterEvent
var secondPhaseResultsB []internal.IterEvent
var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
var secondPhaseHasResultsPending bool
var sndPhaseResultsA []internal.IterEvent
var sndPhaseResultsB []internal.IterEvent
var sndPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
var sndPhaseHasResultsPending bool

remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing
batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -180,26 +166,26 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (

// check it against pubkeys without decoding the entire thing
if extraFilter != nil && extraFilter.Authors != nil &&
!slices.Contains(extraFilter.Authors, hex.EncodeToString(val[32:64])) {
!nostr.ContainsPubKey(extraFilter.Authors, nostr.PubKey(val[32:64])) {
// fmt.Println(" skipped (authors)")
return nil
}

// check it against kinds without decoding the entire thing
if extraFilter != nil && extraFilter.Kinds != nil &&
!slices.Contains(extraFilter.Kinds, int(binary.BigEndian.Uint16(val[132:134]))) {
!slices.Contains(extraFilter.Kinds, binary.BigEndian.Uint16(val[132:134])) {
// fmt.Println(" skipped (kinds)")
return nil
}

event := &nostr.Event{}
if err := bin.Unmarshal(val, event); err != nil {
event := nostr.Event{}
if err := betterbinary.Unmarshal(val, &event); err != nil {
log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
return err
}

// check if this matches the other filters that were not part of the index
if extraFilter != nil && !filterMatchesTags(extraFilter, event) {
if extraFilter != nil && !filterMatchesTags(*extraFilter, event) {
// fmt.Println(" skipped (filter)", extraFilter, event)
return nil
}
@@ -208,18 +194,18 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
evt := internal.IterEvent{Event: event, Q: q}
//
//
if secondPhase {
if sndPhase {
// do the process described below at HIWAWVRTP.
// if we've reached here this means we've already passed the `since` check.
// now we have to eliminate the event currently at the `since` threshold.
nextThreshold := firstPhaseResults[len(firstPhaseResults)-2]
if oldest.Event == nil {
if oldest.Event.ID == nostr.ZeroID {
// fmt.Println(" b1")
// BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE)
// when we don't have the oldest set, we will keep the results
// and not change the cutting point -- it's bad, but hopefully not that bad.
results[q] = append(results[q], evt)
secondPhaseHasResultsPending = true
sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt > oldest.CreatedAt {
// fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt)
// one of the events we have stored is the actual next threshold
@@ -236,7 +222,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// finally
// add this to the results to be merged later
results[q] = append(results[q], evt)
secondPhaseHasResultsPending = true
sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt < evt.CreatedAt {
// the next last event in the firstPhaseResults is the next threshold
// fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt)
@@ -246,7 +232,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// fmt.Println("    new since", since)
// add this to the results to be merged later
results[q] = append(results[q], evt)
secondPhaseHasResultsPending = true
sndPhaseHasResultsPending = true
// update the oldest event
if evt.CreatedAt < oldest.CreatedAt {
oldest = evt
@@ -265,7 +251,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
firstPhaseTotalPulled++

// update the oldest event
if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt {
if oldest.Event.ID == nostr.ZeroID || evt.CreatedAt < oldest.CreatedAt {
oldest = evt
}
}
@@ -295,20 +281,20 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (

// we will do this check if we don't accumulated the requested number of events yet
// fmt.Println("oldest", oldest.Event, "from iter", oldest.Q)
if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) {
if sndPhase && sndPhaseHasResultsPending && (oldest.Event.ID == nostr.ZeroID || remainingUnexhausted == 0) {
// fmt.Println("second phase aggregation!")
// when we are in the second phase we will aggressively aggregate results on every iteration
//
secondBatch = secondBatch[:0]
for s := 0; s < len(secondPhaseParticipants); s++ {
q := secondPhaseParticipants[s]
for s := 0; s < len(sndPhaseParticipants); s++ {
q := sndPhaseParticipants[s]

if len(results[q]) > 0 {
secondBatch = append(secondBatch, results[q])
}

if exhausted[q] {
secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s)
sndPhaseParticipants = internal.SwapDelete(sndPhaseParticipants, s)
s--
}
}
@@ -316,29 +302,29 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// every time we get here we will alternate between these A and B lists
// combining everything we have into a new partial results list.
// after we've done that we can again set the oldest.
// fmt.Println(" xxx", secondPhaseResultsToggle)
if secondPhaseResultsToggle {
secondBatch = append(secondBatch, secondPhaseResultsB)
secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA)
oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1]
// fmt.Println(" new aggregated a", len(secondPhaseResultsB))
// fmt.Println(" xxx", sndPhaseResultsToggle)
if sndPhaseResultsToggle {
secondBatch = append(secondBatch, sndPhaseResultsB)
sndPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsA)
oldest = sndPhaseResultsA[len(sndPhaseResultsA)-1]
// fmt.Println(" new aggregated a", len(sndPhaseResultsB))
} else {
secondBatch = append(secondBatch, secondPhaseResultsA)
secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB)
oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1]
// fmt.Println(" new aggregated b", len(secondPhaseResultsB))
secondBatch = append(secondBatch, sndPhaseResultsA)
sndPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsB)
oldest = sndPhaseResultsB[len(sndPhaseResultsB)-1]
// fmt.Println(" new aggregated b", len(sndPhaseResultsB))
}
secondPhaseResultsToggle = !secondPhaseResultsToggle
sndPhaseResultsToggle = !sndPhaseResultsToggle

since = uint32(oldest.CreatedAt)
// fmt.Println(" new since", since)

// reset the `results` list so we can keep using it
results = results[:len(queries)]
for _, q := range secondPhaseParticipants {
for _, q := range sndPhaseParticipants {
results[q] = results[q][:0]
}
} else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
} else if !sndPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
// fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted)

// we will exclude this oldest number as it is not relevant anymore
@@ -382,16 +368,16 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
results[q] = results[q][:0]

// build this index of indexes with everybody who remains
secondPhaseParticipants = append(secondPhaseParticipants, q)
sndPhaseParticipants = append(sndPhaseParticipants, q)
}

// we create these two lists and alternate between them so we don't have to create a
// a new one every time
secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
sndPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
sndPhaseResultsB = make([]internal.IterEvent, 0, limit*2)

// from now on we won't run this block anymore
secondPhase = true
sndPhase = true
}

// fmt.Println("remaining", remainingUnexhausted)
@@ -400,27 +386,27 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
}
}

// fmt.Println("is secondPhase?", secondPhase)
// fmt.Println("is sndPhase?", sndPhase)

var combinedResults []internal.IterEvent

if secondPhase {
if sndPhase {
// fmt.Println("ending second phase")
// when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff,
// when we reach this point either sndPhaseResultsA or sndPhaseResultsB will be full of stuff,
// the other will be empty
var secondPhaseResults []internal.IterEvent
// fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB))
if secondPhaseResultsToggle {
secondPhaseResults = secondPhaseResultsB
combinedResults = secondPhaseResultsA[0:limit] // reuse this
// fmt.Println(" using b", len(secondPhaseResultsA))
var sndPhaseResults []internal.IterEvent
// fmt.Println("xxx", sndPhaseResultsToggle, len(sndPhaseResultsA), len(sndPhaseResultsB))
if sndPhaseResultsToggle {
sndPhaseResults = sndPhaseResultsB
combinedResults = sndPhaseResultsA[0:limit] // reuse this
// fmt.Println(" using b", len(sndPhaseResultsA))
} else {
secondPhaseResults = secondPhaseResultsA
combinedResults = secondPhaseResultsB[0:limit] // reuse this
// fmt.Println(" using a", len(secondPhaseResultsA))
sndPhaseResults = sndPhaseResultsA
combinedResults = sndPhaseResultsB[0:limit] // reuse this
// fmt.Println(" using a", len(sndPhaseResultsA))
}

all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults}
all := [][]internal.IterEvent{firstPhaseResults, sndPhaseResults}
combinedResults = internal.MergeSortMultiple(all, limit, combinedResults)
// fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit)
} else {
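Note: with QueryEvents returning `iter.Seq[nostr.Event]` instead of `(chan *nostr.Event, error)`, callers consume results with a range-over-func loop (Go 1.23+); breaking out of the loop makes `yield` return false, which unwinds the badger view transaction above. A hedged usage sketch (the early-exit condition is hypothetical):

for evt := range db.QueryEvents(nostr.Filter{Kinds: []uint16{1}, Limit: 10}) {
	fmt.Println(evt.ID, evt.CreatedAt)
	if enoughSeen() { // hypothetical caller-side condition
		break
	}
}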
@@ -1,23 +1,22 @@
package badger

import (
"context"
"fmt"
"math"

"github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/internal"
"github.com/dgraph-io/badger/v4"
)

func (b *BadgerBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
func (b *BadgerBackend) ReplaceEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
}

return b.Update(func(txn *badger.Txn) error {
filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
@@ -32,7 +31,7 @@ func (b *BadgerBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) erro
shouldStore := true
for _, previous := range results {
if internal.IsOlder(previous.Event, evt) {
if _, err := b.delete(txn, previous.Event); err != nil {
if _, err := b.delete(txn, previous.Event.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err)
}
} else {
@@ -1,18 +1,16 @@
package badger

import (
"context"
"encoding/hex"
"fmt"
"math"

"github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr/eventstore"
bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"github.com/dgraph-io/badger/v4"
)

func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
func (b *BadgerBackend) SaveEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
@@ -20,10 +18,9 @@ func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {

return b.Update(func(txn *badger.Txn) error {
// query event by id to ensure we don't save duplicates
id, _ := hex.DecodeString(evt.ID)
prefix := make([]byte, 1+8)
prefix[0] = indexIdPrefix
copy(prefix[1:], id)
copy(prefix[1:], evt.ID[0:8])
it := txn.NewIterator(badger.IteratorOptions{})
defer it.Close()
it.Seek(prefix)
@@ -36,16 +33,16 @@ func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
})
}

func (b *BadgerBackend) save(txn *badger.Txn, evt *nostr.Event) error {
func (b *BadgerBackend) save(txn *badger.Txn, evt nostr.Event) error {
// encode to binary
bin, err := bin.Marshal(evt)
if err != nil {
buf := make([]byte, betterbinary.Measure(evt))
if err := betterbinary.Marshal(evt, buf); err != nil {
return err
}

idx := b.Serial()
// raw event store
if err := txn.Set(idx, bin); err != nil {
if err := txn.Set(idx, buf); err != nil {
return err
}
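Note: the save path now follows a measure-then-marshal pattern: `betterbinary.Measure(evt)` apparently computes the encoded size up front so the caller allocates exactly once and `Marshal` fills the buffer in place, instead of the old `bin.Marshal` returning a freshly allocated (and, per the deleted code below, heavily over-sized) slice. The call-site shape, as used above:

buf := make([]byte, betterbinary.Measure(evt)) // exact encoded size for this event
if err := betterbinary.Marshal(evt, buf); err != nil {
	return err
}
// buf is then stored as the raw value under the serial idx key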
@@ -1,11 +1,9 @@
package bluge

import (
"context"

"fiatjaf.com/nostr"
)

func (b *BlugeBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
return b.writer.Delete(eventIdentifier(evt.ID))
func (b *BlugeBackend) DeleteEvent(id nostr.ID) error {
return b.writer.Delete(eventIdentifier(id))
}
@@ -1,6 +1,8 @@
package bluge

import "encoding/hex"
import (
"fiatjaf.com/nostr"
)

const (
contentField = "c"
@@ -9,7 +11,7 @@ const (
pubkeyField = "p"
)

type eventIdentifier string
type eventIdentifier nostr.ID

const idField = "i"

@@ -18,6 +20,5 @@ func (id eventIdentifier) Field() string {
}

func (id eventIdentifier) Term() []byte {
v, _ := hex.DecodeString(string(id))
return v
return id[:]
}
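Note: since `eventIdentifier` is now a defined type over `nostr.ID` rather than a hex string, `Term()` hands bluge the raw id bytes directly and the hex round-trip disappears. A sketch of the type mechanics, assuming `nostr.ID` is a `[32]byte`:

type ID = [32]byte // assumption about nostr.ID, for illustration only

type eventIdentifier ID

func (id eventIdentifier) Term() []byte {
	return id[:] // slice over the array receiver; no hex decoding needed
}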
@@ -2,108 +2,96 @@ package bluge

import (
"context"
"encoding/hex"
"fmt"
"iter"
"strconv"

"fiatjaf.com/nostr"
"github.com/blugelabs/bluge"
"github.com/blugelabs/bluge/search"
"fiatjaf.com/nostr"
)

func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
ch := make(chan *nostr.Event)

if len(filter.Search) < 2 {
close(ch)
return ch, nil
}

reader, err := b.writer.Reader()
if err != nil {
close(ch)
return nil, fmt.Errorf("unable to open reader: %w", err)
}

searchQ := bluge.NewMatchQuery(filter.Search)
searchQ.SetField(contentField)
var q bluge.Query = searchQ

complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ)

if len(filter.Kinds) > 0 {
eitherKind := bluge.NewBooleanQuery()
eitherKind.SetMinShould(1)
for _, kind := range filter.Kinds {
kindQ := bluge.NewTermQuery(strconv.Itoa(kind))
kindQ.SetField(kindField)
eitherKind.AddShould(kindQ)
func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
return func(yield func(nostr.Event) bool) {
if len(filter.Search) < 2 {
return
}
complicatedQuery.AddMust(eitherKind)
q = complicatedQuery
}

if len(filter.Authors) > 0 {
eitherPubkey := bluge.NewBooleanQuery()
eitherPubkey.SetMinShould(1)
for _, pubkey := range filter.Authors {
if len(pubkey) != 64 {
continue
reader, err := b.writer.Reader()
if err != nil {
return
}

searchQ := bluge.NewMatchQuery(filter.Search)
searchQ.SetField(contentField)
var q bluge.Query = searchQ

complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ)

if len(filter.Kinds) > 0 {
eitherKind := bluge.NewBooleanQuery()
eitherKind.SetMinShould(1)
for _, kind := range filter.Kinds {
kindQ := bluge.NewTermQuery(strconv.Itoa(int(kind)))
kindQ.SetField(kindField)
eitherKind.AddShould(kindQ)
}
pubkeyQ := bluge.NewTermQuery(pubkey[56:])
pubkeyQ.SetField(pubkeyField)
eitherPubkey.AddShould(pubkeyQ)
complicatedQuery.AddMust(eitherKind)
q = complicatedQuery
}
complicatedQuery.AddMust(eitherPubkey)
q = complicatedQuery
}

if filter.Since != nil || filter.Until != nil {
min := 0.0
if filter.Since != nil {
min = float64(*filter.Since)
if len(filter.Authors) > 0 {
eitherPubkey := bluge.NewBooleanQuery()
eitherPubkey.SetMinShould(1)
for _, pubkey := range filter.Authors {
if len(pubkey) != 64 {
continue
}
pubkeyQ := bluge.NewTermQuery(pubkey.Hex()[56:])
pubkeyQ.SetField(pubkeyField)
eitherPubkey.AddShould(pubkeyQ)
}
complicatedQuery.AddMust(eitherPubkey)
q = complicatedQuery
}
max := float64(nostr.Now())
if filter.Until != nil {
max = float64(*filter.Until)

if filter.Since != nil || filter.Until != nil {
min := 0.0
if filter.Since != nil {
min = float64(*filter.Since)
}
max := float64(nostr.Now())
if filter.Until != nil {
max = float64(*filter.Until)
}
dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
dateRangeQ.SetField(createdAtField)
complicatedQuery.AddMust(dateRangeQ)
q = complicatedQuery
}
dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
dateRangeQ.SetField(createdAtField)
complicatedQuery.AddMust(dateRangeQ)
q = complicatedQuery
}

limit := 40
if filter.Limit != 0 {
limit = filter.Limit
if filter.Limit > 150 {
limit = 150
limit := 40
if filter.Limit != 0 {
limit = filter.Limit
if filter.Limit > 150 {
limit = 150
}
}
}

req := bluge.NewTopNSearch(limit, q)
req := bluge.NewTopNSearch(limit, q)

dmi, err := reader.Search(context.Background(), req)
if err != nil {
close(ch)
reader.Close()
return ch, fmt.Errorf("error executing search: %w", err)
}
dmi, err := reader.Search(context.Background(), req)
if err != nil {
reader.Close()
return
}

go func() {
defer reader.Close()
defer close(ch)

var next *search.DocumentMatch
for next, err = dmi.Next(); next != nil; next, err = dmi.Next() {
next.VisitStoredFields(func(field string, value []byte) bool {
id := hex.EncodeToString(value)
rawch, err := b.RawEventStore.QueryEvents(ctx, nostr.Filter{IDs: []string{id}})
if err != nil {
return false
}
for evt := range rawch {
ch <- evt
for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{nostr.ID(value)}}) {
yield(evt)
}
return false
})
@@ -111,7 +99,5 @@ func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch
if err != nil {
return
}
}()

return ch, nil
}
}
@@ -4,29 +4,24 @@ import (
"context"
"fmt"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr"
)

func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt nostr.Event) error {
b.Lock()
defer b.Unlock()

filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}

ch, err := b.QueryEvents(ctx, filter)
if err != nil {
return fmt.Errorf("failed to query before replacing: %w", err)
}

shouldStore := true
for previous := range ch {
for previous := range b.QueryEvents(filter) {
if internal.IsOlder(previous, evt) {
if err := b.DeleteEvent(ctx, previous); err != nil {
if err := b.DeleteEvent(previous.ID); err != nil {
return fmt.Errorf("failed to delete event for replacing: %w", err)
}
} else {
@@ -1,23 +1,22 @@
package bluge

import (
"context"
"fmt"
"strconv"

"github.com/blugelabs/bluge"
"fiatjaf.com/nostr"
"github.com/blugelabs/bluge"
)

func (b *BlugeBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
func (b *BlugeBackend) SaveEvent(evt nostr.Event) error {
id := eventIdentifier(evt.ID)
doc := &bluge.Document{
bluge.NewKeywordFieldBytes(id.Field(), id.Term()).Sortable().StoreValue(),
}

doc.AddField(bluge.NewTextField(contentField, evt.Content))
doc.AddField(bluge.NewTextField(kindField, strconv.Itoa(evt.Kind)))
doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey[56:]))
doc.AddField(bluge.NewTextField(kindField, strconv.Itoa(int(evt.Kind))))
doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey.Hex()[56:]))
doc.AddField(bluge.NewNumericField(createdAtField, float64(evt.CreatedAt)))

if err := b.writer.Update(doc.ID(), doc); err != nil {
@@ -2,7 +2,6 @@ package betterbinary

import (
"encoding/binary"
"encoding/hex"
"fmt"
"math"

@@ -50,9 +49,9 @@ func Marshal(evt nostr.Event, buf []byte) error {
}
binary.LittleEndian.PutUint32(buf[3:7], uint32(evt.CreatedAt))

hex.Decode(buf[7:39], []byte(evt.ID))
hex.Decode(buf[39:71], []byte(evt.PubKey))
hex.Decode(buf[71:135], []byte(evt.Sig))
copy(buf[7:39], evt.ID[:])
copy(buf[39:71], evt.PubKey[:])
copy(buf[71:135], evt.Sig[:])

tagBase := 135
// buf[135:137] (tagsSectionLength) will be set later when we know the absolute size of the tags section
@@ -108,11 +107,11 @@ func Unmarshal(data []byte, evt *nostr.Event) (err error) {
}
}()

evt.Kind = int(binary.LittleEndian.Uint16(data[1:3]))
evt.Kind = uint16(binary.LittleEndian.Uint16(data[1:3]))
evt.CreatedAt = nostr.Timestamp(binary.LittleEndian.Uint32(data[3:7]))
evt.ID = hex.EncodeToString(data[7:39])
evt.PubKey = hex.EncodeToString(data[39:71])
evt.Sig = hex.EncodeToString(data[71:135])
evt.ID = nostr.ID(data[7:39])
evt.PubKey = nostr.PubKey(data[39:71])
evt.Sig = [64]byte(data[71:135])

const tagbase = 135
tagsSectionLength := binary.LittleEndian.Uint16(data[tagbase:])
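Note: the Unmarshal offsets above pin down the fixed header of a betterbinary record; byte 0 is not touched by the hunks shown here (presumably a version or flags byte). Read off directly:

// data[1:3]     kind                 (uint16, little-endian)
// data[3:7]     created_at           (uint32, little-endian)
// data[7:39]    id                   (32 raw bytes -> nostr.ID)
// data[39:71]   pubkey               (32 raw bytes -> nostr.PubKey)
// data[71:135]  sig                  (64 raw bytes)
// data[135:137] tags section length  (uint16); the tags section follows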
@@ -1 +0,0 @@
decode-binary
@@ -1,39 +0,0 @@
package main

import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"

"fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
)

func main() {
b, err := io.ReadAll(os.Stdin)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to read from stdin: %s\n", err)
os.Exit(1)
return
}
b = bytes.TrimSpace(b)

if bytes.HasPrefix(b, []byte("0x")) {
fromHex := make([]byte, (len(b)-2)/2)
_, err := hex.Decode(fromHex, b[2:])
if err == nil {
b = fromHex
}
}

var evt nostr.Event
err = binary.Unmarshal(b, &evt)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to decode: %s\n", err)
os.Exit(1)
return
}
fmt.Println(evt.String())
}
@@ -1,103 +0,0 @@
package binary

import (
"encoding/binary"
"encoding/hex"
"fmt"

"fiatjaf.com/nostr"
)

// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later.
func Unmarshal(data []byte, evt *nostr.Event) (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("failed to decode binary for event %s from %s at %d: %v", evt.ID, evt.PubKey, evt.CreatedAt, r)
}
}()

evt.ID = hex.EncodeToString(data[0:32])
evt.PubKey = hex.EncodeToString(data[32:64])
evt.Sig = hex.EncodeToString(data[64:128])
evt.CreatedAt = nostr.Timestamp(binary.BigEndian.Uint32(data[128:132]))
evt.Kind = int(binary.BigEndian.Uint16(data[132:134]))
contentLength := int(binary.BigEndian.Uint16(data[134:136]))
evt.Content = string(data[136 : 136+contentLength])

curr := 136 + contentLength

nTags := binary.BigEndian.Uint16(data[curr : curr+2])
curr++
evt.Tags = make(nostr.Tags, nTags)

for t := range evt.Tags {
curr++
nItems := int(data[curr])
tag := make(nostr.Tag, nItems)
for i := range tag {
curr = curr + 1
itemSize := int(binary.BigEndian.Uint16(data[curr : curr+2]))
itemStart := curr + 2
item := string(data[itemStart : itemStart+itemSize])
tag[i] = item
curr = itemStart + itemSize
}
evt.Tags[t] = tag
}

return err
}

// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later.
func Marshal(evt *nostr.Event) ([]byte, error) {
content := []byte(evt.Content)
buf := make([]byte, 32+32+64+4+2+2+len(content)+65536+len(evt.Tags)*40 /* blergh */)

hex.Decode(buf[0:32], []byte(evt.ID))
hex.Decode(buf[32:64], []byte(evt.PubKey))
hex.Decode(buf[64:128], []byte(evt.Sig))

if evt.CreatedAt > MaxCreatedAt {
return nil, fmt.Errorf("created_at is too big: %d", evt.CreatedAt)
}
binary.BigEndian.PutUint32(buf[128:132], uint32(evt.CreatedAt))

if evt.Kind > MaxKind {
return nil, fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, MaxKind)
}
binary.BigEndian.PutUint16(buf[132:134], uint16(evt.Kind))

if contentLength := len(content); contentLength > MaxContentSize {
return nil, fmt.Errorf("content is too large: %d, max is %d", contentLength, MaxContentSize)
} else {
binary.BigEndian.PutUint16(buf[134:136], uint16(contentLength))
}
copy(buf[136:], content)

if tagCount := len(evt.Tags); tagCount > MaxTagCount {
return nil, fmt.Errorf("can't encode too many tags: %d, max is %d", tagCount, MaxTagCount)
} else {
binary.BigEndian.PutUint16(buf[136+len(content):136+len(content)+2], uint16(tagCount))
}

buf = buf[0 : 136+len(content)+2]

for _, tag := range evt.Tags {
if itemCount := len(tag); itemCount > MaxTagItemCount {
return nil, fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, MaxTagItemCount)
} else {
buf = append(buf, uint8(itemCount))
}
for _, item := range tag {
itemb := []byte(item)
itemSize := len(itemb)
if itemSize > MaxTagItemSize {
return nil, fmt.Errorf("tag item is too large: %d, max is %d", itemSize, MaxTagItemSize)
}
buf = binary.BigEndian.AppendUint16(buf, uint16(itemSize))
buf = append(buf, itemb...)
buf = append(buf, 0)
}
}
return buf, nil
}
@@ -1,35 +0,0 @@
package binary

import (
"math"

"fiatjaf.com/nostr"
)

const (
MaxKind = math.MaxUint16
MaxCreatedAt = math.MaxUint32
MaxContentSize = math.MaxUint16
MaxTagCount = math.MaxUint16
MaxTagItemCount = math.MaxUint8
MaxTagItemSize = math.MaxUint16
)

func EventEligibleForBinaryEncoding(event *nostr.Event) bool {
if len(event.Content) > MaxContentSize || event.Kind > MaxKind || event.CreatedAt > MaxCreatedAt || len(event.Tags) > MaxTagCount {
return false
}

for _, tag := range event.Tags {
if len(tag) > MaxTagItemCount {
return false
}
for _, item := range tag {
if len(item) > MaxTagItemSize {
return false
}
}
}

return true
}
@@ -4,12 +4,7 @@ import (
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/badger"
"fiatjaf.com/nostr/eventstore/bluge"
"fiatjaf.com/nostr/eventstore/edgedb"
"fiatjaf.com/nostr/eventstore/lmdb"
"fiatjaf.com/nostr/eventstore/mongo"
"fiatjaf.com/nostr/eventstore/mysql"
"fiatjaf.com/nostr/eventstore/postgresql"
"fiatjaf.com/nostr/eventstore/sqlite3"
"fiatjaf.com/nostr/eventstore/strfry"
)

@@ -17,11 +12,6 @@ import (
var (
_ eventstore.Store = (*badger.BadgerBackend)(nil)
_ eventstore.Store = (*lmdb.LMDBBackend)(nil)
_ eventstore.Store = (*edgedb.EdgeDBBackend)(nil)
_ eventstore.Store = (*postgresql.PostgresBackend)(nil)
_ eventstore.Store = (*mongo.MongoDBBackend)(nil)
_ eventstore.Store = (*sqlite3.SQLite3Backend)(nil)
_ eventstore.Store = (*strfry.StrfryBackend)(nil)
_ eventstore.Store = (*bluge.BlugeBackend)(nil)
_ eventstore.Store = (*mysql.MySQLBackend)(nil)
)
@@ -1,18 +1,18 @@
package internal

import (
"bytes"
"cmp"
"math"
"slices"
"strings"

mergesortedslices "fiatjaf.com/lib/merge-sorted-slices"
"fiatjaf.com/nostr"
)

func IsOlder(previous, next *nostr.Event) bool {
func IsOlder(previous, next nostr.Event) bool {
return previous.CreatedAt < next.CreatedAt ||
(previous.CreatedAt == next.CreatedAt && previous.ID > next.ID)
(previous.CreatedAt == next.CreatedAt && bytes.Compare(previous.ID[:], next.ID[:]) == 1)
}

func ChooseNarrowestTag(filter nostr.Filter) (key string, values []string, goodness int) {
@@ -80,7 +80,7 @@ func CopyMapWithoutKey[K comparable, V any](originalMap map[K]V, key K) map[K]V
}

type IterEvent struct {
*nostr.Event
nostr.Event
Q int
}

@@ -166,18 +166,18 @@ func SwapDelete[A any](arr []A, i int) []A {
}

func compareIterEvent(a, b IterEvent) int {
if a.Event == nil {
if b.Event == nil {
if a.Event.ID == nostr.ZeroID {
if b.Event.ID == nostr.ZeroID {
return 0
} else {
return -1
}
} else if b.Event == nil {
} else if b.Event.ID == nostr.ZeroID {
return 1
}

if a.CreatedAt == b.CreatedAt {
return strings.Compare(a.ID, b.ID)
return slices.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.CreatedAt, b.CreatedAt)
}
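Note: `IsOlder` keeps the old tie-breaking semantics, just over raw bytes: equal timestamps fall back to comparing ids, and the lexicographically larger id counts as "older" (so it loses when replacing). An illustrative check from inside the internal package, relying only on `nostr.Event` zero values:

a := nostr.Event{CreatedAt: 100}
b := nostr.Event{CreatedAt: 100}
b.ID[0] = 0xff
fmt.Println(IsOlder(b, a)) // true: same created_at, b.ID sorts after a.ID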
@@ -1,8 +0,0 @@
go test fuzz v1
uint(256)
uint(31)
uint(260)
uint(2)
uint(69)
uint(385)
uint(1)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(267)
uint(50)
uint(355)
uint(2)
uint(69)
uint(213)
uint(1)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(280)
uint(0)
uint(13)
uint(2)
uint(2)
uint(0)
uint(0)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(259)
uint(126)
uint(5)
uint(23)
uint(0)
uint(0)
uint(92)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(201)
uint(50)
uint(13)
uint(97)
uint(0)
uint(0)
uint(77)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(164)
uint(50)
uint(13)
uint(1)
uint(2)
uint(13)
uint(0)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(200)
uint(50)
uint(13)
uint(8)
uint(2)
uint(0)
uint(1)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(200)
uint(117)
uint(13)
uint(2)
uint(2)
uint(0)
uint(1)
@@ -1,8 +0,0 @@
go test fuzz v1
uint(200)
uint(50)
uint(13)
uint(2)
uint(2)
uint(0)
uint(0)
@@ -2,19 +2,18 @@ package lmdb

import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"

"github.com/PowerDNS/lmdb-go/lmdb"
bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/nip45"
"fiatjaf.com/nostr/nip45/hyperloglog"
"github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
)

func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0

queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)
@@ -72,7 +71,7 @@ func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
}

evt := &nostr.Event{}
if err := bin.Unmarshal(val, evt); err != nil {
if err := betterbinary.Unmarshal(val, evt); err != nil {
it.next()
continue
}
@@ -94,8 +93,9 @@ func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
return count, err
}

// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results, following NIP-45
func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results,
// following NIP-45
func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache {
return b.countEventsHLLCached(filter)
}
@@ -147,7 +147,7 @@ func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, o
if extraKinds == nil && extraTagValues == nil {
// nothing extra to check
count++
hll.AddBytes(val[32:64])
hll.AddBytes(nostr.PubKey(val[32:64]))
} else {
// check it against kinds without decoding the entire thing
if !slices.Contains(extraKinds, [2]byte(val[132:134])) {
@@ -156,7 +156,7 @@ func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, o
}

evt := &nostr.Event{}
if err := bin.Unmarshal(val, evt); err != nil {
if err := betterbinary.Unmarshal(val, evt); err != nil {
it.next()
continue
}
@@ -211,7 +211,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
return count, hll, err
}

func (b *LMDBBackend) updateHyperLogLogCachedValues(txn *lmdb.Txn, evt *nostr.Event) error {
func (b *LMDBBackend) updateHyperLogLogCachedValues(txn *lmdb.Txn, evt nostr.Event) error {
cacheKey := make([]byte, 2+8)
binary.BigEndian.PutUint16(cacheKey[0:2], uint16(evt.Kind))
@@ -1,29 +1,39 @@
package lmdb

import (
"context"
"encoding/hex"
"fmt"

"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/codec/betterbinary"
"github.com/PowerDNS/lmdb-go/lmdb"
)

func (b *LMDBBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
func (b *LMDBBackend) DeleteEvent(id nostr.ID) error {
return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
return b.delete(txn, evt)
return b.delete(txn, id)
})
}

func (b *LMDBBackend) delete(txn *lmdb.Txn, evt *nostr.Event) error {
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
idx, err := txn.Get(b.indexId, idPrefix8)
func (b *LMDBBackend) delete(txn *lmdb.Txn, id nostr.ID) error {
// check if we have this actually
idx, err := txn.Get(b.indexId, id[0:8])
if lmdb.IsNotFound(err) {
// we already do not have this
return nil
}
if err != nil {
return fmt.Errorf("failed to get current idx for deleting %x: %w", evt.ID[0:8*2], err)
return fmt.Errorf("failed to get current idx for deleting %x: %w", id[0:8], err)
}

// if we do, get it so we can compute the indexes
buf, err := txn.Get(b.rawEventStore, idx)
if err != nil {
return fmt.Errorf("failed to get raw event %x to delete: %w", id, err)
}

var evt nostr.Event
if err := betterbinary.Unmarshal(buf, &evt); err != nil {
return fmt.Errorf("failed to unmarshal raw event %x to delete: %w", id, err)
}

// calculate all index keys we have for this event and delete them
@@ -9,8 +9,8 @@ import (
"strconv"
"strings"

"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr"
"github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
)

@@ -52,13 +52,11 @@ func (b *LMDBBackend) keyName(key key) string {
return fmt.Sprintf("<dbi=%s key=%x>", b.dbiName(key.dbi), key.key)
}

func (b *LMDBBackend) getIndexKeysForEvent(evt *nostr.Event) iter.Seq[key] {
func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
return func(yield func(key) bool) {
{
// ~ by id
k := make([]byte, 8)
hex.Decode(k[0:8], []byte(evt.ID[0:8*2]))
if !yield(key{dbi: b.indexId, key: k[0:8]}) {
if !yield(key{dbi: b.indexId, key: evt.ID[0:8]}) {
return
}
}
@@ -6,8 +6,8 @@ import (
"os"
"sync/atomic"

"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr/eventstore"
"github.com/PowerDNS/lmdb-go/lmdb"
)

var _ eventstore.Store = (*LMDBBackend)(nil)
@@ -34,7 +34,7 @@ type LMDBBackend struct {
indexPTagKind lmdb.DBI

hllCache lmdb.DBI
EnableHLLCacheFor func(kind int) (useCache bool, skipSavingActualEvent bool)
EnableHLLCacheFor func(kind uint16) (useCache bool, skipSavingActualEvent bool)

lastId atomic.Uint32
}
@@ -3,11 +3,8 @@ package lmdb
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/PowerDNS/lmdb-go/lmdb"
|
||||
bin "fiatjaf.com/nostr/eventstore/internal/binary"
|
||||
"fiatjaf.com/nostr"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -28,114 +25,18 @@ func (b *LMDBBackend) runMigrations() error {
|
||||
version = binary.BigEndian.Uint16(v)
|
||||
}
|
||||
|
||||
// all previous migrations are useless because we will just reindex everything
|
||||
if version == 0 {
|
||||
// if there is any data in the relay we will just set the version to the max without saying anything
|
||||
cursor, err := txn.OpenCursor(b.rawEventStore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
hasAnyEntries := false
|
||||
_, _, err = cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
hasAnyEntries = true
|
||||
break
|
||||
}
|
||||
|
||||
if !hasAnyEntries {
|
||||
b.setVersion(txn, 8)
|
||||
version = 8
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// do the migrations in increasing steps (there is no rollback)
|
||||
//
|
||||
|
||||
// this is when we reindex everything
|
||||
if version < 8 {
|
||||
log.Println("[lmdb] migration 8: reindex everything")
|
||||
|
||||
if err := txn.Drop(b.indexId, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexCreatedAt, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexKind, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexPTagKind, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexPubkey, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexPubkeyKind, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexTag, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexTag32, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Drop(b.indexTagAddr, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cursor, err := txn.OpenCursor(b.rawEventStore)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration 8: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
seen := make(map[[32]byte]struct{})
|
||||
|
||||
idx, val, err := cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
idp := *(*[32]byte)(val[0:32])
|
||||
if _, isDup := seen[idp]; isDup {
|
||||
// do not index, but delete this entry
|
||||
if err := txn.Del(b.rawEventStore, idx, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// next
|
||||
idx, val, err = cursor.Get(nil, nil, lmdb.Next)
|
||||
continue
|
||||
}
|
||||
|
||||
seen[idp] = struct{}{}
|
||||
|
||||
evt := &nostr.Event{}
|
||||
if err := bin.Unmarshal(val, evt); err != nil {
|
||||
return fmt.Errorf("error decoding event %x on migration 5: %w", idx, err)
|
||||
}
|
||||
|
||||
			for key := range b.getIndexKeysForEvent(evt) {
				if err := txn.Put(key.dbi, key.key, idx, 0); err != nil {
					return fmt.Errorf("failed to save index %s for event %s (%v) on migration 8: %w",
						b.keyName(key), evt.ID, idx, err)
				}
			}

			// next
			idx, val, err = cursor.Get(nil, nil, lmdb.Next)
		}
		if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound {
			// exited the loop with an error different from NOTFOUND
			return err
		}

		// bump version
		if err := b.setVersion(txn, 8); err != nil {
			return err
		}
	if version < 1 {
	}

	// bump version
	// if err := b.setVersion(txn, 1); err != nil {
	// 	return err
	// }

	return nil
	})
}
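
runMigrations reads the stored schema version as a big-endian uint16; a plausible counterpart for the setVersion helper it calls would write the same encoding back. This is a sketch only — the settingsStore DBI and the "version" key below are assumptions for illustration, not taken from the diff:

	func (b *LMDBBackend) setVersion(txn *lmdb.Txn, version uint16) error {
		buf := make([]byte, 2)
		binary.BigEndian.PutUint16(buf, version) // matches binary.BigEndian.Uint16(v) above
		return txn.Put(b.settingsStore, []byte("version"), buf, 0)
	}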

@@ -2,59 +2,49 @@ package lmdb

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"iter"
	"log"
	"slices"

	"github.com/PowerDNS/lmdb-go/lmdb"
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/eventstore/internal"
	bin "fiatjaf.com/nostr/eventstore/internal/binary"
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/codec/betterbinary"
	"fiatjaf.com/nostr/eventstore/internal"
	"github.com/PowerDNS/lmdb-go/lmdb"
)

func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	ch := make(chan *nostr.Event)

	if filter.Search != "" {
		close(ch)
		return ch, nil
	}

	// max number of events we'll return
	maxLimit := b.MaxLimit
	var limit int
	if eventstore.IsNegentropySession(ctx) {
		maxLimit = b.MaxLimitNegentropy
		limit = maxLimit
	} else {
		limit = maxLimit / 4
	}
	if filter.Limit > 0 && filter.Limit <= maxLimit {
		limit = filter.Limit
	}
	if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
		close(ch)
		return ch, nil
	} else if tlimit > 0 {
		limit = tlimit
	}

	go b.lmdbEnv.View(func(txn *lmdb.Txn) error {
		txn.RawRead = true
		defer close(ch)
		results, err := b.query(txn, filter, limit)

		for _, ie := range results {
			ch <- ie.Event
func (b *LMDBBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
	return func(yield func(nostr.Event) bool) {
		if filter.Search != "" {
			return
		}

		return err
	})
		// max number of events we'll return
		var limit int
		limit = b.MaxLimit / 4
		if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
			limit = filter.Limit
		}
		if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
			return
		} else if tlimit > 0 {
			limit = tlimit
		}

	return ch, nil
		b.lmdbEnv.View(func(txn *lmdb.Txn) error {
			txn.RawRead = true
			results, err := b.query(txn, filter, limit)

			for _, ie := range results {
				if !yield(ie.Event) {
					break
				}
			}

			return err
		})
	}
}
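
QueryEvents now returns an iter.Seq[nostr.Event] instead of a channel plus error: the caller ranges over it directly, and breaking out of the loop makes yield return false, which lets the LMDB read transaction finish cleanly. A hedged consumer sketch (the backend setup, Init call, and filter values are invented for illustration):

	package main

	import (
		"fmt"

		"fiatjaf.com/nostr"
		"fiatjaf.com/nostr/eventstore/lmdb"
	)

	func main() {
		backend := &lmdb.LMDBBackend{Path: "/tmp/relay-db"} // hypothetical setup
		if err := backend.Init(); err != nil {              // Init assumed from the Store interface
			panic(err)
		}
		defer backend.Close()

		count := 0
		for evt := range backend.QueryEvents(nostr.Filter{Kinds: []uint16{1}, Limit: 50}) {
			fmt.Println(evt.ID, evt.CreatedAt)
			count++
			if count == 10 {
				break // stopping early is just a break now; no context cancellation needed
			}
		}
	}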

func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) {
@@ -73,16 +63,16 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
	// we will continue to pull from it as soon as some other iterator takes the position
	oldest := internal.IterEvent{Q: -1}

	secondPhase := false // after we have gathered enough events we will change the way we iterate
	sndPhase := false // after we have gathered enough events we will change the way we iterate
	secondBatch := make([][]internal.IterEvent, 0, len(queries)+1)
	secondPhaseParticipants := make([]int, 0, len(queries)+1)
	sndPhaseParticipants := make([]int, 0, len(queries)+1)

	// while merging results in the second phase we will alternate between these two lists
	// to avoid having to create new lists all the time
	var secondPhaseResultsA []internal.IterEvent
	var secondPhaseResultsB []internal.IterEvent
	var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
	var secondPhaseHasResultsPending bool
	var sndPhaseResultsA []internal.IterEvent
	var sndPhaseResultsB []internal.IterEvent
	var sndPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
	var sndPhaseHasResultsPending bool

	remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing
	batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -171,8 +161,8 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
			}

			// decode the entire thing
			event := &nostr.Event{}
			if err := bin.Unmarshal(val, event); err != nil {
			event := nostr.Event{}
			if err := betterbinary.Unmarshal(val, &event); err != nil {
				log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", val[0:32],
					query.prefix, query.startingPoint, query.dbi, err)
				return nil, fmt.Errorf("event read error: %w", err)
@@ -190,18 +180,18 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
			evt := internal.IterEvent{Event: event, Q: q}
			//
			//
			if secondPhase {
			if sndPhase {
				// do the process described below at HIWAWVRTP.
				// if we've reached here this means we've already passed the `since` check.
				// now we have to eliminate the event currently at the `since` threshold.
				nextThreshold := firstPhaseResults[len(firstPhaseResults)-2]
				if oldest.Event == nil {
				if oldest.Event.ID == nostr.ZeroID {
					// fmt.Println(" b1", evt.ID[0:8])
					// BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE)
					// when we don't have the oldest set, we will keep the results
					// and not change the cutting point -- it's bad, but hopefully not that bad.
					results[q] = append(results[q], evt)
					secondPhaseHasResultsPending = true
					sndPhaseHasResultsPending = true
				} else if nextThreshold.CreatedAt > oldest.CreatedAt {
					// fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt, evt.ID[0:8])
					// one of the events we have stored is the actual next threshold
@@ -218,7 +208,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
					// finally
					// add this to the results to be merged later
					results[q] = append(results[q], evt)
					secondPhaseHasResultsPending = true
					sndPhaseHasResultsPending = true
				} else if nextThreshold.CreatedAt < evt.CreatedAt {
					// the next last event in the firstPhaseResults is the next threshold
					// fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt, evt.ID[0:8])
@@ -228,7 +218,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
					// fmt.Println(" new since", since)
					// add this to the results to be merged later
					results[q] = append(results[q], evt)
					secondPhaseHasResultsPending = true
					sndPhaseHasResultsPending = true
					// update the oldest event
					if evt.CreatedAt < oldest.CreatedAt {
						oldest = evt
@@ -247,7 +237,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
				firstPhaseTotalPulled++

				// update the oldest event
				if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt {
				if oldest.Event.ID == nostr.ZeroID || evt.CreatedAt < oldest.CreatedAt {
					oldest = evt
				}
			}
@@ -273,20 +263,20 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in

		// we will do this check if we haven't accumulated the requested number of events yet
// fmt.Println("oldest", oldest.Event, "from iter", oldest.Q)
|
||||
if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) {
|
||||
if sndPhase && sndPhaseHasResultsPending && (oldest.Event.ID == nostr.ZeroID || remainingUnexhausted == 0) {
|
||||
// fmt.Println("second phase aggregation!")
|
||||
// when we are in the second phase we will aggressively aggregate results on every iteration
|
||||
//
|
||||
secondBatch = secondBatch[:0]
|
||||
for s := 0; s < len(secondPhaseParticipants); s++ {
|
||||
q := secondPhaseParticipants[s]
|
||||
for s := 0; s < len(sndPhaseParticipants); s++ {
|
||||
q := sndPhaseParticipants[s]
|
||||
|
||||
if len(results[q]) > 0 {
|
||||
secondBatch = append(secondBatch, results[q])
|
||||
}
|
||||
|
||||
if exhausted[q] {
|
||||
secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s)
|
||||
sndPhaseParticipants = internal.SwapDelete(sndPhaseParticipants, s)
|
||||
s--
|
||||
}
|
||||
}
|
||||
@@ -294,29 +284,29 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
|
||||
// every time we get here we will alternate between these A and B lists
|
||||
// combining everything we have into a new partial results list.
|
||||
// after we've done that we can again set the oldest.
|
||||
// fmt.Println(" xxx", secondPhaseResultsToggle)
|
||||
if secondPhaseResultsToggle {
|
||||
secondBatch = append(secondBatch, secondPhaseResultsB)
|
||||
secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA)
|
||||
oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1]
|
||||
// fmt.Println(" new aggregated a", len(secondPhaseResultsB))
|
||||
// fmt.Println(" xxx", sndPhaseResultsToggle)
|
||||
if sndPhaseResultsToggle {
|
||||
secondBatch = append(secondBatch, sndPhaseResultsB)
|
||||
sndPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsA)
|
||||
oldest = sndPhaseResultsA[len(sndPhaseResultsA)-1]
|
||||
// fmt.Println(" new aggregated a", len(sndPhaseResultsB))
|
||||
} else {
|
||||
secondBatch = append(secondBatch, secondPhaseResultsA)
|
||||
secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB)
|
||||
oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1]
|
||||
// fmt.Println(" new aggregated b", len(secondPhaseResultsB))
|
||||
secondBatch = append(secondBatch, sndPhaseResultsA)
|
||||
sndPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsB)
|
||||
oldest = sndPhaseResultsB[len(sndPhaseResultsB)-1]
|
||||
// fmt.Println(" new aggregated b", len(sndPhaseResultsB))
|
||||
}
|
||||
secondPhaseResultsToggle = !secondPhaseResultsToggle
|
||||
sndPhaseResultsToggle = !sndPhaseResultsToggle
|
||||
|
||||
since = uint32(oldest.CreatedAt)
|
||||
// fmt.Println(" new since", since)
|
||||
|
||||
// reset the `results` list so we can keep using it
|
||||
results = results[:len(queries)]
|
||||
for _, q := range secondPhaseParticipants {
|
||||
for _, q := range sndPhaseParticipants {
|
||||
results[q] = results[q][:0]
|
||||
}
|
||||
} else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
|
||||
} else if !sndPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
|
||||
// fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted)
|
||||
|
||||
// we will exclude this oldest number as it is not relevant anymore
|
||||
@@ -360,16 +350,16 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
|
||||
results[q] = results[q][:0]
|
||||
|
||||
// build this index of indexes with everybody who remains
|
||||
secondPhaseParticipants = append(secondPhaseParticipants, q)
|
||||
sndPhaseParticipants = append(sndPhaseParticipants, q)
|
||||
}
|
||||
|
||||
			// we create these two lists and alternate between them so we don't
			// have to create a new one every time
			secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
			secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
			sndPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
			sndPhaseResultsB = make([]internal.IterEvent, 0, limit*2)

			// from now on we won't run this block anymore
			secondPhase = true
			sndPhase = true
		}

		// fmt.Println("remaining", remainingUnexhausted)
@@ -378,27 +368,27 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
		}
	}

	// fmt.Println("is secondPhase?", secondPhase)
	// fmt.Println("is sndPhase?", sndPhase)

	var combinedResults []internal.IterEvent

	if secondPhase {
	if sndPhase {
		// fmt.Println("ending second phase")
		// when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff,
		// when we reach this point either sndPhaseResultsA or sndPhaseResultsB will be full of stuff,
		// the other will be empty
		var secondPhaseResults []internal.IterEvent
		// fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB))
		if secondPhaseResultsToggle {
			secondPhaseResults = secondPhaseResultsB
			combinedResults = secondPhaseResultsA[0:limit] // reuse this
			// fmt.Println(" using b", len(secondPhaseResultsA))
		var sndPhaseResults []internal.IterEvent
		// fmt.Println("xxx", sndPhaseResultsToggle, len(sndPhaseResultsA), len(sndPhaseResultsB))
		if sndPhaseResultsToggle {
			sndPhaseResults = sndPhaseResultsB
			combinedResults = sndPhaseResultsA[0:limit] // reuse this
			// fmt.Println(" using b", len(sndPhaseResultsA))
		} else {
			secondPhaseResults = secondPhaseResultsA
			combinedResults = secondPhaseResultsB[0:limit] // reuse this
			// fmt.Println(" using a", len(secondPhaseResultsA))
			sndPhaseResults = sndPhaseResultsA
			combinedResults = sndPhaseResultsB[0:limit] // reuse this
			// fmt.Println(" using a", len(sndPhaseResultsA))
		}

		all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults}
		all := [][]internal.IterEvent{firstPhaseResults, sndPhaseResults}
		combinedResults = internal.MergeSortMultiple(all, limit, combinedResults)
		// fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit)
	} else {
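
The secondPhase→sndPhase renames above are cosmetic; the underlying pattern is unchanged: two preallocated slices take turns receiving the output of MergeSortMultiple, so each aggregation round merges the new batches together with the previous round's buffer into the other buffer instead of allocating a fresh slice. A self-contained sketch of the idea, with plain ints standing in for IterEvent and a sort standing in for the real k-way merge:

	package main

	import (
		"fmt"
		"slices"
	)

	func main() {
		// two reusable buffers plus a toggle, mirroring sndPhaseResultsA/B
		bufA := make([]int, 0, 64)
		bufB := make([]int, 0, 64)
		toggle := false

		merge := func(batch []int) []int {
			prev, next := bufA, bufB[:0]
			if toggle {
				prev, next = bufB, bufA[:0]
			}
			next = append(next, prev...) // carry over last round's result
			next = append(next, batch...)
			slices.Sort(next) // stand-in for the real k-way merge
			if toggle {
				bufA = next
			} else {
				bufB = next
			}
			toggle = !toggle
			return next
		}

		fmt.Println(merge([]int{3, 1})) // [1 3]
		fmt.Println(merge([]int{2}))    // [1 2 3]
	}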

@@ -1,23 +1,22 @@
package lmdb

import (
	"context"
	"fmt"
	"math"

	"github.com/PowerDNS/lmdb-go/lmdb"
	"fiatjaf.com/nostr/eventstore/internal"
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/internal"
	"github.com/PowerDNS/lmdb-go/lmdb"
)

func (b *LMDBBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
	// sanity checking
	if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
		return fmt.Errorf("event with values out of expected boundaries")
	}

	return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
		filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
		filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
		if nostr.IsAddressableKind(evt.Kind) {
			// when addressable, add the "d" tag to the filter
			filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}

@@ -1,18 +1,16 @@
package lmdb

import (
	"context"
	"encoding/hex"
	"fmt"
	"math"

	"github.com/PowerDNS/lmdb-go/lmdb"
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
	bin "fiatjaf.com/nostr/eventstore/internal/binary"
	"fiatjaf.com/nostr"
	"github.com/PowerDNS/lmdb-go/lmdb"
)

func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
func (b *LMDBBackend) SaveEvent(evt nostr.Event) error {
	// sanity checking
	if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
		return fmt.Errorf("event with values out of expected boundaries")
@@ -35,8 +33,7 @@ func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
	}

		// check if we already have this id
		id, _ := hex.DecodeString(evt.ID)
		_, err := txn.Get(b.indexId, id)
		_, err := txn.Get(b.indexId, evt.ID[0:8])
		if operr, ok := err.(*lmdb.OpError); ok && operr.Errno != lmdb.NotFound {
			// we will only proceed if we get a NotFound
			return eventstore.ErrDupEvent
@@ -46,7 +43,7 @@ func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
	})
}
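
The duplicate check now keys the indexId lookup with the first 8 bytes of the binary event ID instead of hex-decoding a string ID first. Since nostr.ID is a fixed-size byte array (as the evt.ID[0:8] slicing implies), the check is a single point lookup — a hedged sketch of the pattern, not the exact code around the hunk:

	// 8 bytes of the 32-byte id keep the index small while making accidental
	// prefix collisions astronomically unlikely for realistic event counts
	if _, err := txn.Get(b.indexId, evt.ID[0:8]); err == nil {
		return eventstore.ErrDupEvent // key exists: already stored
	}
	// a NotFound error means the event is new and we can proceed to save it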

func (b *LMDBBackend) save(txn *lmdb.Txn, evt *nostr.Event) error {
func (b *LMDBBackend) save(txn *lmdb.Txn, evt nostr.Event) error {
	// encode to binary form so we'll save it
	bin, err := bin.Marshal(evt)
	if err != nil {

@@ -1,13 +0,0 @@
package eventstore

import "context"

var negentropySessionKey = struct{}{}

func IsNegentropySession(ctx context.Context) bool {
	return ctx.Value(negentropySessionKey) != nil
}

func SetNegentropy(ctx context.Context) context.Context {
	return context.WithValue(ctx, negentropySessionKey, struct{}{})
}
@@ -1,14 +1,15 @@
package slicestore

import (
	"context"
	"bytes"
	"cmp"
	"fmt"
	"strings"
	"iter"
	"sync"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/eventstore/internal"
	"fiatjaf.com/nostr"
	"golang.org/x/exp/slices"
)

@@ -16,13 +17,13 @@ var _ eventstore.Store = (*SliceStore)(nil)

type SliceStore struct {
	sync.Mutex
	internal []*nostr.Event
	internal []nostr.Event

	MaxLimit int
}

func (b *SliceStore) Init() error {
	b.internal = make([]*nostr.Event, 0, 5000)
	b.internal = make([]nostr.Event, 0, 5000)
	if b.MaxLimit == 0 {
		b.MaxLimit = 500
	}
@@ -31,50 +32,44 @@ func (b *SliceStore) Init() error {

func (b *SliceStore) Close() {}

func (b *SliceStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	ch := make(chan *nostr.Event)
	if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) {
		filter.Limit = b.MaxLimit
	}
func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
	return func(yield func(nostr.Event) bool) {
		if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) {
			filter.Limit = b.MaxLimit
		}

	// efficiently determine where to start and end
	start := 0
	end := len(b.internal)
	if filter.Until != nil {
		start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator)
	}
	if filter.Since != nil {
		end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator)
	}
		// efficiently determine where to start and end
		start := 0
		end := len(b.internal)
		if filter.Until != nil {
			start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator)
		}
		if filter.Since != nil {
			end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator)
		}

	// ham
	if end < start {
		close(ch)
		return ch, nil
	}
		// ham
		if end < start {
			return
		}

	count := 0
	go func() {
		count := 0
		for _, event := range b.internal[start:end] {
			if count == filter.Limit {
				break
			}

			if filter.Matches(event) {
				select {
				case ch <- event:
				case <-ctx.Done():
				if !yield(event) {
					return
				}
				count++
			}
		}
		close(ch)
	}()
	return ch, nil
	}
}
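
Because b.internal stays sorted newest-first, the Until and Since bounds translate into a subslice via binary search before any per-event matching happens; eventTimestampComparator compares an event against a bare timestamp for exactly this. A self-contained toy illustration of the windowing (data and types invented):

	package main

	import (
		"fmt"
		"slices"
	)

	type ev struct{ createdAt int }

	// newest-first ordering against a bare timestamp, like eventTimestampComparator
	func cmpTs(e ev, t int) int { return t - e.createdAt }

	func main() {
		events := []ev{{50}, {40}, {30}, {20}, {10}} // sorted descending by createdAt
		until, since := 45, 15

		start, _ := slices.BinarySearchFunc(events, until, cmpTs) // first event with createdAt <= until
		end, _ := slices.BinarySearchFunc(events, since, cmpTs)   // first event with createdAt < since

		fmt.Println(events[start:end]) // [{40} {30} {20}]
	}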

func (b *SliceStore) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
func (b *SliceStore) CountEvents(filter nostr.Filter) (int64, error) {
	var val int64
	for _, event := range b.internal {
		if filter.Matches(event) {
@@ -84,7 +79,7 @@ func (b *SliceStore) CountEvents(ctx context.Context, filter nostr.Filter) (int6
	return val, nil
}

func (b *SliceStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
func (b *SliceStore) SaveEvent(evt nostr.Event) error {
	idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
	if found {
		return eventstore.ErrDupEvent
@@ -97,8 +92,8 @@ func (b *SliceStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
	return nil
}

func (b *SliceStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
	idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
func (b *SliceStore) DeleteEvent(id nostr.ID) error {
	idx, found := slices.BinarySearchFunc(b.internal, id, eventIDComparator)
	if !found {
		// we don't have this event
		return nil
@@ -110,24 +105,19 @@ func (b *SliceStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
	return nil
}

func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
	b.Lock()
	defer b.Unlock()

	filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
	filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
	if nostr.IsAddressableKind(evt.Kind) {
		filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
	}

	ch, err := b.QueryEvents(ctx, filter)
	if err != nil {
		return fmt.Errorf("failed to query before replacing: %w", err)
	}

	shouldStore := true
	for previous := range ch {
	for previous := range b.QueryEvents(filter) {
		if internal.IsOlder(previous, evt) {
			if err := b.DeleteEvent(ctx, previous); err != nil {
			if err := b.DeleteEvent(previous.ID); err != nil {
				return fmt.Errorf("failed to delete event for replacing: %w", err)
			}
		} else {
@@ -136,7 +126,7 @@ func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
	}

	if shouldStore {
		if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent {
		if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
			return fmt.Errorf("failed to save: %w", err)
		}
	}
@@ -144,14 +134,18 @@ func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
	return nil
}

func eventTimestampComparator(e *nostr.Event, t nostr.Timestamp) int {
func eventTimestampComparator(e nostr.Event, t nostr.Timestamp) int {
	return int(t) - int(e.CreatedAt)
}

func eventComparator(a *nostr.Event, b *nostr.Event) int {
	c := int(b.CreatedAt) - int(a.CreatedAt)
func eventIDComparator(e nostr.Event, i nostr.ID) int {
	return bytes.Compare(i[:], e.ID[:])
}

func eventComparator(a nostr.Event, b nostr.Event) int {
	c := cmp.Compare(b.CreatedAt, a.CreatedAt)
	if c != 0 {
		return c
	}
	return strings.Compare(b.ID, a.ID)
	return bytes.Compare(b.ID[:], a.ID[:])
}
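
SaveEvent uses eventComparator to locate the position that keeps b.internal ordered newest-first (ties broken by ID), so inserts are O(log n) to find and O(n) to shift. A sketch of the insert step — the slices.Insert call is an assumed implementation detail; only the binary search appears in the diff:

	idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
	if found {
		return eventstore.ErrDupEvent // same created_at and id: already present
	}
	// shift the tail right and place evt at idx, preserving the sort order
	b.internal = slices.Insert(b.internal, idx, evt)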

@@ -1,14 +1,12 @@
package slicestore

import (
	"context"
	"testing"

	"fiatjaf.com/nostr"
)

func TestBasicStuff(t *testing.T) {
	ctx := context.Background()
	ss := &SliceStore{}
	ss.Init()
	defer ss.Close()
@@ -22,12 +20,11 @@ func TestBasicStuff(t *testing.T) {
		if i%3 == 0 {
			kind = 12
		}
		ss.SaveEvent(ctx, &nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: kind})
		ss.SaveEvent(nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)})
	}

	ch, _ := ss.QueryEvents(ctx, nostr.Filter{})
	list := make([]*nostr.Event, 0, 20)
	for event := range ch {
	list := make([]nostr.Event, 0, 20)
	for event := range ss.QueryEvents(nostr.Filter{}) {
		list = append(list, event)
	}

@@ -39,9 +36,8 @@ func TestBasicStuff(t *testing.T) {
	}

	until := nostr.Timestamp(9999)
	ch, _ = ss.QueryEvents(ctx, nostr.Filter{Limit: 15, Until: &until, Kinds: []int{11}})
	list = make([]*nostr.Event, 0, 7)
	for event := range ch {
	list = make([]nostr.Event, 0, 7)
	for event := range ss.QueryEvents(nostr.Filter{Limit: 15, Until: &until, Kinds: []uint16{11}}) {
		list = append(list, event)
	}
	if len(list) != 7 {
@@ -49,9 +45,8 @@ func TestBasicStuff(t *testing.T) {
	}

	since := nostr.Timestamp(10009)
	ch, _ = ss.QueryEvents(ctx, nostr.Filter{Since: &since})
	list = make([]*nostr.Event, 0, 5)
	for event := range ch {
	list = make([]nostr.Event, 0, 5)
	for event := range ss.QueryEvents(nostr.Filter{Since: &since}) {
		list = append(list, event)
	}
	if len(list) != 5 {

@@ -1,7 +1,7 @@
package eventstore

import (
	"context"
	"iter"

	"fiatjaf.com/nostr"
)
@@ -15,18 +15,19 @@ type Store interface {
	// Close must be called after you're done using the store, to free up resources and so on.
	Close()

	// QueryEvents should return a channel with the events as they're recovered from a database.
	// the channel should be closed after the events are all delivered.
	QueryEvents(context.Context, nostr.Filter) (chan *nostr.Event, error)
	// DeleteEvent just deletes an event, no side-effects.
	DeleteEvent(context.Context, *nostr.Event) error
	// QueryEvents returns events that match the filter
	QueryEvents(nostr.Filter) iter.Seq[nostr.Event]

	// DeleteEvent deletes an event atomically by ID
	DeleteEvent(nostr.ID) error

	// SaveEvent just saves an event, no side-effects.
	SaveEvent(context.Context, *nostr.Event) error
	SaveEvent(nostr.Event) error

	// ReplaceEvent atomically replaces a replaceable or addressable event.
	// Conceptually it is like a Query->Delete->Save, but streamlined.
	ReplaceEvent(context.Context, *nostr.Event) error
}
	ReplaceEvent(nostr.Event) error

type Counter interface {
	CountEvents(context.Context, nostr.Filter) (int64, error)
	// CountEvents counts all events that match a given filter
	CountEvents(nostr.Filter) (int64, error)
}
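
With context.Context gone from every method, code written against the Store interface needs no plumbing beyond the store value itself. A hedged consumer sketch (filter contents invented; assumes the fiatjaf.com/nostr and fiatjaf.com/nostr/eventstore imports):

	func latestTextNotes(store eventstore.Store) []nostr.Event {
		out := make([]nostr.Event, 0, 20)
		for evt := range store.QueryEvents(nostr.Filter{Kinds: []uint16{1}, Limit: 20}) {
			out = append(out, evt)
		}
		return out
	}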

@@ -5,13 +5,10 @@ import (
	"os"
	"testing"

	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/eventstore/badger"
	"fiatjaf.com/nostr/eventstore/lmdb"
	"fiatjaf.com/nostr/eventstore/postgresql"
	"fiatjaf.com/nostr/eventstore/slicestore"
	"fiatjaf.com/nostr/eventstore/sqlite3"
)

const (
@@ -51,27 +48,3 @@ func TestBadger(t *testing.T) {
		t.Run(test.name, func(t *testing.T) { test.run(t, &badger.BadgerBackend{Path: dbpath + "badger"}) })
	}
}

func TestSQLite(t *testing.T) {
	for _, test := range tests {
		os.RemoveAll(dbpath + "sqlite")
		t.Run(test.name, func(t *testing.T) {
			test.run(t, &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000})
		})
	}
}

func TestPostgres(t *testing.T) {
	for _, test := range tests {
		postgres := embeddedpostgres.NewDatabase()
		err := postgres.Start()
		if err != nil {
			t.Fatalf("failed to start embedded postgres: %s", err)
			return
		}
		t.Run(test.name, func(t *testing.T) {
			test.run(t, &postgresql.PostgresBackend{DatabaseURL: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000})
		})
		postgres.Stop()
	}
}