eventstore: QueryEvents() now takes a maxLimit param so everything is clearer.
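In short, the interface change (quoted from the store.go hunk below): instead of each backend carrying its own `MaxLimit` configuration field, the caller now passes the cap on every call.

```go
// before
QueryEvents(nostr.Filter) iter.Seq[nostr.Event]

// after: the caller decides the maximum number of events it wants back
QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event]
```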
@@ -27,8 +27,6 @@ var _ eventstore.Store = (*BadgerBackend)(nil)
 type BadgerBackend struct {
 	Path string
-	MaxLimit int
-	MaxLimitNegentropy int
 	BadgerOptionsModifier func(badger.Options) badger.Options

 	// Experimental
@@ -57,15 +55,6 @@ func (b *BadgerBackend) Init() error {
 		return fmt.Errorf("error running migrations: %w", err)
 	}

-	if b.MaxLimit != 0 {
-		b.MaxLimitNegentropy = b.MaxLimit
-	} else {
-		b.MaxLimit = 1000
-		if b.MaxLimitNegentropy == 0 {
-			b.MaxLimitNegentropy = 16777216
-		}
-	}
-
 	if err := b.DB.View(func(txn *badger.Txn) error {
 		it := txn.NewIterator(badger.IteratorOptions{
 			Prefix: []byte{0},
@@ -16,26 +16,25 @@ import (

 var batchFilled = errors.New("batch-filled")

-func (b *BadgerBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (b *BadgerBackend) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {
 		if filter.Search != "" {
 			return
 		}

 		// max number of events we'll return
-		limit := b.MaxLimit / 4
-		if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
-			limit = filter.Limit
-		}
-		if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+		if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 || filter.LimitZero {
 			return
-		} else if tlimit > 0 {
-			limit = tlimit
+		} else if tlimit < maxLimit {
+			maxLimit = tlimit
+		}
+		if filter.Limit < maxLimit {
+			maxLimit = filter.Limit
 		}

 		// fmt.Println("limit", limit)
 		b.View(func(txn *badger.Txn) error {
-			results, err := b.query(txn, filter, limit)
+			results, err := b.query(txn, filter, maxLimit)
 			if err != nil {
 				return err
 			}
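For reference, the clamping this hunk performs (and the equivalent lmdb and mmm hunks below repeat) can be read as a standalone rule: the effective limit is the smallest of the caller's `maxLimit`, the filter's theoretical limit, and the filter's own `Limit`, with an early exit when the filter can't match anything. A hedged sketch; `effectiveLimit` is a hypothetical helper, not part of the library:

```go
package example

import "fiatjaf.com/nostr"

// effectiveLimit is a hypothetical helper mirroring the logic of the hunk above.
// It returns how many events to fetch and whether querying is worth doing at all.
func effectiveLimit(filter nostr.Filter, maxLimit int) (int, bool) {
	if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 || filter.LimitZero {
		return 0, false // the filter can't match anything, skip the query
	} else if tlimit < maxLimit {
		maxLimit = tlimit // e.g. at most one event per requested id
	}
	if filter.Limit < maxLimit {
		maxLimit = filter.Limit // the client asked for less than the cap
	}
	return maxLimit, true
}
```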
@@ -10,8 +10,15 @@ import (
 	"github.com/blugelabs/bluge/search"
 )

-func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (b *BlugeBackend) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {
+		limit := maxLimit
+		if filter.LimitZero {
+			return
+		} else if filter.Limit < limit {
+			limit = filter.Limit
+		}
+
 		if len(filter.Search) < 2 {
 			return
 		}
@@ -69,14 +76,6 @@ func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 		q = complicatedQuery
 	}

-	limit := 40
-	if filter.Limit != 0 {
-		limit = filter.Limit
-		if filter.Limit > 150 {
-			limit = 150
-		}
-	}
-
 	req := bluge.NewTopNSearch(limit, q)

 	dmi, err := reader.Search(context.Background(), req)
@@ -92,7 +91,7 @@ func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 		next.VisitStoredFields(func(field string, value []byte) bool {
 			id, err := nostr.IDFromHex(string(value))
 			if err == nil {
-				for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}) {
+				for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{id}}, 1) {
 					yield(evt)
 				}
 			}
@@ -12,13 +12,13 @@ func (b *BlugeBackend) ReplaceEvent(evt nostr.Event) error {
 	b.Lock()
 	defer b.Unlock()

-	filter := nostr.Filter{Limit: 1, Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
-	if evt.Kind.IsReplaceable() {
+	filter := nostr.Filter{Kinds: []nostr.Kind{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
+	if evt.Kind.IsAddressable() {
 		filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
 	}

 	shouldStore := true
-	for previous := range b.QueryEvents(filter) {
+	for previous := range b.QueryEvents(filter, 1) {
 		if internal.IsOlder(previous, evt) {
 			if err := b.DeleteEvent(previous.ID); err != nil {
 				return fmt.Errorf("failed to delete event for replacing: %w", err)
@@ -70,9 +70,9 @@ var app = &cli.Command{

 	switch typ {
 	case "lmdb":
-		db = &lmdb.LMDBBackend{Path: path, MaxLimit: 1_000_000}
+		db = &lmdb.LMDBBackend{Path: path}
 	case "badger":
-		db = &badger.BadgerBackend{Path: path, MaxLimit: 1_000_000}
+		db = &badger.BadgerBackend{Path: path}
 	case "mmm":
 		var err error
 		if db, err = doMmmInit(path); err != nil {
@@ -44,7 +44,7 @@ var neg = &cli.Command{
 	// create negentropy object and initialize it with events
 	vec := vector.New()
 	neg := negentropy.New(vec, frameSizeLimit)
-	for evt := range db.QueryEvents(filter) {
+	for evt := range db.QueryEvents(filter, math.MaxInt) {
 		vec.Insert(evt.CreatedAt, evt.ID)
 	}

@@ -47,7 +47,7 @@ func doSave(ctx context.Context, line string, evt nostr.Event) error {
 }

 func doQuery(ctx context.Context, f *nostr.Filter) error {
-	for evt := range db.QueryEvents(*f) {
+	for evt := range db.QueryEvents(*f, 1_000_000) {
 		fmt.Println(evt)
 	}
 	return nil
@@ -25,7 +25,7 @@ var query = &cli.Command{
 			continue
 		}

-		for evt := range db.QueryEvents(filter) {
+		for evt := range db.QueryEvents(filter, 1_000_000) {
 			fmt.Println(evt)
 		}
 	}
@@ -15,8 +15,6 @@ var _ eventstore.Store = (*LMDBBackend)(nil)

 type LMDBBackend struct {
 	Path string
-	MaxLimit int
-	MaxLimitNegentropy int
 	MapSize int64

 	lmdbEnv *lmdb.Env
@@ -41,15 +39,6 @@ type LMDBBackend struct {
 }

 func (b *LMDBBackend) Init() error {
-	if b.MaxLimit != 0 {
-		b.MaxLimitNegentropy = b.MaxLimit
-	} else {
-		b.MaxLimit = 1500
-		if b.MaxLimitNegentropy == 0 {
-			b.MaxLimitNegentropy = 16777216
-		}
-	}
-
 	// create directory if it doesn't exist and open it
 	if err := os.MkdirAll(b.Path, 0755); err != nil {
 		return err
@@ -14,27 +14,25 @@ import (
 	"github.com/PowerDNS/lmdb-go/lmdb"
 )

-func (b *LMDBBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (b *LMDBBackend) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {
 		if filter.Search != "" {
 			return
 		}

 		// max number of events we'll return
-		var limit int
-		limit = b.MaxLimit / 4
-		if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
-			limit = filter.Limit
-		}
-		if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+		if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 || filter.LimitZero {
 			return
-		} else if tlimit > 0 {
-			limit = tlimit
+		} else if tlimit < maxLimit {
+			maxLimit = tlimit
+		}
+		if filter.Limit < maxLimit {
+			maxLimit = filter.Limit
 		}

 		b.lmdbEnv.View(func(txn *lmdb.Txn) error {
 			txn.RawRead = true
-			results, err := b.query(txn, filter, limit)
+			results, err := b.query(txn, filter, maxLimit)

 			for _, ie := range results {
 				if !yield(ie.Event) {
@@ -14,7 +14,6 @@ type IndexingLayer struct {
 	isInitialized bool
 	name string

-	MaxLimit int
 	mmmm *MultiMmapManager

 	// this is stored in the knownLayers db as a value, and used to keep track of which layer owns each event
@@ -53,10 +52,6 @@ func (il *IndexingLayer) Init() error {

 	path := filepath.Join(il.mmmm.Dir, il.name)

-	if il.MaxLimit == 0 {
-		il.MaxLimit = 500
-	}
-
 	// open lmdb
 	env, err := lmdb.NewEnv()
 	if err != nil {
@@ -73,32 +73,30 @@ func (b *MultiMmapManager) queryByIDs(yield func(nostr.Event) bool, ids []nostr.
 	})
 }

-func (il *IndexingLayer) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (il *IndexingLayer) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {
 		if len(filter.IDs) > 0 {
 			il.mmmm.queryByIDs(yield, filter.IDs, nil)
 			return
 		}

 		if filter.Search != "" {
 			return
 		}

 		// max number of events we'll return
-		limit := il.MaxLimit / 4
-		if filter.Limit > 0 && filter.Limit < il.MaxLimit {
-			limit = filter.Limit
-		}
-		if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+		if tlimit := filter.GetTheoreticalLimit(); tlimit == 0 || filter.LimitZero {
 			return
-		} else if tlimit > 0 {
-			limit = tlimit
+		} else if tlimit < maxLimit {
+			maxLimit = tlimit
+		}
+		if filter.Limit < maxLimit {
+			maxLimit = filter.Limit
 		}

 		il.lmdbEnv.View(func(txn *lmdb.Txn) error {
 			txn.RawRead = true

-			results, err := il.query(txn, filter, limit)
+			results, err := il.query(txn, filter, filter.Limit)

 			for _, ie := range results {
 				if !yield(ie.Event) {
@@ -21,7 +21,7 @@ func (b NullStore) DeleteEvent(id nostr.ID) error {
 	return nil
 }

-func (b NullStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (b NullStore) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {}
 }

@@ -18,24 +18,19 @@ var _ eventstore.Store = (*SliceStore)(nil)
 type SliceStore struct {
 	sync.Mutex
 	internal []nostr.Event
-
-	MaxLimit int
 }

 func (b *SliceStore) Init() error {
 	b.internal = make([]nostr.Event, 0, 5000)
-	if b.MaxLimit == 0 {
-		b.MaxLimit = 500
-	}
 	return nil
 }

 func (b *SliceStore) Close() {}

-func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+func (b *SliceStore) QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event] {
 	return func(yield func(nostr.Event) bool) {
-		if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) {
-			filter.Limit = b.MaxLimit
+		if filter.Limit > maxLimit || (filter.Limit == 0 && !filter.LimitZero) {
+			filter.Limit = maxLimit
 		}

 		// efficiently determine where to start and end
@@ -136,7 +131,7 @@ func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
 	}

 	shouldStore := true
-	for previous := range b.QueryEvents(filter) {
+	for previous := range b.QueryEvents(filter, 1) {
 		if internal.IsOlder(previous, evt) {
 			if err := b.delete(previous.ID); err != nil {
 				return fmt.Errorf("failed to delete event for replacing: %w", err)
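SliceStore takes a slightly different route than the badger/lmdb hunks: it folds the cap into the filter itself, so an over-large or unset `Limit` simply becomes `maxLimit`. Restated outside the diff:

```go
// the clamp inside SliceStore.QueryEvents, per the hunk above
if filter.Limit > maxLimit || (filter.Limit == 0 && !filter.LimitZero) {
	filter.Limit = maxLimit // cap, or default when the client didn't specify a limit
}
```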
@@ -16,7 +16,7 @@ type Store interface {
 	Close()

 	// QueryEvents returns events that match the filter
-	QueryEvents(nostr.Filter) iter.Seq[nostr.Event]
+	QueryEvents(filter nostr.Filter, maxLimit int) iter.Seq[nostr.Event]

 	// DeleteEvent deletes an event atomically by ID
 	DeleteEvent(nostr.ID) error
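With the new interface in place, a minimal caller looks like this. A sketch, assuming the badger adapter shown earlier in this commit; the path and values are illustrative:

```go
package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/badger"
)

func main() {
	db := &badger.BadgerBackend{Path: "/tmp/example-badger"} // no MaxLimit field anymore
	if err := db.Init(); err != nil {
		panic(err)
	}
	defer db.Close()

	// the cap travels with the call: at most 500 events, and the filter's own
	// Limit still applies when it is smaller
	for evt := range db.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{1}, Limit: 50}, 500) {
		fmt.Println(evt)
	}
}
```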
@@ -3,6 +3,7 @@ package wrappers
 import (
 	"context"
 	"fmt"
+	"iter"

 	"fiatjaf.com/nostr"
 	"fiatjaf.com/nostr/eventstore"
@@ -12,6 +13,11 @@ var _ nostr.Publisher = StorePublisher{}

 type StorePublisher struct {
 	eventstore.Store
+	MaxLimit int
+}
+
+func (w StorePublisher) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+	return w.Store.QueryEvents(filter, w.MaxLimit)
 }

 func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
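The wrapper pins the cap at construction time so that code holding a `StorePublisher` keeps the old one-argument call shape. A usage fragment, assuming `db` is any `eventstore.Store` (the example program later in this commit does the same with `math.MaxInt`):

```go
local := wrappers.StorePublisher{Store: db, MaxLimit: 1000}
for evt := range local.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{0}, Limit: 1}) {
	fmt.Println(evt)
}
```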
filter.go
@@ -1,6 +1,7 @@
 package nostr

 import (
+	"math"
 	"slices"

 	"github.com/mailru/easyjson"
@@ -148,19 +149,19 @@ func (ef Filter) Clone() Filter {
 // GetTheoreticalLimit gets the maximum number of events that a normal filter would ever return, for example, if
 // there is a number of "ids" in the filter, the theoretical limit will be that number of ids.
 //
-// It returns -1 if there are no theoretical limits.
+// It returns math.MaxInt if there are no theoretical limits.
 //
 // The given .Limit present in the filter is ignored.
-func GetTheoreticalLimit(filter Filter) int {
-	if len(filter.IDs) > 0 {
+func (filter Filter) GetTheoreticalLimit() int {
+	if filter.IDs != nil {
 		return len(filter.IDs)
 	}

-	if len(filter.Kinds) == 0 {
-		return -1
+	if filter.Kinds == nil {
+		return math.MaxInt
 	}

-	if len(filter.Authors) > 0 {
+	if filter.Authors != nil {
 		allAreReplaceable := true
 		for _, kind := range filter.Kinds {
 			if !kind.IsReplaceable() {
@@ -186,5 +187,5 @@ func GetTheoreticalLimit(filter Filter) int {
 		}
 	}

-	return -1
+	return math.MaxInt
 }
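To make the new sentinel concrete, two hedged examples of what the method yields after this change (`id1` and `id2` are placeholders):

```go
// two ids requested: at most two events can ever match
nostr.Filter{IDs: []nostr.ID{id1, id2}}.GetTheoreticalLimit() // == 2

// neither ids nor kinds given: nothing bounds the result, so math.MaxInt
// takes the place of the old -1 sentinel
nostr.Filter{}.GetTheoreticalLimit() // == math.MaxInt
```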
@@ -108,15 +108,15 @@ func main() {

 ### But I don't want to write my own database!

-Fear no more. Using the https://fiatjaf.com/nostr/eventstore module you get a bunch of compatible databases out of the box and you can just plug them into your relay. For example, [sqlite](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/sqlite3):
+Fear no more. Using the https://fiatjaf.com/nostr/eventstore module you get a bunch of compatible databases out of the box and you can just plug them into your relay. For example, [lmdb](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/lmdb):

 ```go
-db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
+db := lmdb.LMDBBackend{Path: "/tmp/khatru-lmdb-tmp"}
 if err := db.Init(); err != nil {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 500)
 ```

 ### But I don't want to write a bunch of custom policies!
@@ -18,7 +18,7 @@ type EventStoreBlobIndexWrapper struct {

 func (es EventStoreBlobIndexWrapper) Keep(ctx context.Context, blob BlobDescriptor, pubkey nostr.PubKey) error {
 	next, stop := iter.Pull(
-		es.Store.QueryEvents(nostr.Filter{Authors: []nostr.PubKey{pubkey}, Kinds: []nostr.Kind{24242}, Tags: nostr.TagMap{"x": []string{blob.SHA256}}}),
+		es.Store.QueryEvents(nostr.Filter{Authors: []nostr.PubKey{pubkey}, Kinds: []nostr.Kind{24242}, Tags: nostr.TagMap{"x": []string{blob.SHA256}}}, 1),
 	)
 	defer stop()

@@ -46,7 +46,7 @@ func (es EventStoreBlobIndexWrapper) List(ctx context.Context, pubkey nostr.PubK
 		for evt := range es.Store.QueryEvents(nostr.Filter{
 			Authors: []nostr.PubKey{pubkey},
 			Kinds: []nostr.Kind{24242},
-		}) {
+		}, 1000) {
 			yield(es.parseEvent(evt))
 		}
 	}
@@ -54,7 +54,7 @@ func (es EventStoreBlobIndexWrapper) List(ctx context.Context, pubkey nostr.PubK

 func (es EventStoreBlobIndexWrapper) Get(ctx context.Context, sha256 string) (*BlobDescriptor, error) {
 	next, stop := iter.Pull(
-		es.Store.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []nostr.Kind{24242}, Limit: 1}),
+		es.Store.QueryEvents(nostr.Filter{Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []nostr.Kind{24242}, Limit: 1}, 1),
 	)

 	defer stop()
@@ -74,8 +74,7 @@ func (es EventStoreBlobIndexWrapper) Delete(ctx context.Context, sha256 string,
 			Tags: nostr.TagMap{"x": []string{sha256}},
 			Kinds: []nostr.Kind{24242},
 			Limit: 1,
-		},
-		),
+		}, 1),
 	)

 	defer stop()
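These call sites only ever need the first matching event, so they pass a `maxLimit` of 1 and drain the sequence with `iter.Pull` from the standard library. The general pattern, sketched:

```go
// pull at most one event out of the sequence, then release its resources
next, stop := iter.Pull(es.Store.QueryEvents(filter, 1))
defer stop()
if evt, ok := next(); ok {
	// a matching event exists
	_ = evt
}
```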
@@ -34,18 +34,3 @@ func main () {
 ```

 Note that in this case we're using the [LMDB](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/lmdb) adapter for normal queries and it explicitly rejects any filter that contains a `Search` field, while [Bluge](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/bluge) rejects any filter _without_ a `Search` value, which make them pair well together.
-
-Other adapters, like [SQLite](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/sqlite3), implement search functionality on their own, so if you don't want to use that you would have to have a middleware between, like:
-
-```go
-relay.StoreEvent = policies.SeqStore(db.SaveEvent, search.SaveEvent)
-relay.QueryStored = func (ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event] {
-	if len(filter.Search) > 0 {
-		return search.QueryEvents(ctx, filter)
-	} else {
-		filterNoSearch := filter
-		filterNoSearch.Search = ""
-		return normal.QueryEvents(ctx, filterNoSearch)
-	}
-})
-```
@@ -35,7 +35,7 @@ func main() {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 500)

 fmt.Println("running on :3334")
 http.ListenAndServe(":3334", relay)
@@ -44,10 +44,6 @@ func main() {

 [LMDB](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/lmdb) works the same way.

-[SQLite](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/sqlite3) also stores things locally so it only needs a `Path`.
-
-[PostgreSQL](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/postgresql) and [MySQL](https://pkg.go.dev/fiatjaf.com/nostr/eventstore/mysql) use remote connections to database servers, so they take a `DatabaseURL` parameter, but after that it's the same.
-
 ## Using two at a time

 If you want to use two different adapters at the same time that's easy. Just use the `policies.Seq*` functions:
@@ -21,7 +21,7 @@ groupsRelay, _ := khatru29.Init(relay29.Options{Domain: "example.com", DB: group
 publicStore := slicestore.SliceStore{}
 publicStore.Init()
 publicRelay := khatru.NewRelay()
-publicRelay.UseEventStore(publicStore)
+publicRelay.UseEventStore(publicStore, 1000)
 // ...

 // a higher-level relay that just routes between the two above
@@ -31,15 +31,15 @@ relay.Info.Description = "this is my custom relay"
 relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images"
 ```

-Now we must set up the basic functions for accepting events and answering queries. We could make our own querying engine from scratch, but we can also use [eventstore](https://fiatjaf.com/nostr/eventstore). In this example we'll use the SQLite adapter:
+Now we must set up the basic functions for accepting events and answering queries. We could make our own querying engine from scratch, but we can also use [eventstore](https://pkg.go.dev/fiatjaf.com/nostr/eventstore). In this example we'll use the Badger adapter:

 ```go
-db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
+db := badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"}
 if err := db.Init(); err != nil {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 500)
 ```

 These are lists of functions that will be called in order every time an `EVENT` is received, or a `REQ` query is received. You can add more than one handler there, you can have a function that reads from some other server, but just in some cases, you can do anything.
@@ -24,7 +24,7 @@ features:
   - title: It plugs into event stores easily
     icon: 📦
     link: /core/eventstore
-    details: khatru's companion, the `eventstore` library, provides all methods for storing and querying events efficiently from SQLite, LMDB, Postgres, Badger and others.
+    details: khatru's companion, the `eventstore` library, provides all methods for storing and querying events efficiently from LMDB, Badger and others.
   - title: It supports NIP-42 AUTH
     icon: 🪪
     link: /core/auth
@@ -48,7 +48,7 @@ func main() {
 	relay := khatru.NewRelay()
 	db := badger.BadgerBackend{Path: "/tmp/khatru-badgern-tmp"}
 	db.Init()
-	relay.UseEventStore(db)
+	relay.UseEventStore(db, 400)
 	http.ListenAndServe(":3334", relay)
 }
 ```
@@ -16,7 +16,7 @@ func main() {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 400)

 relay.Negentropy = true

@@ -18,7 +18,7 @@ func main() {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 400)

 fmt.Println("running on :3334")
 http.ListenAndServe(":3334", relay)
@@ -20,7 +20,7 @@ func main() {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 400)

 bdb := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-tmp"}
 if err := bdb.Init(); err != nil {
@@ -21,7 +21,7 @@ func main() {
 	panic(err)
 }

-relay.UseEventstore(db)
+relay.UseEventstore(db, 400)

 relay.OnEvent = policies.PreventTooManyIndexableTags(10, nil, nil)
 relay.OnRequest = policies.NoComplexFilters
@@ -15,17 +15,17 @@ func main() {
 	db1 := &slicestore.SliceStore{}
 	db1.Init()
 	r1 := khatru.NewRelay()
-	r1.UseEventstore(db1)
+	r1.UseEventstore(db1, 400)

 	db2 := &badger.BadgerBackend{Path: "/tmp/t"}
 	db2.Init()
 	r2 := khatru.NewRelay()
-	r2.UseEventstore(db2)
+	r2.UseEventstore(db2, 400)

 	db3 := &slicestore.SliceStore{}
 	db3.Init()
 	r3 := khatru.NewRelay()
-	r3.UseEventstore(db3)
+	r3.UseEventstore(db3, 400)

 	router := khatru.NewRouter()

@@ -117,9 +117,14 @@ type Relay struct {
 	expirationManager *expirationManager
 }

-func (rl *Relay) UseEventstore(store eventstore.Store) {
+// UseEventstore hooks up an eventstore.Store into the relay in the default way.
+// It should be used in 85% of the cases, when you don't want to do any complicated scheme with your event storage.
+//
+// maxQueryLimit is the default max limit to be enforced when querying events, to prevent users from downloading way
+// too much; setting it to something like 500 or 1000 should be ok in most cases.
+func (rl *Relay) UseEventstore(store eventstore.Store, maxQueryLimit int) {
 	rl.QueryStored = func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event] {
-		return store.QueryEvents(filter)
+		return store.QueryEvents(filter, maxQueryLimit)
 	}
 	rl.Count = func(ctx context.Context, filter nostr.Filter) (uint32, error) {
 		return store.CountEvents(filter)
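Every `UseEventstore` caller now picks its own cap, as the call sites updated throughout this commit show. A minimal wiring sketch (the path and the value 500 are illustrative):

```go
relay := khatru.NewRelay()
db := &lmdb.LMDBBackend{Path: "/tmp/my-relay-db"}
if err := db.Init(); err != nil {
	panic(err)
}
relay.UseEventstore(db, 500) // stored-event queries are capped at 500 results
http.ListenAndServe(":3334", relay)
```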
@@ -24,7 +24,7 @@ func FuzzReplaceableEvents(f *testing.F) {
 	relay := NewRelay()
 	store := &lmdb.LMDBBackend{Path: "/tmp/fuzz"}
 	store.Init()
-	relay.UseEventstore(store)
+	relay.UseEventstore(store, 4000)

 	defer store.Close()

@@ -17,7 +17,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
 	store := &slicestore.SliceStore{}
 	store.Init()

-	relay.UseEventstore(store)
+	relay.UseEventstore(store, 400)

 	// start test server
 	server := httptest.NewServer(relay)
@@ -239,7 +239,7 @@ func TestBasicRelayFunctionality(t *testing.T) {
 	relay.expirationManager.interval = 3 * time.Second // check every 3 seconds
 	store := &slicestore.SliceStore{}
 	store.Init()
-	relay.UseEventstore(store)
+	relay.UseEventstore(store, 400)

 	// start test server
 	server := httptest.NewServer(relay)
@@ -3,6 +3,7 @@ package main
 import (
 	"context"
 	"fmt"
+	"math"
 	"slices"
 	"time"

@@ -18,7 +19,7 @@ func main() {
 	db.Init()

 	sk := nostr.Generate()
-	local := wrappers.StorePublisher{Store: db}
+	local := wrappers.StorePublisher{Store: db, MaxLimit: math.MaxInt}

 	for {
 		for i := 0; i < 20; i++ {
@@ -124,19 +124,18 @@ func (sys *System) FetchFeedPage(
 	}

 	filter := nostr.Filter{Authors: []nostr.PubKey{pubkey}, Kinds: kinds}

 	if until > oldestTimestamp {
 		// we can use our local database
 		filter.Until = until

 		count := 0
-		for evt := range sys.Store.QueryEvents(filter) {
+		for evt := range sys.Store.QueryEvents(filter, limitPerKey) {
 			events = append(events, evt)
 			count++
 			if count >= limitPerKey {
 				// we got enough from the local store
 				wg.Done()
-				continue
+				break
 			}
 		}
 	}
@@ -22,7 +22,7 @@ func TestStreamLiveFeed(t *testing.T) {
 	for _, r := range []*khatru.Relay{relay1, relay2, relay3} {
 		db := &slicestore.SliceStore{}
 		db.Init()
-		r.UseEventstore(db)
+		r.UseEventstore(db, 4000)
 		defer db.Close()
 	}

@@ -57,7 +57,10 @@ func fetchGenericList[V comparable, I TagItemWithValue[V]](

 	v := GenericList[V, I]{PubKey: pubkey}

-	for evt := range sys.Store.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{actualKind}, Authors: []nostr.PubKey{pubkey}}) {
+	for evt := range sys.Store.QueryEvents(nostr.Filter{
+		Kinds: []nostr.Kind{actualKind},
+		Authors: []nostr.PubKey{pubkey},
+	}, 1) {
 		// ok, we found something locally
 		items := parseItemsFromEventTags(evt, parseTag)
 		v.Event = &evt
@@ -111,7 +111,7 @@ func (sys *System) FetchProfileMetadata(ctx context.Context, pubkey nostr.PubKey

 	pm.PubKey = pubkey

-	for evt := range sys.Store.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{0}, Authors: []nostr.PubKey{pubkey}}) {
+	for evt := range sys.Store.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{0}, Authors: []nostr.PubKey{pubkey}}, 1) {
 		// ok, we found something locally
 		pm, _ = ParseMetadata(evt)
 		pm.PubKey = pubkey
@@ -47,7 +47,7 @@ func fetchGenericSets[V comparable, I TagItemWithValue[V]](
 	v := GenericSets[V, I]{PubKey: pubkey}

 	events := slices.Collect(
-		sys.Store.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{actualKind}, Authors: []nostr.PubKey{pubkey}}),
+		sys.Store.QueryEvents(nostr.Filter{Kinds: []nostr.Kind{actualKind}, Authors: []nostr.PubKey{pubkey}}, 100),
 	)
 	if len(events) != 0 {
 		// ok, we found something locally
@@ -95,7 +95,7 @@ func (sys *System) FetchSpecificEvent(

 	// try to fetch in our internal eventstore first
 	if !params.SkipLocalStore {
-		for evt := range sys.Store.QueryEvents(filter) {
+		for evt := range sys.Store.QueryEvents(filter, 1) {
 			return &evt, nil, nil
 		}
 	}
@@ -136,7 +136,7 @@ func NewSystem() *System {
 		sys.Store = &nullstore.NullStore{}
 		sys.Store.Init()
 	}
-	sys.Publisher = wrappers.StorePublisher{Store: sys.Store}
+	sys.Publisher = wrappers.StorePublisher{Store: sys.Store, MaxLimit: 1000}

 	sys.initializeReplaceableDataloaders()
 	sys.initializeAddressableDataloaders()