diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml deleted file mode 100644 index 9877a4c..0000000 --- a/.github/workflows/test.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: test every commit -on: - - push - - pull_request - -jobs: - test: - # temporary using newest ubuntu instead of ubuntu-latest since - # libsecp256k1-dev does not have secp256k1_schnorrsig_sign32 in jammy - runs-on: ubuntu-24.04 - steps: - - uses: actions/checkout@v4 - - - name: Install libsecp256k1-dev - run: sudo apt-get install libsecp256k1-dev - - - uses: actions/setup-go@v5 - with: - go-version-file: ./go.mod - - - run: go test ./... -tags=sonic diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..fdddb29 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to <https://unlicense.org> diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index ccd11e0..0000000 --- a/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 nbd - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/eventstore/.gitignore b/eventstore/.gitignore new file mode 100644 index 0000000..182da1e --- /dev/null +++ b/eventstore/.gitignore @@ -0,0 +1 @@ +knowledge.md diff --git a/eventstore/README.md b/eventstore/README.md new file mode 100644 index 0000000..842f48e --- /dev/null +++ b/eventstore/README.md @@ -0,0 +1,31 @@ +# eventstore + +A collection of reusable database connectors, wrappers and schemas that store Nostr events and expose a simple Go interface: + +```go +type Store interface { + // Init is called at the very beginning by [Server.Start], after [Relay.Init], + // allowing a storage to initialize its internal resources. 
+ Init() error + + // Close must be called after you're done using the store, to free up resources and so on. + Close() + + // QueryEvents is invoked upon a client's REQ as described in NIP-01. + // it should return a channel with the events as they're recovered from a database. + // the channel should be closed after the events are all delivered. + QueryEvents(context.Context, nostr.Filter) (chan *nostr.Event, error) + + // DeleteEvent is used to handle deletion events, as per NIP-09. + DeleteEvent(context.Context, *nostr.Event) error + + // SaveEvent is called once Relay.AcceptEvent reports true. + SaveEvent(context.Context, *nostr.Event) error +} +``` + +[![Go Reference](https://pkg.go.dev/badge/github.com/fiatjaf/eventstore.svg)](https://pkg.go.dev/github.com/fiatjaf/eventstore) [![Run Tests](https://github.com/fiatjaf/eventstore/actions/workflows/test.yml/badge.svg)](https://github.com/fiatjaf/eventstore/actions/workflows/test.yml) + +## command-line tool + +There is an [`eventstore` command-line tool](cmd/eventstore) that can be used to query these databases directly. 
diff --git a/eventstore/badger/count.go b/eventstore/badger/count.go new file mode 100644 index 0000000..77af9e6 --- /dev/null +++ b/eventstore/badger/count.go @@ -0,0 +1,168 @@ +package badger + +import ( + "context" + "encoding/binary" + "log" + + "github.com/dgraph-io/badger/v4" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip45/hyperloglog" +) + +func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + var count int64 = 0 + + queries, extraFilter, since, err := prepareQueries(filter) + if err != nil { + return 0, err + } + + err = b.View(func(txn *badger.Txn) error { + // iterate only through keys and in reverse order + opts := badger.IteratorOptions{ + Reverse: true, + } + + // actually iterate + for _, q := range queries { + it := txn.NewIterator(opts) + defer it.Close() + + for it.Seek(q.startingPoint); it.ValidForPrefix(q.prefix); it.Next() { + item := it.Item() + key := item.Key() + + idxOffset := len(key) - 4 // this is where the idx actually starts + + // "id" indexes don't contain a timestamp + if !q.skipTimestamp { + createdAt := binary.BigEndian.Uint32(key[idxOffset-4 : idxOffset]) + if createdAt < since { + break + } + } + + idx := make([]byte, 5) + idx[0] = rawEventStorePrefix + copy(idx[1:], key[idxOffset:]) + + if extraFilter == nil { + count++ + } else { + // fetch actual event + item, err := txn.Get(idx) + if err != nil { + if err == badger.ErrDiscardedTxn { + return err + } + log.Printf("badger: count (%v) failed to get %d from raw event store: %s\n", q, idx, err) + return err + } + + err = item.Value(func(val []byte) error { + evt := &nostr.Event{} + if err := bin.Unmarshal(val, evt); err != nil { + return err + } + + // check if this matches the other filters that were not part of the index + if extraFilter.Matches(evt) { + count++ + } + + return nil + }) + if err != nil { + log.Printf("badger: count value read error: %s\n", err) + 
} + } + } + } + + return nil + }) + + return count, err +} + +func (b *BadgerBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) { + var count int64 = 0 + + queries, extraFilter, since, err := prepareQueries(filter) + if err != nil { + return 0, nil, err + } + + hll := hyperloglog.New(offset) + + err = b.View(func(txn *badger.Txn) error { + // iterate only through keys and in reverse order + opts := badger.IteratorOptions{ + Reverse: true, + } + + // actually iterate + for _, q := range queries { + it := txn.NewIterator(opts) + defer it.Close() + + for it.Seek(q.startingPoint); it.ValidForPrefix(q.prefix); it.Next() { + item := it.Item() + key := item.Key() + + idxOffset := len(key) - 4 // this is where the idx actually starts + + // "id" indexes don't contain a timestamp + if !q.skipTimestamp { + createdAt := binary.BigEndian.Uint32(key[idxOffset-4 : idxOffset]) + if createdAt < since { + break + } + } + + idx := make([]byte, 5) + idx[0] = rawEventStorePrefix + copy(idx[1:], key[idxOffset:]) + + // fetch actual event + item, err := txn.Get(idx) + if err != nil { + if err == badger.ErrDiscardedTxn { + return err + } + log.Printf("badger: count (%v) failed to get %d from raw event store: %s\n", q, idx, err) + return err + } + + err = item.Value(func(val []byte) error { + if extraFilter == nil { + hll.AddBytes(val[32:64]) + count++ + return nil + } + + evt := &nostr.Event{} + if err := bin.Unmarshal(val, evt); err != nil { + return err + } + if extraFilter.Matches(evt) { + hll.Add(evt.PubKey) + count++ + return nil + } + + return nil + }) + if err != nil { + log.Printf("badger: count value read error: %s\n", err) + } + } + } + + return nil + }) + + return count, hll, err +} diff --git a/eventstore/badger/delete.go b/eventstore/badger/delete.go new file mode 100644 index 0000000..e4bd690 --- /dev/null +++ b/eventstore/badger/delete.go @@ -0,0 +1,72 @@ +package badger + +import ( + "context" + 
"encoding/hex" + "log" + + "github.com/dgraph-io/badger/v4" + "github.com/nbd-wtf/go-nostr" +) + +var serialDelete uint32 = 0 + +func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + deletionHappened := false + + err := b.Update(func(txn *badger.Txn) error { + var err error + deletionHappened, err = b.delete(txn, evt) + return err + }) + if err != nil { + return err + } + + // after deleting, run garbage collector (sometimes) + if deletionHappened { + serialDelete = (serialDelete + 1) % 256 + if serialDelete == 0 { + if err := b.RunValueLogGC(0.8); err != nil && err != badger.ErrNoRewrite { + log.Println("badger gc errored:" + err.Error()) + } + } + } + + return nil +} + +func (b *BadgerBackend) delete(txn *badger.Txn, evt *nostr.Event) (bool, error) { + idx := make([]byte, 1, 5) + idx[0] = rawEventStorePrefix + + // query event by id to get its idx + idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2]) + prefix := make([]byte, 1+8) + prefix[0] = indexIdPrefix + copy(prefix[1:], idPrefix8) + opts := badger.IteratorOptions{ + PrefetchValues: false, + } + it := txn.NewIterator(opts) + it.Seek(prefix) + if it.ValidForPrefix(prefix) { + idx = append(idx, it.Item().Key()[1+8:]...) 
+ } + it.Close() + + // if no idx was found, end here, this event doesn't exist + if len(idx) == 1 { + return false, nil + } + + // calculate all index keys we have for this event and delete them + for k := range b.getIndexKeysForEvent(evt, idx[1:]) { + if err := txn.Delete(k); err != nil { + return false, err + } + } + + // delete the raw event + return true, txn.Delete(idx) +} diff --git a/eventstore/badger/fuzz_test.go b/eventstore/badger/fuzz_test.go new file mode 100644 index 0000000..d03a3c4 --- /dev/null +++ b/eventstore/badger/fuzz_test.go @@ -0,0 +1,158 @@ +package badger + +import ( + "cmp" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "testing" + "time" + + "github.com/dgraph-io/badger/v4" + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" +) + +func FuzzQuery(f *testing.F) { + ctx := context.Background() + + f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1)) + f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) { + total++ + authors++ + seedFactor++ + kindFactor++ + if kinds == 1 { + kinds++ + } + if limit == 0 { + return + } + + // ~ setup db + + bdb, err := badger.Open(badger.DefaultOptions("").WithInMemory(true)) + if err != nil { + t.Fatalf("failed to create database: %s", err) + return + } + db := &BadgerBackend{} + db.DB = bdb + + if err := db.runMigrations(); err != nil { + t.Fatalf("error: %s", err) + return + } + + if err := db.DB.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.IteratorOptions{ + Prefix: []byte{0}, + Reverse: true, + }) + it.Seek([]byte{1}) + if it.Valid() { + key := it.Item().Key() + idx := key[1:] + serial := binary.BigEndian.Uint32(idx) + db.serial.Store(serial) + } + it.Close() + return nil + }); err != nil { + t.Fatalf("failed to initialize serial: %s", err) + return + } + + db.MaxLimit = 500 + defer db.Close() + + // ~ start actual 
test + + filter := nostr.Filter{ + Authors: make([]string, authors), + Limit: int(limit), + } + maxKind := 1 + if kinds > 0 { + filter.Kinds = make([]int, kinds) + for i := range filter.Kinds { + filter.Kinds[i] = int(kindFactor) * i + } + maxKind = filter.Kinds[len(filter.Kinds)-1] + } + + for i := 0; i < int(authors); i++ { + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, uint32(i%int(authors*seedFactor))+1) + pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + filter.Authors[i] = pk + } + + expected := make([]*nostr.Event, 0, total) + for i := 0; i < int(total); i++ { + skseed := uint32(i%int(authors*seedFactor)) + 1 + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, skseed) + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i), + Content: fmt.Sprintf("unbalanced %d", i), + Tags: nostr.Tags{}, + Kind: i % maxKind, + } + err := evt.Sign(hex.EncodeToString(sk)) + require.NoError(t, err) + + err = db.SaveEvent(ctx, evt) + require.NoError(t, err) + + if filter.Matches(evt) { + expected = append(expected, evt) + } + } + + slices.SortFunc(expected, nostr.CompareEventPtrReverse) + if len(expected) > int(limit) { + expected = expected[0:limit] + } + + w := eventstore.RelayWrapper{Store: db} + + start := time.Now() + // fmt.Println(filter) + res, err := w.QuerySync(ctx, filter) + end := time.Now() + + require.NoError(t, err) + require.Equal(t, len(expected), len(res), "number of results is different than expected") + + require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long") + require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") + + nresults := len(expected) + + getTimestamps := func(events []*nostr.Event) []nostr.Timestamp { + res := make([]nostr.Timestamp, len(events)) + for i, evt := range events { + res[i] = evt.CreatedAt + } + return res + } + + // fmt.Println(" 
expected result") + // for i := range expected { + // fmt.Println(" ", expected[i].CreatedAt, expected[i].ID[0:8], " ", res[i].CreatedAt, res[i].ID[0:8], " ", i) + // } + + require.Equal(t, expected[0].CreatedAt, res[0].CreatedAt, "first result is wrong") + require.Equal(t, expected[nresults-1].CreatedAt, res[nresults-1].CreatedAt, "last result is wrong") + require.Equal(t, getTimestamps(expected), getTimestamps(res)) + + for _, evt := range res { + require.True(t, filter.Matches(evt), "event %s doesn't match filter %s", evt, filter) + } + }) +} diff --git a/eventstore/badger/helpers.go b/eventstore/badger/helpers.go new file mode 100644 index 0000000..fd33756 --- /dev/null +++ b/eventstore/badger/helpers.go @@ -0,0 +1,162 @@ +package badger + +import ( + "encoding/binary" + "encoding/hex" + "iter" + "strconv" + "strings" + + "github.com/nbd-wtf/go-nostr" + "golang.org/x/exp/slices" +) + +func getTagIndexPrefix(tagValue string) ([]byte, int) { + var k []byte // the key with full length for created_at and idx at the end, but not filled with these + var offset int // the offset -- i.e. 
where the prefix ends and the created_at and idx would start + + if kind, pkb, d := getAddrTagElements(tagValue); len(pkb) == 32 { + // store value in the new special "a" tag index + k = make([]byte, 1+2+8+len(d)+4+4) + k[0] = indexTagAddrPrefix + binary.BigEndian.PutUint16(k[1:], kind) + copy(k[1+2:], pkb[0:8]) + copy(k[1+2+8:], d) + offset = 1 + 2 + 8 + len(d) + } else if vb, _ := hex.DecodeString(tagValue); len(vb) == 32 { + // store value as bytes + k = make([]byte, 1+8+4+4) + k[0] = indexTag32Prefix + copy(k[1:], vb[0:8]) + offset = 1 + 8 + } else { + // store whatever as utf-8 + k = make([]byte, 1+len(tagValue)+4+4) + k[0] = indexTagPrefix + copy(k[1:], tagValue) + offset = 1 + len(tagValue) + } + + return k, offset +} + +func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.Seq[[]byte] { + return func(yield func([]byte) bool) { + { + // ~ by id + idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2]) + k := make([]byte, 1+8+4) + k[0] = indexIdPrefix + copy(k[1:], idPrefix8) + copy(k[1+8:], idx) + if !yield(k) { + return + } + } + + { + // ~ by pubkey+date + pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2]) + k := make([]byte, 1+8+4+4) + k[0] = indexPubkeyPrefix + copy(k[1:], pubkeyPrefix8) + binary.BigEndian.PutUint32(k[1+8:], uint32(evt.CreatedAt)) + copy(k[1+8+4:], idx) + if !yield(k) { + return + } + } + + { + // ~ by kind+date + k := make([]byte, 1+2+4+4) + k[0] = indexKindPrefix + binary.BigEndian.PutUint16(k[1:], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[1+2:], uint32(evt.CreatedAt)) + copy(k[1+2+4:], idx) + if !yield(k) { + return + } + } + + { + // ~ by pubkey+kind+date + pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2]) + k := make([]byte, 1+8+2+4+4) + k[0] = indexPubkeyKindPrefix + copy(k[1:], pubkeyPrefix8) + binary.BigEndian.PutUint16(k[1+8:], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[1+8+2:], uint32(evt.CreatedAt)) + copy(k[1+8+2+4:], idx) + if !yield(k) { + return + } + } + + // ~ by 
tagvalue+date + customIndex := b.IndexLongerTag != nil + customSkip := b.SkipIndexingTag != nil + + for i, tag := range evt.Tags { + if len(tag) < 2 || len(tag[0]) != 1 || len(tag[1]) == 0 || len(tag[1]) > 100 { + if !customIndex || !b.IndexLongerTag(evt, tag[0], tag[1]) { + // not indexable + continue + } + } + + firstIndex := slices.IndexFunc(evt.Tags, func(t nostr.Tag) bool { return len(t) >= 2 && t[1] == tag[1] }) + if firstIndex != i { + // duplicate + continue + } + + if customSkip && b.SkipIndexingTag(evt, tag[0], tag[1]) { + // purposefully skipped + continue + } + + // get key prefix (with full length) and offset where to write the last parts + k, offset := getTagIndexPrefix(tag[1]) + + // write the last parts (created_at and idx) + binary.BigEndian.PutUint32(k[offset:], uint32(evt.CreatedAt)) + copy(k[offset+4:], idx) + if !yield(k) { + return + } + } + + { + // ~ by date only + k := make([]byte, 1+4+4) + k[0] = indexCreatedAtPrefix + binary.BigEndian.PutUint32(k[1:], uint32(evt.CreatedAt)) + copy(k[1+4:], idx) + if !yield(k) { + return + } + } + } +} + +func getAddrTagElements(tagValue string) (kind uint16, pkb []byte, d string) { + spl := strings.Split(tagValue, ":") + if len(spl) == 3 { + if pkb, _ := hex.DecodeString(spl[1]); len(pkb) == 32 { + if kind, err := strconv.ParseUint(spl[0], 10, 16); err == nil { + return uint16(kind), pkb, spl[2] + } + } + } + return 0, nil, "" +} + +func filterMatchesTags(ef *nostr.Filter, event *nostr.Event) bool { + for f, v := range ef.Tags { + if v != nil && !event.Tags.ContainsAny(f, v) { + return false + } + } + return true +} diff --git a/eventstore/badger/lib.go b/eventstore/badger/lib.go new file mode 100644 index 0000000..410acfc --- /dev/null +++ b/eventstore/badger/lib.go @@ -0,0 +1,100 @@ +package badger + +import ( + "encoding/binary" + "fmt" + "sync/atomic" + + "github.com/dgraph-io/badger/v4" + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +const ( + dbVersionKey byte = 255 + 
rawEventStorePrefix byte = 0 + indexCreatedAtPrefix byte = 1 + indexIdPrefix byte = 2 + indexKindPrefix byte = 3 + indexPubkeyPrefix byte = 4 + indexPubkeyKindPrefix byte = 5 + indexTagPrefix byte = 6 + indexTag32Prefix byte = 7 + indexTagAddrPrefix byte = 8 +) + +var _ eventstore.Store = (*BadgerBackend)(nil) + +type BadgerBackend struct { + Path string + MaxLimit int + MaxLimitNegentropy int + BadgerOptionsModifier func(badger.Options) badger.Options + + // Experimental + SkipIndexingTag func(event *nostr.Event, tagName string, tagValue string) bool + // Experimental + IndexLongerTag func(event *nostr.Event, tagName string, tagValue string) bool + + *badger.DB + + serial atomic.Uint32 +} + +func (b *BadgerBackend) Init() error { + opts := badger.DefaultOptions(b.Path) + if b.BadgerOptionsModifier != nil { + opts = b.BadgerOptionsModifier(opts) + } + + db, err := badger.Open(opts) + if err != nil { + return err + } + b.DB = db + + if err := b.runMigrations(); err != nil { + return fmt.Errorf("error running migrations: %w", err) + } + + if b.MaxLimit != 0 { + b.MaxLimitNegentropy = b.MaxLimit + } else { + b.MaxLimit = 1000 + if b.MaxLimitNegentropy == 0 { + b.MaxLimitNegentropy = 16777216 + } + } + + if err := b.DB.View(func(txn *badger.Txn) error { + it := txn.NewIterator(badger.IteratorOptions{ + Prefix: []byte{0}, + Reverse: true, + }) + it.Seek([]byte{1}) + if it.Valid() { + key := it.Item().Key() + idx := key[1:] + serial := binary.BigEndian.Uint32(idx) + b.serial.Store(serial) + } + it.Close() + return nil + }); err != nil { + return fmt.Errorf("error initializing serial: %w", err) + } + + return nil +} + +func (b *BadgerBackend) Close() { + b.DB.Close() +} + +func (b *BadgerBackend) Serial() []byte { + next := b.serial.Add(1) + vb := make([]byte, 5) + vb[0] = rawEventStorePrefix + binary.BigEndian.PutUint32(vb[1:], next) + return vb +} diff --git a/eventstore/badger/migrations.go b/eventstore/badger/migrations.go new file mode 100644 index 0000000..bdfb3a8 
--- /dev/null +++ b/eventstore/badger/migrations.go @@ -0,0 +1,66 @@ +package badger + +import ( + "encoding/binary" + "fmt" + + "github.com/dgraph-io/badger/v4" +) + +func (b *BadgerBackend) runMigrations() error { + return b.Update(func(txn *badger.Txn) error { + var version uint16 + + item, err := txn.Get([]byte{dbVersionKey}) + if err == badger.ErrKeyNotFound { + version = 0 + } else if err != nil { + return err + } else { + item.Value(func(val []byte) error { + version = binary.BigEndian.Uint16(val) + return nil + }) + } + + // do the migrations in increasing steps (there is no rollback) + // + + // the 3 first migrations go to trash because on version 3 we need to export and import all the data anyway + if version < 3 { + // if there is any data in the relay we will stop and notify the user, + // otherwise we just set version to 3 and proceed + prefix := []byte{indexIdPrefix} + it := txn.NewIterator(badger.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + }) + defer it.Close() + + hasAnyEntries := false + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + hasAnyEntries = true + break + } + + if hasAnyEntries { + return fmt.Errorf("your database is at version %d, but in order to migrate up to version 3 you must manually export all the events and then import again: run an old version of this software, export the data, then delete the database files, run the new version, import the data back in.", version) + } + + b.bumpVersion(txn, 3) + } + + if version < 4 { + // ... 
+ } + + return nil + }) +} + +func (b *BadgerBackend) bumpVersion(txn *badger.Txn, version uint16) error { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, version) + return txn.Set([]byte{dbVersionKey}, buf) +} diff --git a/eventstore/badger/query.go b/eventstore/badger/query.go new file mode 100644 index 0000000..cf9998b --- /dev/null +++ b/eventstore/badger/query.go @@ -0,0 +1,432 @@ +package badger + +import ( + "context" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "log" + + "github.com/dgraph-io/badger/v4" + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/internal" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" + "golang.org/x/exp/slices" +) + +var batchFilled = errors.New("batch-filled") + +func (b *BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + + if filter.Search != "" { + close(ch) + return ch, nil + } + + // max number of events we'll return + maxLimit := b.MaxLimit + var limit int + if eventstore.IsNegentropySession(ctx) { + maxLimit = b.MaxLimitNegentropy + limit = maxLimit + } else { + limit = maxLimit / 4 + } + if filter.Limit > 0 && filter.Limit <= maxLimit { + limit = filter.Limit + } + if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 { + close(ch) + return ch, nil + } else if tlimit > 0 { + limit = tlimit + } + + // fmt.Println("limit", limit) + + go b.View(func(txn *badger.Txn) error { + defer close(ch) + + results, err := b.query(txn, filter, limit) + if err != nil { + return err + } + + for _, evt := range results { + ch <- evt.Event + } + + return nil + }) + + return ch, nil +} + +func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) { + queries, extraFilter, since, err := prepareQueries(filter) + if err != nil { + return nil, err + } + + iterators := make([]*badger.Iterator, len(queries)) + exhausted := make([]bool, 
len(queries)) // indicates that a query won't be used anymore + results := make([][]internal.IterEvent, len(queries)) + pulledPerQuery := make([]int, len(queries)) + + // these are kept updated so we never pull from the iterator that is at further distance + // (i.e. the one that has the oldest event among all) + // we will continue to pull from it as soon as some other iterator takes the position + oldest := internal.IterEvent{Q: -1} + + secondPhase := false // after we have gathered enough events we will change the way we iterate + secondBatch := make([][]internal.IterEvent, 0, len(queries)+1) + secondPhaseParticipants := make([]int, 0, len(queries)+1) + + // while merging results in the second phase we will alternate between these two lists + // to avoid having to create new lists all the time + var secondPhaseResultsA []internal.IterEvent + var secondPhaseResultsB []internal.IterEvent + var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating + var secondPhaseHasResultsPending bool + + remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing + batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + firstPhaseTotalPulled := 0 + + exhaust := func(q int) { + exhausted[q] = true + remainingUnexhausted-- + if q == oldest.Q { + oldest = internal.IterEvent{Q: -1} + } + } + + var firstPhaseResults []internal.IterEvent + + for q := range queries { + iterators[q] = txn.NewIterator(badger.IteratorOptions{ + Reverse: true, + PrefetchValues: false, // we don't even have values, only keys + Prefix: queries[q].prefix, + }) + defer iterators[q].Close() + iterators[q].Seek(queries[q].startingPoint) + results[q] = make([]internal.IterEvent, 0, batchSizePerQuery*2) + } + + // we will reuse this throughout the iteration + valIdx := make([]byte, 5) + + // fmt.Println("queries", len(queries)) + + for c := 0; ; c++ { + batchSizePerQuery = 
internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + + // fmt.Println(" iteration", c, "remaining", remainingUnexhausted, "batchsize", batchSizePerQuery) + // we will go through all the iterators in batches until we have pulled all the required results + for q, query := range queries { + if exhausted[q] { + continue + } + if oldest.Q == q && remainingUnexhausted > 1 { + continue + } + // fmt.Println(" query", q, unsafe.Pointer(&results[q]), hex.EncodeToString(query.prefix), len(results[q])) + + it := iterators[q] + pulledThisIteration := 0 + + for { + if !it.Valid() { + // fmt.Println(" reached end") + exhaust(q) + break + } + + item := it.Item() + key := item.Key() + + idxOffset := len(key) - 4 // this is where the idx actually starts + + // "id" indexes don't contain a timestamp + if !query.skipTimestamp { + createdAt := binary.BigEndian.Uint32(key[idxOffset-4 : idxOffset]) + if createdAt < since { + // fmt.Println(" reached since", createdAt, "<", since) + exhaust(q) + break + } + } + + valIdx[0] = rawEventStorePrefix + copy(valIdx[1:], key[idxOffset:]) + + // fetch actual event + item, err := txn.Get(valIdx) + if err != nil { + if err == badger.ErrDiscardedTxn { + return nil, err + } + log.Printf("badger: failed to get %x based on prefix %x, index key %x from raw event store: %s\n", + valIdx, query.prefix, key, err) + return nil, err + } + + if err := item.Value(func(val []byte) error { + // fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), "author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132]))) + + // check it against pubkeys without decoding the entire thing + if extraFilter != nil && extraFilter.Authors != nil && + !slices.Contains(extraFilter.Authors, hex.EncodeToString(val[32:64])) { + // fmt.Println(" skipped (authors)") + return nil + } + + // check it against kinds without decoding the entire thing + if extraFilter != nil && extraFilter.Kinds 
!= nil && + !slices.Contains(extraFilter.Kinds, int(binary.BigEndian.Uint16(val[132:134]))) { + // fmt.Println(" skipped (kinds)") + return nil + } + + event := &nostr.Event{} + if err := bin.Unmarshal(val, event); err != nil { + log.Printf("badger: value read error (id %x): %s\n", val[0:32], err) + return err + } + + // check if this matches the other filters that were not part of the index + if extraFilter != nil && !filterMatchesTags(extraFilter, event) { + // fmt.Println(" skipped (filter)", extraFilter, event) + return nil + } + + // this event is good to be used + evt := internal.IterEvent{Event: event, Q: q} + // + // + if secondPhase { + // do the process described below at HIWAWVRTP. + // if we've reached here this means we've already passed the `since` check. + // now we have to eliminate the event currently at the `since` threshold. + nextThreshold := firstPhaseResults[len(firstPhaseResults)-2] + if oldest.Event == nil { + // fmt.Println(" b1") + // BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE) + // when we don't have the oldest set, we will keep the results + // and not change the cutting point -- it's bad, but hopefully not that bad. + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt > oldest.CreatedAt { + // fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt) + // one of the events we have stored is the actual next threshold + // eliminate last, update since with oldest + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since) + // we null the oldest Event as we can't rely on it anymore + // (we'll fall under BWWDHTOE above) until we have a new oldest set. + oldest = internal.IterEvent{Q: -1} + // anything we got that would be above this won't trigger an update to + // the oldest anyway, because it will be discarded as being after the limit. 
+ // + // finally + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt < evt.CreatedAt { + // the next last event in the firstPhaseResults is the next threshold + // fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt) + // eliminate last, update since with the antelast + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(nextThreshold.CreatedAt) + // fmt.Println(" new since", since) + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + // update the oldest event + if evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } else { + // fmt.Println(" b4") + // oops, _we_ are the next `since` threshold + firstPhaseResults[len(firstPhaseResults)-1] = evt + since = uint32(evt.CreatedAt) + // fmt.Println(" new since", since) + // do not add us to the results to be merged later + // as we're already inhabiting the firstPhaseResults slice + } + } else { + results[q] = append(results[q], evt) + firstPhaseTotalPulled++ + + // update the oldest event + if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } + + pulledPerQuery[q]++ + pulledThisIteration++ + if pulledThisIteration > batchSizePerQuery { + return batchFilled + } + if pulledPerQuery[q] >= limit { + exhaust(q) + return batchFilled + } + + return nil + }); err == batchFilled { + // fmt.Println(" #") + it.Next() + break + } else if err != nil { + return nil, fmt.Errorf("iteration error: %w", err) + } + + it.Next() + } + } + + // we will do this check if we don't accumulated the requested number of events yet + // fmt.Println("oldest", oldest.Event, "from iter", oldest.Q) + if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) { + // fmt.Println("second phase aggregation!") + // when we are in the second phase we will 
aggressively aggregate results on every iteration + // + secondBatch = secondBatch[:0] + for s := 0; s < len(secondPhaseParticipants); s++ { + q := secondPhaseParticipants[s] + + if len(results[q]) > 0 { + secondBatch = append(secondBatch, results[q]) + } + + if exhausted[q] { + secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s) + s-- + } + } + + // every time we get here we will alternate between these A and B lists + // combining everything we have into a new partial results list. + // after we've done that we can again set the oldest. + // fmt.Println(" xxx", secondPhaseResultsToggle) + if secondPhaseResultsToggle { + secondBatch = append(secondBatch, secondPhaseResultsB) + secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA) + oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1] + // fmt.Println(" new aggregated a", len(secondPhaseResultsB)) + } else { + secondBatch = append(secondBatch, secondPhaseResultsA) + secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB) + oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1] + // fmt.Println(" new aggregated b", len(secondPhaseResultsB)) + } + secondPhaseResultsToggle = !secondPhaseResultsToggle + + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since) + + // reset the `results` list so we can keep using it + results = results[:len(queries)] + for _, q := range secondPhaseParticipants { + results[q] = results[q][:0] + } + } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 { + // fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted) + + // we will exclude this oldest number as it is not relevant anymore + // (we now want to keep track only of the oldest among the remaining iterators) + oldest = internal.IterEvent{Q: -1} + + // HOW IT WORKS AFTER WE'VE REACHED THIS POINT (HIWAWVRTP) + // now we can combine the results we have 
and check what is our current oldest event. + // we also discard anything that is after the current cutting point (`limit`). + // so if we have [1,2,3], [10, 15, 20] and [7, 21, 49] but we only want 6 total + // we can just keep [1,2,3,7,10,15] and discard [20, 21, 49], + // and also adjust our `since` parameter to `15`, discarding anything we get after it + // and immediately declaring that iterator exhausted. + // also every time we get result that is more recent than this updated `since` we can + // keep it but also discard the previous since, moving the needle one back -- for example, + // if we get an `8` we can keep it and move the `since` parameter to `10`, discarding `15` + // in the process. + all := make([][]internal.IterEvent, len(results)) + copy(all, results) // we have to use this otherwise mergeSortMultiple will scramble our results slice + firstPhaseResults = internal.MergeSortMultiple(all, limit, nil) + oldest = firstPhaseResults[limit-1] + since = uint32(oldest.CreatedAt) + // fmt.Println("new since", since) + + for q := range queries { + if exhausted[q] { + continue + } + + // we also automatically exhaust any of the iterators that have already passed the + // cutting point (`since`) + if results[q][len(results[q])-1].CreatedAt < oldest.CreatedAt { + exhausted[q] = true + remainingUnexhausted-- + continue + } + + // for all the remaining iterators, + // since we have merged all the events in this `firstPhaseResults` slice, we can empty the + // current `results` slices and reuse them. 
+ results[q] = results[q][:0] + + // build this index of indexes with everybody who remains + secondPhaseParticipants = append(secondPhaseParticipants, q) + } + + // we create these two lists and alternate between them so we don't have to create a + // a new one every time + secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2) + secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2) + + // from now on we won't run this block anymore + secondPhase = true + } + + // fmt.Println("remaining", remainingUnexhausted) + if remainingUnexhausted == 0 { + break + } + } + + // fmt.Println("is secondPhase?", secondPhase) + + var combinedResults []internal.IterEvent + + if secondPhase { + // fmt.Println("ending second phase") + // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff, + // the other will be empty + var secondPhaseResults []internal.IterEvent + // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB)) + if secondPhaseResultsToggle { + secondPhaseResults = secondPhaseResultsB + combinedResults = secondPhaseResultsA[0:limit] // reuse this + // fmt.Println(" using b", len(secondPhaseResultsA)) + } else { + secondPhaseResults = secondPhaseResultsA + combinedResults = secondPhaseResultsB[0:limit] // reuse this + // fmt.Println(" using a", len(secondPhaseResultsA)) + } + + all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults} + combinedResults = internal.MergeSortMultiple(all, limit, combinedResults) + // fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit) + } else { + combinedResults = make([]internal.IterEvent, limit) + combinedResults = internal.MergeSortMultiple(results, limit, combinedResults) + } + + return combinedResults, nil +} diff --git a/eventstore/badger/query_planner.go b/eventstore/badger/query_planner.go new file mode 100644 index 0000000..686ff88 --- /dev/null +++ b/eventstore/badger/query_planner.go @@ 
-0,0 +1,147 @@ +package badger + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +type query struct { + i int + prefix []byte + startingPoint []byte + skipTimestamp bool +} + +func prepareQueries(filter nostr.Filter) ( + queries []query, + extraFilter *nostr.Filter, + since uint32, + err error, +) { + // these things have to run for every result we return + defer func() { + if queries == nil { + return + } + + var until uint32 = 4294967295 + if filter.Until != nil { + if fu := uint32(*filter.Until); fu < until { + until = fu + 1 + } + } + + for i, q := range queries { + queries[i].startingPoint = binary.BigEndian.AppendUint32(q.prefix, uint32(until)) + } + + // this is where we'll end the iteration + if filter.Since != nil { + if fs := uint32(*filter.Since); fs > since { + since = fs + } + } + }() + + var index byte + + if len(filter.IDs) > 0 { + queries = make([]query, len(filter.IDs)) + for i, idHex := range filter.IDs { + prefix := make([]byte, 1+8) + prefix[0] = indexIdPrefix + if len(idHex) != 64 { + return nil, nil, 0, fmt.Errorf("invalid id '%s'", idHex) + } + hex.Decode(prefix[1:], []byte(idHex[0:8*2])) + queries[i] = query{i: i, prefix: prefix, skipTimestamp: true} + } + + return queries, extraFilter, since, nil + } + + if len(filter.Tags) > 0 { + // we will select ONE tag to query with + tagKey, tagValues, goodness := internal.ChooseNarrowestTag(filter) + + // we won't use a tag index for this as long as we have something else to match with + if goodness < 3 && (len(filter.Authors) > 0 || len(filter.Kinds) > 0) { + goto pubkeyMatching + } + + queries = make([]query, len(tagValues)) + for i, value := range tagValues { + // get key prefix (with full length) and offset where to write the created_at + k, offset := getTagIndexPrefix(value) + // remove the last parts part to get just the prefix we want here + prefix := k[0:offset] + queries[i] = query{i: i, prefix: prefix} 
+ i++ + } + + extraFilter = &nostr.Filter{ + Kinds: filter.Kinds, + Authors: filter.Authors, + Tags: internal.CopyMapWithoutKey(filter.Tags, tagKey), + } + + return queries, extraFilter, since, nil + } + +pubkeyMatching: + if len(filter.Authors) > 0 { + if len(filter.Kinds) == 0 { + queries = make([]query, len(filter.Authors)) + for i, pubkeyHex := range filter.Authors { + if len(pubkeyHex) != 64 { + return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex) + } + prefix := make([]byte, 1+8) + prefix[0] = indexPubkeyPrefix + hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2])) + queries[i] = query{i: i, prefix: prefix} + } + } else { + queries = make([]query, len(filter.Authors)*len(filter.Kinds)) + i := 0 + for _, pubkeyHex := range filter.Authors { + for _, kind := range filter.Kinds { + if len(pubkeyHex) != 64 { + return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex) + } + + prefix := make([]byte, 1+8+2) + prefix[0] = indexPubkeyKindPrefix + hex.Decode(prefix[1:], []byte(pubkeyHex[0:8*2])) + binary.BigEndian.PutUint16(prefix[1+8:], uint16(kind)) + queries[i] = query{i: i, prefix: prefix} + i++ + } + } + } + extraFilter = &nostr.Filter{Tags: filter.Tags} + } else if len(filter.Kinds) > 0 { + index = indexKindPrefix + queries = make([]query, len(filter.Kinds)) + for i, kind := range filter.Kinds { + prefix := make([]byte, 1+2) + prefix[0] = index + binary.BigEndian.PutUint16(prefix[1:], uint16(kind)) + queries[i] = query{i: i, prefix: prefix} + } + extraFilter = &nostr.Filter{Tags: filter.Tags} + } else { + index = indexCreatedAtPrefix + queries = make([]query, 1) + prefix := make([]byte, 1) + prefix[0] = index + queries[0] = query{i: 0, prefix: prefix} + extraFilter = nil + } + + return queries, extraFilter, since, nil +} diff --git a/eventstore/badger/replace.go b/eventstore/badger/replace.go new file mode 100644 index 0000000..a5b5a3a --- /dev/null +++ b/eventstore/badger/replace.go @@ -0,0 +1,49 @@ +package badger + +import ( + "context" + "fmt" + 
"math" + + "github.com/dgraph-io/badger/v4" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +func (b *BadgerBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + // sanity checking + if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { + return fmt.Errorf("event with values out of expected boundaries") + } + + return b.Update(func(txn *badger.Txn) error { + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + // when addressable, add the "d" tag to the filter + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + // now we fetch the past events, whatever they are, delete them and then save the new + results, err := b.query(txn, filter, 10) // in theory limit could be just 1 and this should work + if err != nil { + return fmt.Errorf("failed to query past events with %s: %w", filter, err) + } + + shouldStore := true + for _, previous := range results { + if internal.IsOlder(previous.Event, evt) { + if _, err := b.delete(txn, previous.Event); err != nil { + return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err) + } + } else { + // there is a newer event already stored, so we won't store this + shouldStore = false + } + } + if shouldStore { + return b.save(txn, evt) + } + + return nil + }) +} diff --git a/eventstore/badger/save.go b/eventstore/badger/save.go new file mode 100644 index 0000000..5a1cac1 --- /dev/null +++ b/eventstore/badger/save.go @@ -0,0 +1,59 @@ +package badger + +import ( + "context" + "encoding/hex" + "fmt" + "math" + + "github.com/dgraph-io/badger/v4" + "github.com/fiatjaf/eventstore" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" +) + +func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error { + // sanity checking + if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { + return 
fmt.Errorf("event with values out of expected boundaries") + } + + return b.Update(func(txn *badger.Txn) error { + // query event by id to ensure we don't save duplicates + id, _ := hex.DecodeString(evt.ID) + prefix := make([]byte, 1+8) + prefix[0] = indexIdPrefix + copy(prefix[1:], id) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(prefix) + if it.ValidForPrefix(prefix) { + // event exists + return eventstore.ErrDupEvent + } + + return b.save(txn, evt) + }) +} + +func (b *BadgerBackend) save(txn *badger.Txn, evt *nostr.Event) error { + // encode to binary + bin, err := bin.Marshal(evt) + if err != nil { + return err + } + + idx := b.Serial() + // raw event store + if err := txn.Set(idx, bin); err != nil { + return err + } + + for k := range b.getIndexKeysForEvent(evt, idx[1:]) { + if err := txn.Set(k, nil); err != nil { + return err + } + } + + return nil +} diff --git a/eventstore/badger/testdata/fuzz/FuzzQuery b/eventstore/badger/testdata/fuzz/FuzzQuery new file mode 120000 index 0000000..eed0ba0 --- /dev/null +++ b/eventstore/badger/testdata/fuzz/FuzzQuery @@ -0,0 +1 @@ +../../../internal/testdata/fuzz/FuzzQuery \ No newline at end of file diff --git a/eventstore/bluge/bluge_test.go b/eventstore/bluge/bluge_test.go new file mode 100644 index 0000000..af96523 --- /dev/null +++ b/eventstore/bluge/bluge_test.go @@ -0,0 +1,81 @@ +package bluge + +import ( + "context" + "os" + "testing" + + "github.com/fiatjaf/eventstore/badger" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/assert" +) + +func TestBlugeFlow(t *testing.T) { + os.RemoveAll("/tmp/blugetest-badger") + os.RemoveAll("/tmp/blugetest-bluge") + + bb := &badger.BadgerBackend{Path: "/tmp/blugetest-badger"} + bb.Init() + defer bb.Close() + + bl := BlugeBackend{ + Path: "/tmp/blugetest-bluge", + RawEventStore: bb, + } + bl.Init() + defer bl.Close() + + ctx := context.Background() + + willDelete := make([]*nostr.Event, 0, 3) + + for i, content := range []string{ + 
"good morning mr paper maker", + "good night", + "I'll see you again in the paper house", + "tonight we dine in my house", + "the paper in this house if very good, mr", + } { + evt := &nostr.Event{Content: content, Tags: nostr.Tags{}} + evt.Sign("0000000000000000000000000000000000000000000000000000000000000001") + + bb.SaveEvent(ctx, evt) + bl.SaveEvent(ctx, evt) + + if i%2 == 0 { + willDelete = append(willDelete, evt) + } + } + + { + ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"}) + if err != nil { + t.Fatalf("QueryEvents error: %s", err) + return + } + n := 0 + for range ch { + n++ + } + assert.Equal(t, 3, n) + } + + for _, evt := range willDelete { + bl.DeleteEvent(ctx, evt) + } + + { + ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"}) + if err != nil { + t.Fatalf("QueryEvents error: %s", err) + return + } + n := 0 + for res := range ch { + n++ + assert.Equal(t, res.Content, "good night") + assert.Equal(t, res.PubKey, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798") + } + assert.Equal(t, 1, n) + } +} diff --git a/eventstore/bluge/delete.go b/eventstore/bluge/delete.go new file mode 100644 index 0000000..aee1031 --- /dev/null +++ b/eventstore/bluge/delete.go @@ -0,0 +1,11 @@ +package bluge + +import ( + "context" + + "github.com/nbd-wtf/go-nostr" +) + +func (b *BlugeBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + return b.writer.Delete(eventIdentifier(evt.ID)) +} diff --git a/eventstore/bluge/helpers.go b/eventstore/bluge/helpers.go new file mode 100644 index 0000000..ab421d6 --- /dev/null +++ b/eventstore/bluge/helpers.go @@ -0,0 +1,23 @@ +package bluge + +import "encoding/hex" + +const ( + contentField = "c" + kindField = "k" + createdAtField = "a" + pubkeyField = "p" +) + +type eventIdentifier string + +const idField = "i" + +func (id eventIdentifier) Field() string { + return idField +} + +func (id eventIdentifier) Term() []byte { + v, _ := hex.DecodeString(string(id)) + return v +} diff --git 
a/eventstore/bluge/lib.go b/eventstore/bluge/lib.go new file mode 100644 index 0000000..727f611 --- /dev/null +++ b/eventstore/bluge/lib.go @@ -0,0 +1,52 @@ +package bluge + +import ( + "fmt" + "sync" + + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/analysis/token" + "github.com/fiatjaf/eventstore" + "golang.org/x/text/unicode/norm" +) + +var _ eventstore.Store = (*BlugeBackend)(nil) + +type BlugeBackend struct { + sync.Mutex + // Path is where the index will be saved + Path string + + // RawEventStore is where we'll fetch the raw events from + // bluge will only store ids, so the actual events must be somewhere else + RawEventStore eventstore.Store + + searchConfig bluge.Config + writer *bluge.Writer +} + +func (b *BlugeBackend) Close() { + defer b.writer.Close() +} + +func (b *BlugeBackend) Init() error { + if b.Path == "" { + return fmt.Errorf("missing Path") + } + if b.RawEventStore == nil { + return fmt.Errorf("missing RawEventStore") + } + + b.searchConfig = bluge.DefaultConfig(b.Path) + b.searchConfig.DefaultSearchAnalyzer.TokenFilters = append(b.searchConfig.DefaultSearchAnalyzer.TokenFilters, + token.NewUnicodeNormalizeFilter(norm.NFKC), + ) + + var err error + b.writer, err = bluge.OpenWriter(b.searchConfig) + if err != nil { + return fmt.Errorf("error opening writer: %w", err) + } + + return nil +} diff --git a/eventstore/bluge/query.go b/eventstore/bluge/query.go new file mode 100644 index 0000000..c6019c6 --- /dev/null +++ b/eventstore/bluge/query.go @@ -0,0 +1,117 @@ +package bluge + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "github.com/blugelabs/bluge" + "github.com/blugelabs/bluge/search" + "github.com/nbd-wtf/go-nostr" +) + +func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + + if len(filter.Search) < 2 { + close(ch) + return ch, nil + } + + reader, err := b.writer.Reader() + if err != nil { + close(ch) + return nil, 
fmt.Errorf("unable to open reader: %w", err) + } + + searchQ := bluge.NewMatchQuery(filter.Search) + searchQ.SetField(contentField) + var q bluge.Query = searchQ + + complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ) + + if len(filter.Kinds) > 0 { + eitherKind := bluge.NewBooleanQuery() + eitherKind.SetMinShould(1) + for _, kind := range filter.Kinds { + kindQ := bluge.NewTermQuery(strconv.Itoa(kind)) + kindQ.SetField(kindField) + eitherKind.AddShould(kindQ) + } + complicatedQuery.AddMust(eitherKind) + q = complicatedQuery + } + + if len(filter.Authors) > 0 { + eitherPubkey := bluge.NewBooleanQuery() + eitherPubkey.SetMinShould(1) + for _, pubkey := range filter.Authors { + if len(pubkey) != 64 { + continue + } + pubkeyQ := bluge.NewTermQuery(pubkey[56:]) + pubkeyQ.SetField(pubkeyField) + eitherPubkey.AddShould(pubkeyQ) + } + complicatedQuery.AddMust(eitherPubkey) + q = complicatedQuery + } + + if filter.Since != nil || filter.Until != nil { + min := 0.0 + if filter.Since != nil { + min = float64(*filter.Since) + } + max := float64(nostr.Now()) + if filter.Until != nil { + max = float64(*filter.Until) + } + dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true) + dateRangeQ.SetField(createdAtField) + complicatedQuery.AddMust(dateRangeQ) + q = complicatedQuery + } + + limit := 40 + if filter.Limit != 0 { + limit = filter.Limit + if filter.Limit > 150 { + limit = 150 + } + } + + req := bluge.NewTopNSearch(limit, q) + + dmi, err := reader.Search(context.Background(), req) + if err != nil { + close(ch) + reader.Close() + return ch, fmt.Errorf("error executing search: %w", err) + } + + go func() { + defer reader.Close() + defer close(ch) + + var next *search.DocumentMatch + for next, err = dmi.Next(); next != nil; next, err = dmi.Next() { + next.VisitStoredFields(func(field string, value []byte) bool { + id := hex.EncodeToString(value) + rawch, err := b.RawEventStore.QueryEvents(ctx, nostr.Filter{IDs: []string{id}}) + if err != nil { + return 
false + } + for evt := range rawch { + ch <- evt + } + return false + }) + } + if err != nil { + return + } + }() + + return ch, nil +} diff --git a/eventstore/bluge/replace.go b/eventstore/bluge/replace.go new file mode 100644 index 0000000..86ce0cc --- /dev/null +++ b/eventstore/bluge/replace.go @@ -0,0 +1,44 @@ +package bluge + +import ( + "context" + "fmt" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + b.Lock() + defer b.Unlock() + + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + ch, err := b.QueryEvents(ctx, filter) + if err != nil { + return fmt.Errorf("failed to query before replacing: %w", err) + } + + shouldStore := true + for previous := range ch { + if internal.IsOlder(previous, evt) { + if err := b.DeleteEvent(ctx, previous); err != nil { + return fmt.Errorf("failed to delete event for replacing: %w", err) + } + } else { + shouldStore = false + } + } + + if shouldStore { + if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent { + return fmt.Errorf("failed to save: %w", err) + } + } + + return nil +} diff --git a/eventstore/bluge/save.go b/eventstore/bluge/save.go new file mode 100644 index 0000000..ac5b107 --- /dev/null +++ b/eventstore/bluge/save.go @@ -0,0 +1,28 @@ +package bluge + +import ( + "context" + "fmt" + "strconv" + + "github.com/blugelabs/bluge" + "github.com/nbd-wtf/go-nostr" +) + +func (b *BlugeBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error { + id := eventIdentifier(evt.ID) + doc := &bluge.Document{ + bluge.NewKeywordFieldBytes(id.Field(), id.Term()).Sortable().StoreValue(), + } + + doc.AddField(bluge.NewTextField(contentField, evt.Content)) + doc.AddField(bluge.NewTextField(kindField, 
strconv.Itoa(evt.Kind))) + doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey[56:])) + doc.AddField(bluge.NewNumericField(createdAtField, float64(evt.CreatedAt))) + + if err := b.writer.Update(doc.ID(), doc); err != nil { + return fmt.Errorf("failed to write '%s' document: %w", evt.ID, err) + } + + return nil +} diff --git a/eventstore/cmd/eventstore/.gitignore b/eventstore/cmd/eventstore/.gitignore new file mode 100644 index 0000000..7992fce --- /dev/null +++ b/eventstore/cmd/eventstore/.gitignore @@ -0,0 +1 @@ +eventstore diff --git a/eventstore/cmd/eventstore/README.md b/eventstore/cmd/eventstore/README.md new file mode 100644 index 0000000..aa7ec9f --- /dev/null +++ b/eventstore/cmd/eventstore/README.md @@ -0,0 +1,39 @@ +# eventstore command-line tool + +``` +go install github.com/fiatjaf/eventstore/cmd/eventstore@latest +``` + +## Usage + +This should be pretty straightforward. You pipe events or filters, as JSON, to the `eventstore` command, and they yield something. You can use [nak](https://github.com/fiatjaf/nak) to generate these events or filters easily. + +### Querying the last 100 events of kind 1 + +```fish +~> nak req -k 1 -l 100 --bare | eventstore -d /path/to/store query +~> # or +~> echo '{"kinds":[1],"limit":100}' | eventstore -d /path/to/store query +``` + +This will automatically determine the storage type being used at `/path/to/store`, but you can also specify it manually using the `-t` option (`-t lmdb`, `-t sqlite` etc). 
+ +### Saving an event to the store + +```fish +~> nak event -k 1 -c hello | eventstore -d /path/to/store save +~> # or +~> echo '{"id":"35369e6bae5f77c4e1745c2eb5db84c4493e87f6e449aee62a261bbc1fea2788","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1701193836,"kind":1,"tags":[],"content":"hello","sig":"ef08d559e042d9af4cdc3328a064f737603d86ec4f929f193d5a3ce9ea22a3fb8afc1923ee3c3742fd01856065352c5632e91f633528c80e9c5711fa1266824c"}' | eventstore -d /path/to/store save +``` + +You can also create a database from scratch if it's a disk database, but then you have to specify `-t` to `sqlite`, `badger` or `lmdb`. + +### Connecting to Postgres, MySQL and other remote databases + +You should be able to connect by just passing the database connection URI to `-d`: + +```bash +~> eventstore -d 'postgres://myrelay:38yg4o83yf48a3s7g@localhost:5432/myrelay?sslmode=disable' +``` + +That should be prefixed with `postgres://` for Postgres, `mysql://` for MySQL and `https://` for ElasticSearch. 
diff --git a/eventstore/cmd/eventstore/delete.go b/eventstore/cmd/eventstore/delete.go new file mode 100644 index 0000000..2acc661 --- /dev/null +++ b/eventstore/cmd/eventstore/delete.go @@ -0,0 +1,39 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/urfave/cli/v3" + "github.com/nbd-wtf/go-nostr" +) + +var delete_ = &cli.Command{ + Name: "delete", + ArgsUsage: "[]", + Usage: "deletes an event by id and all its associated index entries", + Description: "takes an id either as an argument or reads a stream of ids from stdin and deletes them from the currently open eventstore.", + Action: func(ctx context.Context, c *cli.Command) error { + hasError := false + for line := range getStdinLinesOrFirstArgument(c) { + f := nostr.Filter{IDs: []string{line}} + ch, err := db.QueryEvents(ctx, f) + if err != nil { + fmt.Fprintf(os.Stderr, "error querying for %s: %s\n", f, err) + hasError = true + } + for evt := range ch { + if err := db.DeleteEvent(ctx, evt); err != nil { + fmt.Fprintf(os.Stderr, "error deleting %s: %s\n", evt, err) + hasError = true + } + } + } + + if hasError { + os.Exit(123) + } + return nil + }, +} diff --git a/eventstore/cmd/eventstore/helpers.go b/eventstore/cmd/eventstore/helpers.go new file mode 100644 index 0000000..ce65b33 --- /dev/null +++ b/eventstore/cmd/eventstore/helpers.go @@ -0,0 +1,134 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/urfave/cli/v3" +) + +const ( + LINE_PROCESSING_ERROR = iota +) + +func detect(dir string) (string, error) { + mayBeMMM := false + if n := strings.Index(dir, "/"); n > 0 { + mayBeMMM = true + dir = filepath.Dir(dir) + } + + f, err := os.Stat(dir) + if err != nil { + return "", err + } + if !f.IsDir() { + f, err := os.Open(dir) + if err != nil { + return "", err + } + buf := make([]byte, 15) + f.Read(buf) + if string(buf) == "SQLite format 3" { + return "sqlite", nil + } + + return "", fmt.Errorf("unknown db format") + } + + 
entries, err := os.ReadDir(dir) + if err != nil { + return "", err + } + + if mayBeMMM { + for _, entry := range entries { + if entry.Name() == "mmmm" { + if entries, err := os.ReadDir(filepath.Join(dir, "mmmm")); err == nil { + for _, e := range entries { + if strings.HasSuffix(e.Name(), ".mdb") { + return "mmm", nil + } + } + } + } + } + } + for _, entry := range entries { + if strings.HasSuffix(entry.Name(), ".mdb") { + return "lmdb", nil + } + if strings.HasSuffix(entry.Name(), ".vlog") { + return "badger", nil + } + } + + return "", fmt.Errorf("undetected") +} + +func getStdin() string { + stat, _ := os.Stdin.Stat() + if (stat.Mode() & os.ModeCharDevice) == 0 { + read := bytes.NewBuffer(make([]byte, 0, 1000)) + _, err := io.Copy(read, os.Stdin) + if err == nil { + return read.String() + } + } + return "" +} + +func isPiped() bool { + stat, _ := os.Stdin.Stat() + return stat.Mode()&os.ModeCharDevice == 0 +} + +func getStdinLinesOrFirstArgument(c *cli.Command) chan string { + // try the first argument + target := c.Args().First() + if target != "" { + single := make(chan string, 1) + single <- target + close(single) + return single + } + + // try the stdin + multi := make(chan string) + writeStdinLinesOrNothing(multi) + return multi +} + +func getStdinLinesOrBlank() chan string { + multi := make(chan string) + if hasStdinLines := writeStdinLinesOrNothing(multi); !hasStdinLines { + single := make(chan string, 1) + single <- "" + close(single) + return single + } else { + return multi + } +} + +func writeStdinLinesOrNothing(ch chan string) (hasStdinLines bool) { + if isPiped() { + // piped + go func() { + scanner := bufio.NewScanner(os.Stdin) + for scanner.Scan() { + ch <- strings.TrimSpace(scanner.Text()) + } + close(ch) + }() + return true + } else { + // not piped + return false + } +} diff --git a/eventstore/cmd/eventstore/main.go b/eventstore/cmd/eventstore/main.go new file mode 100644 index 0000000..03de8a3 --- /dev/null +++ 
b/eventstore/cmd/eventstore/main.go @@ -0,0 +1,168 @@ +package main + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "log" + "os" + "strings" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/eventstore/elasticsearch" + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/eventstore/mysql" + "github.com/fiatjaf/eventstore/postgresql" + "github.com/fiatjaf/eventstore/slicestore" + "github.com/fiatjaf/eventstore/sqlite3" + "github.com/fiatjaf/eventstore/strfry" + "github.com/nbd-wtf/go-nostr" + "github.com/urfave/cli/v3" +) + +var db eventstore.Store + +var app = &cli.Command{ + Name: "eventstore", + Usage: "a CLI for all the eventstore backends", + UsageText: "eventstore -d ./data/sqlite ...", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "store", + Aliases: []string{"d"}, + Usage: "path to the database file or directory or database connection uri", + Required: true, + }, + &cli.StringFlag{ + Name: "type", + Aliases: []string{"t"}, + Usage: "store type ('sqlite', 'lmdb', 'badger', 'postgres', 'mysql', 'elasticsearch', 'mmm')", + }, + }, + Before: func(ctx context.Context, c *cli.Command) (context.Context, error) { + path := strings.Trim(c.String("store"), "/") + typ := c.String("type") + if typ != "" { + // bypass automatic detection + // this also works for creating disk databases from scratch + } else { + // try to detect based on url scheme + switch { + case strings.HasPrefix(path, "postgres://"), strings.HasPrefix(path, "postgresql://"): + typ = "postgres" + case strings.HasPrefix(path, "mysql://"): + typ = "mysql" + case strings.HasPrefix(path, "https://"): + // if we ever add something else that uses URLs we'll have to modify this + typ = "elasticsearch" + case strings.HasSuffix(path, ".conf"): + typ = "strfry" + case strings.HasSuffix(path, ".jsonl"): + typ = "file" + default: + // try to detect based on the form and names of disk files + dbname, err := detect(path) + if err != nil { + if 
os.IsNotExist(err) { + return ctx, fmt.Errorf( + "'%s' does not exist, to create a store there specify the --type argument", path) + } + return ctx, fmt.Errorf("failed to detect store type: %w", err) + } + typ = dbname + } + } + + switch typ { + case "sqlite": + db = &sqlite3.SQLite3Backend{ + DatabaseURL: path, + QueryLimit: 1_000_000, + QueryAuthorsLimit: 1_000_000, + QueryKindsLimit: 1_000_000, + QueryIDsLimit: 1_000_000, + QueryTagsLimit: 1_000_000, + } + case "lmdb": + db = &lmdb.LMDBBackend{Path: path, MaxLimit: 1_000_000} + case "badger": + db = &badger.BadgerBackend{Path: path, MaxLimit: 1_000_000} + case "mmm": + var err error + if db, err = doMmmInit(path); err != nil { + return ctx, err + } + case "postgres", "postgresql": + db = &postgresql.PostgresBackend{ + DatabaseURL: path, + QueryLimit: 1_000_000, + QueryAuthorsLimit: 1_000_000, + QueryKindsLimit: 1_000_000, + QueryIDsLimit: 1_000_000, + QueryTagsLimit: 1_000_000, + } + case "mysql": + db = &mysql.MySQLBackend{ + DatabaseURL: path, + QueryLimit: 1_000_000, + QueryAuthorsLimit: 1_000_000, + QueryKindsLimit: 1_000_000, + QueryIDsLimit: 1_000_000, + QueryTagsLimit: 1_000_000, + } + case "elasticsearch": + db = &elasticsearch.ElasticsearchStorage{URL: path} + case "strfry": + db = &strfry.StrfryBackend{ConfigPath: path} + case "file": + db = &slicestore.SliceStore{} + + // run this after we've called db.Init() + defer func() { + f, err := os.Open(path) + if err != nil { + log.Printf("failed to file at '%s': %s\n", path, err) + os.Exit(3) + } + scanner := bufio.NewScanner(f) + scanner.Buffer(make([]byte, 16*1024*1024), 256*1024*1024) + i := 0 + for scanner.Scan() { + var evt nostr.Event + if err := json.Unmarshal(scanner.Bytes(), &evt); err != nil { + log.Printf("invalid event read at line %d: %s (`%s`)\n", i, err, scanner.Text()) + } + db.SaveEvent(ctx, &evt) + i++ + } + }() + case "": + return ctx, fmt.Errorf("couldn't determine store type, you can use --type to specify it manually") + default: + 
return ctx, fmt.Errorf("'%s' store type is not supported by this CLI", typ) + } + + if err := db.Init(); err != nil { + return ctx, err + } + + return ctx, nil + }, + Commands: []*cli.Command{ + queryOrSave, + query, + save, + delete_, + neg, + }, + DefaultCommand: "query-or-save", +} + +func main() { + if err := app.Run(context.Background(), os.Args); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/eventstore/cmd/eventstore/main_mmm.go b/eventstore/cmd/eventstore/main_mmm.go new file mode 100644 index 0000000..9b115c5 --- /dev/null +++ b/eventstore/cmd/eventstore/main_mmm.go @@ -0,0 +1,34 @@ +//go:build !windows + +package main + +import ( + "context" + "os" + "path/filepath" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/mmm" + "github.com/nbd-wtf/go-nostr" + "github.com/rs/zerolog" +) + +func doMmmInit(path string) (eventstore.Store, error) { + logger := zerolog.New(zerolog.NewConsoleWriter(func(w *zerolog.ConsoleWriter) { + w.Out = os.Stderr + })) + mmmm := mmm.MultiMmapManager{ + Dir: filepath.Dir(path), + Logger: &logger, + } + if err := mmmm.Init(); err != nil { + return nil, err + } + il := &mmm.IndexingLayer{ + ShouldIndex: func(ctx context.Context, e *nostr.Event) bool { return false }, + } + if err := mmmm.EnsureLayer(filepath.Base(path), il); err != nil { + return nil, err + } + return il, nil +} diff --git a/eventstore/cmd/eventstore/main_other.go b/eventstore/cmd/eventstore/main_other.go new file mode 100644 index 0000000..c9e9018 --- /dev/null +++ b/eventstore/cmd/eventstore/main_other.go @@ -0,0 +1,14 @@ +//go:build windows + +package main + +import ( + "fmt" + "runtime" + + "github.com/fiatjaf/eventstore" +) + +func doMmmInit(path string) (eventstore.Store, error) { + return nil, fmt.Errorf("unsupported OSs (%v)", runtime.GOOS) +} diff --git a/eventstore/cmd/eventstore/neg.go b/eventstore/cmd/eventstore/neg.go new file mode 100644 index 0000000..ca857de --- /dev/null +++ b/eventstore/cmd/eventstore/neg.go @@ 
-0,0 +1,97 @@ +package main + +import ( + "context" + "fmt" + "io" + "math" + "os" + "sync" + + "github.com/urfave/cli/v3" + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip77/negentropy" + "github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector" +) + +var neg = &cli.Command{ + Name: "neg", + ArgsUsage: " []", + Usage: "initiates a negentropy session with a filter or reconciles a received negentropy message", + Description: "applies the filter to the currently open eventstore. if no negentropy message was given it will initiate the process and emit one, if one was given either as an argument or via stdin, it will be reconciled against the current eventstore.\nthe next reconciliation message will be emitted on stdout.\na stream of need/have ids (or nothing) will be emitted to stderr.", + Flags: []cli.Flag{ + &cli.UintFlag{ + Name: "frame-size-limit", + }, + }, + Action: func(ctx context.Context, c *cli.Command) error { + jfilter := c.Args().First() + if jfilter == "" { + return fmt.Errorf("missing filter argument") + } + + filter := nostr.Filter{} + if err := easyjson.Unmarshal([]byte(jfilter), &filter); err != nil { + return fmt.Errorf("invalid filter %s: %s\n", jfilter, err) + } + + frameSizeLimit := int(c.Uint("frame-size-limit")) + if frameSizeLimit == 0 { + frameSizeLimit = math.MaxInt + } + + // create negentropy object and initialize it with events + vec := vector.New() + neg := negentropy.New(vec, frameSizeLimit) + ch, err := db.QueryEvents(ctx, filter) + if err != nil { + return fmt.Errorf("error querying: %s\n", err) + } + for evt := range ch { + vec.Insert(evt.CreatedAt, evt.ID) + } + + wg := sync.WaitGroup{} + go func() { + defer wg.Done() + for item := range neg.Haves { + fmt.Fprintf(os.Stderr, "have %s", item) + } + }() + go func() { + defer wg.Done() + for item := range neg.HaveNots { + fmt.Fprintf(os.Stderr, "need %s", item) + } + }() + + // get negentropy message from argument or stdin pipe + var 
msg string + if isPiped() { + data, err := io.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("failed to read from stdin: %w", err) + } + msg = string(data) + } else { + msg = c.Args().Get(1) + } + + if msg == "" { + // initiate the process + out := neg.Start() + fmt.Println(out) + } else { + // process the message + out, err := neg.Reconcile(msg) + if err != nil { + return fmt.Errorf("negentropy failed: %s", err) + } + fmt.Println(out) + } + + wg.Wait() + return nil + }, +} diff --git a/eventstore/cmd/eventstore/query-or-save.go b/eventstore/cmd/eventstore/query-or-save.go new file mode 100644 index 0000000..af515be --- /dev/null +++ b/eventstore/cmd/eventstore/query-or-save.go @@ -0,0 +1,61 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/urfave/cli/v3" + "github.com/nbd-wtf/go-nostr" +) + +// this is the default command when no subcommands are given, we will just try everything +var queryOrSave = &cli.Command{ + Hidden: true, + Name: "query-or-save", + Action: func(ctx context.Context, c *cli.Command) error { + line := getStdin() + + ee := &nostr.EventEnvelope{} + re := &nostr.ReqEnvelope{} + e := &nostr.Event{} + f := &nostr.Filter{} + if json.Unmarshal([]byte(line), ee) == nil && ee.Event.ID != "" { + e = &ee.Event + return doSave(ctx, line, e) + } + if json.Unmarshal([]byte(line), e) == nil && e.ID != "" { + return doSave(ctx, line, e) + } + if json.Unmarshal([]byte(line), re) == nil && len(re.Filters) > 0 { + f = &re.Filters[0] + return doQuery(ctx, f) + } + if json.Unmarshal([]byte(line), f) == nil && len(f.String()) > 2 { + return doQuery(ctx, f) + } + + return fmt.Errorf("couldn't parse input '%s'", line) + }, +} + +func doSave(ctx context.Context, line string, e *nostr.Event) error { + if err := db.SaveEvent(ctx, e); err != nil { + return fmt.Errorf("failed to save event '%s': %s", line, err) + } + fmt.Fprintf(os.Stderr, "saved %s", e.ID) + return nil +} + +func doQuery(ctx context.Context, f 
*nostr.Filter) error { + ch, err := db.QueryEvents(ctx, *f) + if err != nil { + return fmt.Errorf("error querying: %w", err) + } + + for evt := range ch { + fmt.Println(evt) + } + return nil +} diff --git a/eventstore/cmd/eventstore/query.go b/eventstore/cmd/eventstore/query.go new file mode 100644 index 0000000..a75636c --- /dev/null +++ b/eventstore/cmd/eventstore/query.go @@ -0,0 +1,45 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" + "github.com/urfave/cli/v3" +) + +var query = &cli.Command{ + Name: "query", + ArgsUsage: "[]", + Usage: "queries an eventstore for events, takes a filter as argument", + Description: "applies the filter to the currently open eventstore, returning up to a million events.\n takes either a filter as an argument or reads a stream of filters from stdin.", + Action: func(ctx context.Context, c *cli.Command) error { + hasError := false + for line := range getStdinLinesOrFirstArgument(c) { + filter := nostr.Filter{} + if err := easyjson.Unmarshal([]byte(line), &filter); err != nil { + fmt.Fprintf(os.Stderr, "invalid filter '%s': %s\n", line, err) + hasError = true + continue + } + + ch, err := db.QueryEvents(ctx, filter) + if err != nil { + fmt.Fprintf(os.Stderr, "error querying: %s\n", err) + hasError = true + continue + } + + for evt := range ch { + fmt.Println(evt) + } + } + + if hasError { + os.Exit(123) + } + return nil + }, +} diff --git a/eventstore/cmd/eventstore/save.go b/eventstore/cmd/eventstore/save.go new file mode 100644 index 0000000..2628b33 --- /dev/null +++ b/eventstore/cmd/eventstore/save.go @@ -0,0 +1,42 @@ +package main + +import ( + "context" + "fmt" + "os" + + "github.com/urfave/cli/v3" + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" +) + +var save = &cli.Command{ + Name: "save", + ArgsUsage: "[]", + Usage: "stores an event", + Description: "takes either an event as an argument or reads a stream of events from stdin and inserts 
those in the currently opened eventstore.\ndoesn't perform any kind of signature checking or replacement.", + Action: func(ctx context.Context, c *cli.Command) error { + hasError := false + for line := range getStdinLinesOrFirstArgument(c) { + var event nostr.Event + if err := easyjson.Unmarshal([]byte(line), &event); err != nil { + fmt.Fprintf(os.Stderr, "invalid event '%s': %s\n", line, err) + hasError = true + continue + } + + if err := db.SaveEvent(ctx, &event); err != nil { + fmt.Fprintf(os.Stderr, "failed to save event '%s': %s\n", line, err) + hasError = true + continue + } + + fmt.Fprintf(os.Stderr, "saved %s\n", event.ID) + } + + if hasError { + os.Exit(123) + } + return nil + }, +} diff --git a/eventstore/errors.go b/eventstore/errors.go new file mode 100644 index 0000000..553ce65 --- /dev/null +++ b/eventstore/errors.go @@ -0,0 +1,5 @@ +package eventstore + +import "errors" + +var ErrDupEvent = errors.New("duplicate: event already exists") diff --git a/eventstore/internal/binary/cmd/decode-binary/.gitignore b/eventstore/internal/binary/cmd/decode-binary/.gitignore new file mode 100644 index 0000000..e5d9c00 --- /dev/null +++ b/eventstore/internal/binary/cmd/decode-binary/.gitignore @@ -0,0 +1 @@ +decode-binary diff --git a/eventstore/internal/binary/cmd/decode-binary/main.go b/eventstore/internal/binary/cmd/decode-binary/main.go new file mode 100644 index 0000000..92c3046 --- /dev/null +++ b/eventstore/internal/binary/cmd/decode-binary/main.go @@ -0,0 +1,39 @@ +package main + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + + "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" +) + +func main() { + b, err := io.ReadAll(os.Stdin) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to read from stdin: %s\n", err) + os.Exit(1) + return + } + b = bytes.TrimSpace(b) + + if bytes.HasPrefix(b, []byte("0x")) { + fromHex := make([]byte, (len(b)-2)/2) + _, err := hex.Decode(fromHex, b[2:]) + if err == nil { + b = fromHex + } 
+ } + + var evt nostr.Event + err = binary.Unmarshal(b, &evt) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to decode: %s\n", err) + os.Exit(1) + return + } + fmt.Println(evt.String()) +} diff --git a/eventstore/internal/binary/hybrid.go b/eventstore/internal/binary/hybrid.go new file mode 100644 index 0000000..c9c96f5 --- /dev/null +++ b/eventstore/internal/binary/hybrid.go @@ -0,0 +1,103 @@ +package binary + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + + "github.com/nbd-wtf/go-nostr" +) + +// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later. +func Unmarshal(data []byte, evt *nostr.Event) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to decode binary for event %s from %s at %d: %v", evt.ID, evt.PubKey, evt.CreatedAt, r) + } + }() + + evt.ID = hex.EncodeToString(data[0:32]) + evt.PubKey = hex.EncodeToString(data[32:64]) + evt.Sig = hex.EncodeToString(data[64:128]) + evt.CreatedAt = nostr.Timestamp(binary.BigEndian.Uint32(data[128:132])) + evt.Kind = int(binary.BigEndian.Uint16(data[132:134])) + contentLength := int(binary.BigEndian.Uint16(data[134:136])) + evt.Content = string(data[136 : 136+contentLength]) + + curr := 136 + contentLength + + nTags := binary.BigEndian.Uint16(data[curr : curr+2]) + curr++ + evt.Tags = make(nostr.Tags, nTags) + + for t := range evt.Tags { + curr++ + nItems := int(data[curr]) + tag := make(nostr.Tag, nItems) + for i := range tag { + curr = curr + 1 + itemSize := int(binary.BigEndian.Uint16(data[curr : curr+2])) + itemStart := curr + 2 + item := string(data[itemStart : itemStart+itemSize]) + tag[i] = item + curr = itemStart + itemSize + } + evt.Tags[t] = tag + } + + return err +} + +// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later. 
+func Marshal(evt *nostr.Event) ([]byte, error) { + content := []byte(evt.Content) + buf := make([]byte, 32+32+64+4+2+2+len(content)+65536+len(evt.Tags)*40 /* blergh */) + + hex.Decode(buf[0:32], []byte(evt.ID)) + hex.Decode(buf[32:64], []byte(evt.PubKey)) + hex.Decode(buf[64:128], []byte(evt.Sig)) + + if evt.CreatedAt > MaxCreatedAt { + return nil, fmt.Errorf("created_at is too big: %d", evt.CreatedAt) + } + binary.BigEndian.PutUint32(buf[128:132], uint32(evt.CreatedAt)) + + if evt.Kind > MaxKind { + return nil, fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, MaxKind) + } + binary.BigEndian.PutUint16(buf[132:134], uint16(evt.Kind)) + + if contentLength := len(content); contentLength > MaxContentSize { + return nil, fmt.Errorf("content is too large: %d, max is %d", contentLength, MaxContentSize) + } else { + binary.BigEndian.PutUint16(buf[134:136], uint16(contentLength)) + } + copy(buf[136:], content) + + if tagCount := len(evt.Tags); tagCount > MaxTagCount { + return nil, fmt.Errorf("can't encode too many tags: %d, max is %d", tagCount, MaxTagCount) + } else { + binary.BigEndian.PutUint16(buf[136+len(content):136+len(content)+2], uint16(tagCount)) + } + + buf = buf[0 : 136+len(content)+2] + + for _, tag := range evt.Tags { + if itemCount := len(tag); itemCount > MaxTagItemCount { + return nil, fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, MaxTagItemCount) + } else { + buf = append(buf, uint8(itemCount)) + } + for _, item := range tag { + itemb := []byte(item) + itemSize := len(itemb) + if itemSize > MaxTagItemSize { + return nil, fmt.Errorf("tag item is too large: %d, max is %d", itemSize, MaxTagItemSize) + } + buf = binary.BigEndian.AppendUint16(buf, uint16(itemSize)) + buf = append(buf, itemb...) 
+ buf = append(buf, 0) + } + } + return buf, nil +} diff --git a/eventstore/internal/binary/limits.go b/eventstore/internal/binary/limits.go new file mode 100644 index 0000000..0e0b50b --- /dev/null +++ b/eventstore/internal/binary/limits.go @@ -0,0 +1,35 @@ +package binary + +import ( + "math" + + "github.com/nbd-wtf/go-nostr" +) + +const ( + MaxKind = math.MaxUint16 + MaxCreatedAt = math.MaxUint32 + MaxContentSize = math.MaxUint16 + MaxTagCount = math.MaxUint16 + MaxTagItemCount = math.MaxUint8 + MaxTagItemSize = math.MaxUint16 +) + +func EventEligibleForBinaryEncoding(event *nostr.Event) bool { + if len(event.Content) > MaxContentSize || event.Kind > MaxKind || event.CreatedAt > MaxCreatedAt || len(event.Tags) > MaxTagCount { + return false + } + + for _, tag := range event.Tags { + if len(tag) > MaxTagItemCount { + return false + } + for _, item := range tag { + if len(item) > MaxTagItemSize { + return false + } + } + } + + return true +} diff --git a/eventstore/internal/checks/interface.go b/eventstore/internal/checks/interface.go new file mode 100644 index 0000000..4402d43 --- /dev/null +++ b/eventstore/internal/checks/interface.go @@ -0,0 +1,27 @@ +package checks + +import ( + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/eventstore/bluge" + "github.com/fiatjaf/eventstore/edgedb" + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/eventstore/mongo" + "github.com/fiatjaf/eventstore/mysql" + "github.com/fiatjaf/eventstore/postgresql" + "github.com/fiatjaf/eventstore/sqlite3" + "github.com/fiatjaf/eventstore/strfry" +) + +// compile-time checks to ensure all backends implement Store +var ( + _ eventstore.Store = (*badger.BadgerBackend)(nil) + _ eventstore.Store = (*lmdb.LMDBBackend)(nil) + _ eventstore.Store = (*edgedb.EdgeDBBackend)(nil) + _ eventstore.Store = (*postgresql.PostgresBackend)(nil) + _ eventstore.Store = (*mongo.MongoDBBackend)(nil) + _ eventstore.Store = (*sqlite3.SQLite3Backend)(nil) + _ 
eventstore.Store = (*strfry.StrfryBackend)(nil) + _ eventstore.Store = (*bluge.BlugeBackend)(nil) + _ eventstore.Store = (*mysql.MySQLBackend)(nil) +) diff --git a/eventstore/internal/helpers.go b/eventstore/internal/helpers.go new file mode 100644 index 0000000..239c657 --- /dev/null +++ b/eventstore/internal/helpers.go @@ -0,0 +1,183 @@ +package internal + +import ( + "cmp" + "math" + "slices" + "strings" + + mergesortedslices "fiatjaf.com/lib/merge-sorted-slices" + "github.com/nbd-wtf/go-nostr" +) + +func IsOlder(previous, next *nostr.Event) bool { + return previous.CreatedAt < next.CreatedAt || + (previous.CreatedAt == next.CreatedAt && previous.ID > next.ID) +} + +func ChooseNarrowestTag(filter nostr.Filter) (key string, values []string, goodness int) { + var tagKey string + var tagValues []string + for key, values := range filter.Tags { + switch key { + case "e", "E", "q": + // 'e' and 'q' are the narrowest possible, so if we have that we will use it and that's it + tagKey = key + tagValues = values + goodness = 9 + break + case "a", "A", "i", "I", "g", "r": + // these are second-best as they refer to relatively static things + goodness = 8 + tagKey = key + tagValues = values + case "d": + // this is as good as long as we have an "authors" + if len(filter.Authors) != 0 && goodness < 7 { + goodness = 7 + tagKey = key + tagValues = values + } else if goodness < 4 { + goodness = 4 + tagKey = key + tagValues = values + } + case "h", "t", "l", "k", "K": + // these things denote "categories", so they are a little more broad + if goodness < 6 { + goodness = 6 + tagKey = key + tagValues = values + } + case "p": + // this is broad and useless for a pure tag search, but we will still prefer it over others + // for secondary filtering + if goodness < 2 { + goodness = 2 + tagKey = key + tagValues = values + } + default: + // all the other tags are probably too broad and useless + if goodness == 0 { + tagKey = key + tagValues = values + } + } + } + + return tagKey, 
tagValues, goodness +} + +func CopyMapWithoutKey[K comparable, V any](originalMap map[K]V, key K) map[K]V { + newMap := make(map[K]V, len(originalMap)-1) + for k, v := range originalMap { + if k != key { + newMap[k] = v + } + } + return newMap +} + +type IterEvent struct { + *nostr.Event + Q int +} + +// MergeSortMultipleBatches takes the results of multiple iterators, which are already sorted, +// and merges them into a single big sorted slice +func MergeSortMultiple(batches [][]IterEvent, limit int, dst []IterEvent) []IterEvent { + // clear up empty lists here while simultaneously computing the total count. + // this helps because if there are a bunch of empty lists then this pre-clean + // step will get us in the faster 'merge' branch otherwise we would go to the other. + // we would have to do the cleaning anyway inside it. + // and even if we still go on the other we save one iteration by already computing the + // total count. + total := 0 + for i := len(batches) - 1; i >= 0; i-- { + if len(batches[i]) == 0 { + batches = SwapDelete(batches, i) + } else { + total += len(batches[i]) + } + } + + if limit == -1 { + limit = total + } + + // this amazing equation will ensure that if one of the two sides goes very small (like 1 or 2) + // the other can go very high (like 500) and we're still in the 'merge' branch. 
+ // if values go somewhere in the middle then they may match the 'merge' branch (batches=20,limit=70) + // or not (batches=25, limit=60) + if math.Log(float64(len(batches)*2))+math.Log(float64(limit)) < 8 { + if dst == nil { + dst = make([]IterEvent, limit) + } else if cap(dst) < limit { + dst = slices.Grow(dst, limit-len(dst)) + } + dst = dst[0:limit] + return mergesortedslices.MergeFuncNoEmptyListsIntoSlice(dst, batches, compareIterEvent) + } else { + if dst == nil { + dst = make([]IterEvent, total) + } else if cap(dst) < total { + dst = slices.Grow(dst, total-len(dst)) + } + dst = dst[0:total] + + // use quicksort in a dumb way that will still be fast because it's cheated + lastIndex := 0 + for _, batch := range batches { + copy(dst[lastIndex:], batch) + lastIndex += len(batch) + } + + slices.SortFunc(dst, compareIterEvent) + + for i, j := 0, total-1; i < j; i, j = i+1, j-1 { + dst[i], dst[j] = dst[j], dst[i] + } + + if limit < len(dst) { + return dst[0:limit] + } + return dst + } +} + +// BatchSizePerNumberOfQueries tries to make an educated guess for the batch size given the total filter limit and +// the number of abstract queries we'll be conducting at the same time +func BatchSizePerNumberOfQueries(totalFilterLimit int, numberOfQueries int) int { + if numberOfQueries == 1 || totalFilterLimit*numberOfQueries < 50 { + return totalFilterLimit + } + + return int( + math.Ceil( + math.Pow(float64(totalFilterLimit), 0.80) / math.Pow(float64(numberOfQueries), 0.71), + ), + ) +} + +func SwapDelete[A any](arr []A, i int) []A { + arr[i] = arr[len(arr)-1] + return arr[:len(arr)-1] +} + +func compareIterEvent(a, b IterEvent) int { + if a.Event == nil { + if b.Event == nil { + return 0 + } else { + return -1 + } + } else if b.Event == nil { + return 1 + } + + if a.CreatedAt == b.CreatedAt { + return strings.Compare(a.ID, b.ID) + } + return cmp.Compare(a.CreatedAt, b.CreatedAt) +} diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22 
b/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22 new file mode 100644 index 0000000..35c2e7c --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(256) +uint(31) +uint(260) +uint(2) +uint(69) +uint(385) +uint(1) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd b/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd new file mode 100644 index 0000000..46eee56 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(267) +uint(50) +uint(355) +uint(2) +uint(69) +uint(213) +uint(1) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57 b/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57 new file mode 100644 index 0000000..f668119 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(280) +uint(0) +uint(13) +uint(2) +uint(2) +uint(0) +uint(0) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d b/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d new file mode 100644 index 0000000..3a4b052 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(259) +uint(126) +uint(5) +uint(23) +uint(0) +uint(0) +uint(92) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116 b/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116 new file mode 100644 index 0000000..2b67e29 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(201) +uint(50) +uint(13) +uint(97) +uint(0) +uint(0) +uint(77) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30 b/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30 new file mode 100644 index 0000000..9445b8e --- /dev/null +++ 
b/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(164) +uint(50) +uint(13) +uint(1) +uint(2) +uint(13) +uint(0) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2 b/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2 new file mode 100644 index 0000000..ac26f30 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(200) +uint(50) +uint(13) +uint(8) +uint(2) +uint(0) +uint(1) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec b/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec new file mode 100644 index 0000000..5676736 --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(200) +uint(117) +uint(13) +uint(2) +uint(2) +uint(0) +uint(1) diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2 b/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2 new file mode 100644 index 0000000..ad6fd8f --- /dev/null +++ b/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2 @@ -0,0 +1,8 @@ +go test fuzz v1 +uint(200) +uint(50) +uint(13) +uint(2) +uint(2) +uint(0) +uint(0) diff --git a/eventstore/lmdb/count.go b/eventstore/lmdb/count.go new file mode 100644 index 0000000..a32fe1b --- /dev/null +++ b/eventstore/lmdb/count.go @@ -0,0 +1,241 @@ +package lmdb + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + + "github.com/PowerDNS/lmdb-go/lmdb" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip45" + "github.com/nbd-wtf/go-nostr/nip45/hyperloglog" + "golang.org/x/exp/slices" +) + +func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + var count int64 = 0 + + queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter) + if err != nil { + 
return 0, err + } + + err = b.lmdbEnv.View(func(txn *lmdb.Txn) error { + // actually iterate + for _, q := range queries { + cursor, err := txn.OpenCursor(q.dbi) + if err != nil { + continue + } + + it := &iterator{cursor: cursor} + it.seek(q.startingPoint) + + for { + // we already have a k and a v and an err from the cursor setup, so check and use these + if it.err != nil || + len(it.key) != q.keySize || + !bytes.HasPrefix(it.key, q.prefix) { + // either iteration has errored or we reached the end of this prefix + break // stop this cursor and move to the next one + } + + // "id" indexes don't contain a timestamp + if q.timestampSize == 4 { + createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:]) + if createdAt < since { + break + } + } + + if extraAuthors == nil && extraKinds == nil && extraTagValues == nil { + count++ + } else { + // fetch actual event + val, err := txn.Get(b.rawEventStore, it.valIdx) + if err != nil { + panic(err) + } + + // check it against pubkeys without decoding the entire thing + if !slices.Contains(extraAuthors, [32]byte(val[32:64])) { + it.next() + continue + } + + // check it against kinds without decoding the entire thing + if !slices.Contains(extraKinds, [2]byte(val[132:134])) { + it.next() + continue + } + + evt := &nostr.Event{} + if err := bin.Unmarshal(val, evt); err != nil { + it.next() + continue + } + + // if there is still a tag to be checked, do it now + if !evt.Tags.ContainsAny(extraTagKey, extraTagValues) { + it.next() + continue + } + + count++ + } + } + } + + return nil + }) + + return count, err +} + +// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results, following NIP-45 +func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) { + if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache { + return b.countEventsHLLCached(filter) + } + + var count int64 = 0 + + // this is 
different than CountEvents because some of these extra checks are not applicable in HLL-valid filters + queries, _, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter) + if err != nil { + return 0, nil, err + } + + hll := hyperloglog.New(offset) + + err = b.lmdbEnv.View(func(txn *lmdb.Txn) error { + // actually iterate + for _, q := range queries { + cursor, err := txn.OpenCursor(q.dbi) + if err != nil { + continue + } + + it := &iterator{cursor: cursor} + it.seek(q.startingPoint) + + for { + // we already have a k and a v and an err from the cursor setup, so check and use these + if it.err != nil || + len(it.key) != q.keySize || + !bytes.HasPrefix(it.key, q.prefix) { + // either iteration has errored or we reached the end of this prefix + break // stop this cursor and move to the next one + } + + // "id" indexes don't contain a timestamp + if q.timestampSize == 4 { + createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:]) + if createdAt < since { + break + } + } + + // fetch actual event (we need it regardless because we need the pubkey for the hll) + val, err := txn.Get(b.rawEventStore, it.valIdx) + if err != nil { + panic(err) + } + + if extraKinds == nil && extraTagValues == nil { + // nothing extra to check + count++ + hll.AddBytes(val[32:64]) + } else { + // check it against kinds without decoding the entire thing + if !slices.Contains(extraKinds, [2]byte(val[132:134])) { + it.next() + continue + } + + evt := &nostr.Event{} + if err := bin.Unmarshal(val, evt); err != nil { + it.next() + continue + } + + // if there is still a tag to be checked, do it now + if !evt.Tags.ContainsAny(extraTagKey, extraTagValues) { + it.next() + continue + } + + count++ + hll.Add(evt.PubKey) + } + } + } + + return nil + }) + + return count, hll, err +} + +// countEventsHLLCached will just return a cached value from disk (and presumably we don't even have the events required to compute this anymore). 
+func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperloglog.HyperLogLog, error) { + cacheKey := make([]byte, 2+8) + binary.BigEndian.PutUint16(cacheKey[0:2], uint16(filter.Kinds[0])) + switch filter.Kinds[0] { + case 3: + hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["p"][0][0:8*2])) + case 7: + hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["e"][0][0:8*2])) + case 1111: + hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["E"][0][0:8*2])) + } + + var count int64 + var hll *hyperloglog.HyperLogLog + + err := b.lmdbEnv.View(func(txn *lmdb.Txn) error { + val, err := txn.Get(b.hllCache, cacheKey) + if err != nil { + if lmdb.IsNotFound(err) { + return nil + } + return err + } + hll = hyperloglog.NewWithRegisters(val, 0) // offset doesn't matter here + count = int64(hll.Count()) + return nil + }) + + return count, hll, err +} + +func (b *LMDBBackend) updateHyperLogLogCachedValues(txn *lmdb.Txn, evt *nostr.Event) error { + cacheKey := make([]byte, 2+8) + binary.BigEndian.PutUint16(cacheKey[0:2], uint16(evt.Kind)) + + for ref, offset := range nip45.HyperLogLogEventPubkeyOffsetsAndReferencesForEvent(evt) { + // setup cache key (reusing buffer) + hex.Decode(cacheKey[2:2+8], []byte(ref[0:8*2])) + + // fetch hll value from cache db + hll := hyperloglog.New(offset) + val, err := txn.Get(b.hllCache, cacheKey) + if err == nil { + hll.SetRegisters(val) + } else if !lmdb.IsNotFound(err) { + return err + } + + // add this event + hll.Add(evt.PubKey) + + // save values back again + if err := txn.Put(b.hllCache, cacheKey, hll.GetRegisters(), 0); err != nil { + return err + } + } + + return nil +} diff --git a/eventstore/lmdb/delete.go b/eventstore/lmdb/delete.go new file mode 100644 index 0000000..5123696 --- /dev/null +++ b/eventstore/lmdb/delete.go @@ -0,0 +1,43 @@ +package lmdb + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/nbd-wtf/go-nostr" +) + +func (b *LMDBBackend) DeleteEvent(ctx context.Context, 
evt *nostr.Event) error { + return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + return b.delete(txn, evt) + }) +} + +func (b *LMDBBackend) delete(txn *lmdb.Txn, evt *nostr.Event) error { + idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2]) + idx, err := txn.Get(b.indexId, idPrefix8) + if lmdb.IsNotFound(err) { + // we already do not have this + return nil + } + if err != nil { + return fmt.Errorf("failed to get current idx for deleting %x: %w", evt.ID[0:8*2], err) + } + + // calculate all index keys we have for this event and delete them + for k := range b.getIndexKeysForEvent(evt) { + err := txn.Del(k.dbi, k.key, idx) + if err != nil { + return fmt.Errorf("failed to delete index entry %s for %x: %w", b.keyName(k), evt.ID[0:8*2], err) + } + } + + // delete the raw event + if err := txn.Del(b.rawEventStore, idx, nil); err != nil { + return fmt.Errorf("failed to delete raw event %x (idx %x): %w", evt.ID[0:8*2], idx, err) + } + + return nil +} diff --git a/eventstore/lmdb/fuzz_test.go b/eventstore/lmdb/fuzz_test.go new file mode 100644 index 0000000..26a1738 --- /dev/null +++ b/eventstore/lmdb/fuzz_test.go @@ -0,0 +1,137 @@ +package lmdb + +import ( + "cmp" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "os" + "testing" + "time" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" +) + +func FuzzQuery(f *testing.F) { + ctx := context.Background() + + f.Add(uint(200), uint(50), uint(13), uint(2), uint(2), uint(0), uint(1)) + f.Fuzz(func(t *testing.T, total, limit, authors, timestampAuthorFactor, seedFactor, kinds, kindFactor uint) { + total++ + authors++ + seedFactor++ + kindFactor++ + if kinds == 1 { + kinds++ + } + if limit == 0 { + return + } + + // ~ setup db + if err := os.RemoveAll("/tmp/lmdbtest"); err != nil { + t.Fatal(err) + return + } + db := &LMDBBackend{} + db.Path = "/tmp/lmdbtest" + db.extraFlags = lmdb.NoSync + db.MaxLimit 
= 500 + if err := db.Init(); err != nil { + t.Fatal(err) + return + } + defer db.Close() + + // ~ start actual test + + filter := nostr.Filter{ + Authors: make([]string, authors), + Limit: int(limit), + } + maxKind := 1 + if kinds > 0 { + filter.Kinds = make([]int, kinds) + for i := range filter.Kinds { + filter.Kinds[i] = int(kindFactor) * i + } + maxKind = filter.Kinds[len(filter.Kinds)-1] + } + + for i := 0; i < int(authors); i++ { + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, uint32(i%int(authors*seedFactor))+1) + pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + filter.Authors[i] = pk + } + + expected := make([]*nostr.Event, 0, total) + for i := 0; i < int(total); i++ { + skseed := uint32(i%int(authors*seedFactor)) + 1 + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, skseed) + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(skseed)*nostr.Timestamp(timestampAuthorFactor) + nostr.Timestamp(i), + Content: fmt.Sprintf("unbalanced %d", i), + Tags: nostr.Tags{}, + Kind: i % maxKind, + } + err := evt.Sign(hex.EncodeToString(sk)) + require.NoError(t, err) + + err = db.SaveEvent(ctx, evt) + require.NoError(t, err) + + if filter.Matches(evt) { + expected = append(expected, evt) + } + } + + slices.SortFunc(expected, nostr.CompareEventPtrReverse) + if len(expected) > int(limit) { + expected = expected[0:limit] + } + + w := eventstore.RelayWrapper{Store: db} + + start := time.Now() + + res, err := w.QuerySync(ctx, filter) + end := time.Now() + + require.NoError(t, err) + require.Equal(t, len(expected), len(res), "number of results is different than expected") + + require.Less(t, end.Sub(start).Milliseconds(), int64(1500), "query took too long") + nresults := len(expected) + + getTimestamps := func(events []*nostr.Event) []nostr.Timestamp { + res := make([]nostr.Timestamp, len(events)) + for i, evt := range events { + res[i] = evt.CreatedAt + } + return res + } + + fmt.Println(" expected result") + for i := range expected { + fmt.Println(" ", 
expected[i].CreatedAt, expected[i].ID[0:8], " ", res[i].CreatedAt, res[i].ID[0:8], " ", i) + } + + require.Equal(t, expected[0].CreatedAt, res[0].CreatedAt, "first result is wrong") + require.Equal(t, expected[nresults-1].CreatedAt, res[nresults-1].CreatedAt, "last result (%d) is wrong", nresults-1) + require.Equal(t, getTimestamps(expected), getTimestamps(res)) + + for _, evt := range res { + require.True(t, filter.Matches(evt), "event %s doesn't match filter %s", evt, filter) + } + + require.True(t, slices.IsSortedFunc(res, func(a, b *nostr.Event) int { return cmp.Compare(b.CreatedAt, a.CreatedAt) }), "results are not sorted") + }) +} diff --git a/eventstore/lmdb/helpers.go b/eventstore/lmdb/helpers.go new file mode 100644 index 0000000..d7b8e42 --- /dev/null +++ b/eventstore/lmdb/helpers.go @@ -0,0 +1,213 @@ +package lmdb + +import ( + "crypto/md5" + "encoding/binary" + "encoding/hex" + "fmt" + "iter" + "strconv" + "strings" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/nbd-wtf/go-nostr" + "golang.org/x/exp/slices" +) + +// this iterator always goes backwards +type iterator struct { + cursor *lmdb.Cursor + key []byte + valIdx []byte + err error +} + +func (it *iterator) seek(key []byte) { + if _, _, errsr := it.cursor.Get(key, nil, lmdb.SetRange); errsr != nil { + if operr, ok := errsr.(*lmdb.OpError); !ok || operr.Errno != lmdb.NotFound { + // in this case it's really an error + panic(operr) + } else { + // we're at the end and we just want notes before this, + // so we just need to set the cursor the last key, this is not a real error + it.key, it.valIdx, it.err = it.cursor.Get(nil, nil, lmdb.Last) + } + } else { + // move one back as the first step + it.key, it.valIdx, it.err = it.cursor.Get(nil, nil, lmdb.Prev) + } +} + +func (it *iterator) next() { + // move one back (we'll look into k and v and err in the next iteration) + it.key, it.valIdx, it.err = it.cursor.Get(nil, nil, lmdb.Prev) +} + +type key struct { + dbi lmdb.DBI + key []byte +} + +func (b 
*LMDBBackend) keyName(key key) string { + return fmt.Sprintf("", b.dbiName(key.dbi), key.key) +} + +func (b *LMDBBackend) getIndexKeysForEvent(evt *nostr.Event) iter.Seq[key] { + return func(yield func(key) bool) { + { + // ~ by id + k := make([]byte, 8) + hex.Decode(k[0:8], []byte(evt.ID[0:8*2])) + if !yield(key{dbi: b.indexId, key: k[0:8]}) { + return + } + } + + { + // ~ by pubkey+date + k := make([]byte, 8+4) + hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: b.indexPubkey, key: k[0 : 8+4]}) { + return + } + } + + { + // ~ by kind+date + k := make([]byte, 2+4) + binary.BigEndian.PutUint16(k[0:2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[2:2+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: b.indexKind, key: k[0 : 2+4]}) { + return + } + } + + { + // ~ by pubkey+kind+date + k := make([]byte, 8+2+4) + hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: b.indexPubkeyKind, key: k[0 : 8+2+4]}) { + return + } + } + + // ~ by tagvalue+date + // ~ by p-tag+kind+date + for i, tag := range evt.Tags { + if len(tag) < 2 || len(tag[0]) != 1 || len(tag[1]) == 0 || len(tag[1]) > 100 { + // not indexable + continue + } + firstIndex := slices.IndexFunc(evt.Tags, func(t nostr.Tag) bool { return len(t) >= 2 && t[1] == tag[1] }) + if firstIndex != i { + // duplicate + continue + } + + // get key prefix (with full length) and offset where to write the created_at + dbi, k, offset := b.getTagIndexPrefix(tag[1]) + binary.BigEndian.PutUint32(k[offset:], uint32(evt.CreatedAt)) + if !yield(key{dbi: dbi, key: k}) { + return + } + + // now the p-tag+kind+date + if dbi == b.indexTag32 && tag[0] == "p" { + k := make([]byte, 8+2+4) + hex.Decode(k[0:8], []byte(tag[1][0:8*2])) + binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[8+2:8+2+4], 
uint32(evt.CreatedAt)) + dbi := b.indexPTagKind + if !yield(key{dbi: dbi, key: k[0 : 8+2+4]}) { + return + } + } + } + + { + // ~ by date only + k := make([]byte, 4) + binary.BigEndian.PutUint32(k[0:4], uint32(evt.CreatedAt)) + if !yield(key{dbi: b.indexCreatedAt, key: k[0:4]}) { + return + } + } + } +} + +func (b *LMDBBackend) getTagIndexPrefix(tagValue string) (lmdb.DBI, []byte, int) { + var k []byte // the key with full length for created_at and idx at the end, but not filled with these + var offset int // the offset -- i.e. where the prefix ends and the created_at and idx would start + var dbi lmdb.DBI + + // if it's 32 bytes as hex, save it as bytes + if len(tagValue) == 64 { + // but we actually only use the first 8 bytes + k = make([]byte, 8+4) + if _, err := hex.Decode(k[0:8], []byte(tagValue[0:8*2])); err == nil { + offset = 8 + dbi = b.indexTag32 + return dbi, k[0 : 8+4], offset + } + } + + // if it looks like an "a" tag, index it in this special format + spl := strings.Split(tagValue, ":") + if len(spl) == 3 && len(spl[1]) == 64 { + k = make([]byte, 2+8+30) + if _, err := hex.Decode(k[2:2+8], []byte(tagValue[0:8*2])); err == nil { + if kind, err := strconv.ParseUint(spl[0], 10, 16); err == nil { + k[0] = byte(kind >> 8) + k[1] = byte(kind) + // limit "d" identifier to 30 bytes (so we don't have to grow our byte slice) + n := copy(k[2+8:2+8+30], spl[2]) + offset = 2 + 8 + n + return dbi, k[0 : offset+4], offset + } + } + } + + // index whatever else as a md5 hash of the contents + h := md5.New() + h.Write([]byte(tagValue)) + k = make([]byte, 0, 16+4) + k = h.Sum(k) + offset = 16 + dbi = b.indexTag + + return dbi, k[0 : 16+4], offset +} + +func (b *LMDBBackend) dbiName(dbi lmdb.DBI) string { + switch dbi { + case b.hllCache: + return "hllCache" + case b.settingsStore: + return "settingsStore" + case b.rawEventStore: + return "rawEventStore" + case b.indexCreatedAt: + return "indexCreatedAt" + case b.indexId: + return "indexId" + case b.indexKind: + return 
"indexKind" + case b.indexPubkey: + return "indexPubkey" + case b.indexPubkeyKind: + return "indexPubkeyKind" + case b.indexTag: + return "indexTag" + case b.indexTag32: + return "indexTag32" + case b.indexTagAddr: + return "indexTagAddr" + case b.indexPTagKind: + return "indexPTagKind" + default: + return "" + } +} diff --git a/eventstore/lmdb/lib.go b/eventstore/lmdb/lib.go new file mode 100644 index 0000000..81ea926 --- /dev/null +++ b/eventstore/lmdb/lib.go @@ -0,0 +1,208 @@ +package lmdb + +import ( + "encoding/binary" + "fmt" + "os" + "sync/atomic" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore" +) + +var _ eventstore.Store = (*LMDBBackend)(nil) + +type LMDBBackend struct { + Path string + MaxLimit int + MaxLimitNegentropy int + MapSize int64 + + lmdbEnv *lmdb.Env + extraFlags uint // (for debugging and testing) + + settingsStore lmdb.DBI + rawEventStore lmdb.DBI + indexCreatedAt lmdb.DBI + indexId lmdb.DBI + indexKind lmdb.DBI + indexPubkey lmdb.DBI + indexPubkeyKind lmdb.DBI + indexTag lmdb.DBI + indexTag32 lmdb.DBI + indexTagAddr lmdb.DBI + indexPTagKind lmdb.DBI + + hllCache lmdb.DBI + EnableHLLCacheFor func(kind int) (useCache bool, skipSavingActualEvent bool) + + lastId atomic.Uint32 +} + +func (b *LMDBBackend) Init() error { + if b.MaxLimit != 0 { + b.MaxLimitNegentropy = b.MaxLimit + } else { + b.MaxLimit = 1500 + if b.MaxLimitNegentropy == 0 { + b.MaxLimitNegentropy = 16777216 + } + } + + // create directory if it doesn't exist and open it + if err := os.MkdirAll(b.Path, 0755); err != nil { + return err + } + + return b.initialize() +} + +func (b *LMDBBackend) Close() { + b.lmdbEnv.Close() +} + +func (b *LMDBBackend) Serial() []byte { + v := b.lastId.Add(1) + vb := make([]byte, 4) + binary.BigEndian.PutUint32(vb[:], uint32(v)) + return vb +} + +// Compact can only be called when the database is not being used because it will overwrite everything. +// It will temporarily move the database to a new location, then move it back. 
+// If something goes wrong crash the process and look for the copy of the data on tmppath. +func (b *LMDBBackend) Compact(tmppath string) error { + if err := os.MkdirAll(tmppath, 0755); err != nil { + return err + } + + if err := b.lmdbEnv.Copy(tmppath); err != nil { + return fmt.Errorf("failed to copy: %w", err) + } + + if err := b.lmdbEnv.Close(); err != nil { + return err + } + if err := os.RemoveAll(b.Path); err != nil { + return err + } + if err := os.Rename(tmppath, b.Path); err != nil { + return err + } + + return b.initialize() +} + +func (b *LMDBBackend) initialize() error { + env, err := lmdb.NewEnv() + if err != nil { + return err + } + + env.SetMaxDBs(12) + env.SetMaxReaders(1000) + if b.MapSize == 0 { + env.SetMapSize(1 << 38) // ~273GB + } else { + env.SetMapSize(b.MapSize) + } + + if err := env.Open(b.Path, lmdb.NoTLS|lmdb.WriteMap|b.extraFlags, 0644); err != nil { + return err + } + b.lmdbEnv = env + + var multiIndexCreationFlags uint = lmdb.Create | lmdb.DupSort | lmdb.DupFixed + + // open each db + if err := b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + if dbi, err := txn.OpenDBI("settings", lmdb.Create); err != nil { + return err + } else { + b.settingsStore = dbi + } + if dbi, err := txn.OpenDBI("raw", lmdb.Create); err != nil { + return err + } else { + b.rawEventStore = dbi + } + if dbi, err := txn.OpenDBI("created_at", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexCreatedAt = dbi + } + if dbi, err := txn.OpenDBI("id", lmdb.Create); err != nil { + return err + } else { + b.indexId = dbi + } + if dbi, err := txn.OpenDBI("kind", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexKind = dbi + } + if dbi, err := txn.OpenDBI("pubkey", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexPubkey = dbi + } + if dbi, err := txn.OpenDBI("pubkeyKind", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexPubkeyKind = dbi + } + if dbi, err := txn.OpenDBI("tag", 
multiIndexCreationFlags); err != nil { + return err + } else { + b.indexTag = dbi + } + if dbi, err := txn.OpenDBI("tag32", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexTag32 = dbi + } + if dbi, err := txn.OpenDBI("tagaddr", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexTagAddr = dbi + } + if dbi, err := txn.OpenDBI("ptagKind", multiIndexCreationFlags); err != nil { + return err + } else { + b.indexPTagKind = dbi + } + if dbi, err := txn.OpenDBI("hllCache", lmdb.Create); err != nil { + return err + } else { + b.hllCache = dbi + } + return nil + }); err != nil { + return err + } + + // get lastId + if err := b.lmdbEnv.View(func(txn *lmdb.Txn) error { + txn.RawRead = true + cursor, err := txn.OpenCursor(b.rawEventStore) + if err != nil { + return err + } + defer cursor.Close() + k, _, err := cursor.Get(nil, nil, lmdb.Last) + if lmdb.IsNotFound(err) { + // nothing found, so we're at zero + return nil + } + if err != nil { + return err + } + b.lastId.Store(binary.BigEndian.Uint32(k)) + + return nil + }); err != nil { + return err + } + + return b.runMigrations() +} diff --git a/eventstore/lmdb/migration.go b/eventstore/lmdb/migration.go new file mode 100644 index 0000000..e0f7d0e --- /dev/null +++ b/eventstore/lmdb/migration.go @@ -0,0 +1,147 @@ +package lmdb + +import ( + "encoding/binary" + "fmt" + "log" + + "github.com/PowerDNS/lmdb-go/lmdb" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" +) + +const ( + DB_VERSION byte = 'v' +) + +func (b *LMDBBackend) runMigrations() error { + return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + var version uint16 + v, err := txn.Get(b.settingsStore, []byte{DB_VERSION}) + if err != nil { + if lmdb.IsNotFound(err) { + version = 0 + } else if v == nil { + return fmt.Errorf("failed to read database version: %w", err) + } + } else { + version = binary.BigEndian.Uint16(v) + } + + // all previous migrations are useless because we will just 
reindex everything + if version == 0 { + // if there is any data in the relay we will just set the version to the max without saying anything + cursor, err := txn.OpenCursor(b.rawEventStore) + if err != nil { + return fmt.Errorf("failed to open cursor in migration: %w", err) + } + defer cursor.Close() + + hasAnyEntries := false + _, _, err = cursor.Get(nil, nil, lmdb.First) + for err == nil { + hasAnyEntries = true + break + } + + if !hasAnyEntries { + b.setVersion(txn, 8) + version = 8 + return nil + } + } + + // do the migrations in increasing steps (there is no rollback) + // + + // this is when we reindex everything + if version < 8 { + log.Println("[lmdb] migration 8: reindex everything") + + if err := txn.Drop(b.indexId, false); err != nil { + return err + } + if err := txn.Drop(b.indexCreatedAt, false); err != nil { + return err + } + if err := txn.Drop(b.indexKind, false); err != nil { + return err + } + if err := txn.Drop(b.indexPTagKind, false); err != nil { + return err + } + if err := txn.Drop(b.indexPubkey, false); err != nil { + return err + } + if err := txn.Drop(b.indexPubkeyKind, false); err != nil { + return err + } + if err := txn.Drop(b.indexTag, false); err != nil { + return err + } + if err := txn.Drop(b.indexTag32, false); err != nil { + return err + } + if err := txn.Drop(b.indexTagAddr, false); err != nil { + return err + } + + cursor, err := txn.OpenCursor(b.rawEventStore) + if err != nil { + return fmt.Errorf("failed to open cursor in migration 8: %w", err) + } + defer cursor.Close() + + seen := make(map[[32]byte]struct{}) + + idx, val, err := cursor.Get(nil, nil, lmdb.First) + for err == nil { + idp := *(*[32]byte)(val[0:32]) + if _, isDup := seen[idp]; isDup { + // do not index, but delete this entry + if err := txn.Del(b.rawEventStore, idx, nil); err != nil { + return err + } + + // next + idx, val, err = cursor.Get(nil, nil, lmdb.Next) + continue + } + + seen[idp] = struct{}{} + + evt := &nostr.Event{} + if err := bin.Unmarshal(val, 
evt); err != nil { + return fmt.Errorf("error decoding event %x on migration 5: %w", idx, err) + } + + for key := range b.getIndexKeysForEvent(evt) { + if err := txn.Put(key.dbi, key.key, idx, 0); err != nil { + return fmt.Errorf("failed to save index %s for event %s (%v) on migration 8: %w", + b.keyName(key), evt.ID, idx, err) + } + } + + // next + idx, val, err = cursor.Get(nil, nil, lmdb.Next) + } + if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound { + // exited the loop with an error different from NOTFOUND + return err + } + + // bump version + if err := b.setVersion(txn, 8); err != nil { + return err + } + } + + return nil + }) +} + +func (b *LMDBBackend) setVersion(txn *lmdb.Txn, version uint16) error { + buf, err := txn.PutReserve(b.settingsStore, []byte{DB_VERSION}, 4, 0) + binary.BigEndian.PutUint16(buf, version) + return err +} diff --git a/eventstore/lmdb/query.go b/eventstore/lmdb/query.go new file mode 100644 index 0000000..7d25638 --- /dev/null +++ b/eventstore/lmdb/query.go @@ -0,0 +1,410 @@ +package lmdb + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "log" + "slices" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/internal" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" +) + +func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + + if filter.Search != "" { + close(ch) + return ch, nil + } + + // max number of events we'll return + maxLimit := b.MaxLimit + var limit int + if eventstore.IsNegentropySession(ctx) { + maxLimit = b.MaxLimitNegentropy + limit = maxLimit + } else { + limit = maxLimit / 4 + } + if filter.Limit > 0 && filter.Limit <= maxLimit { + limit = filter.Limit + } + if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 { + close(ch) + return ch, nil + } else if tlimit > 0 { + limit = tlimit + } + + go 
b.lmdbEnv.View(func(txn *lmdb.Txn) error { + txn.RawRead = true + defer close(ch) + results, err := b.query(txn, filter, limit) + + for _, ie := range results { + ch <- ie.Event + } + + return err + }) + + return ch, nil +} + +func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) { + queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter) + if err != nil { + return nil, err + } + + iterators := make([]*iterator, len(queries)) + exhausted := make([]bool, len(queries)) // indicates that a query won't be used anymore + results := make([][]internal.IterEvent, len(queries)) + pulledPerQuery := make([]int, len(queries)) + + // these are kept updated so we never pull from the iterator that is at further distance + // (i.e. the one that has the oldest event among all) + // we will continue to pull from it as soon as some other iterator takes the position + oldest := internal.IterEvent{Q: -1} + + secondPhase := false // after we have gathered enough events we will change the way we iterate + secondBatch := make([][]internal.IterEvent, 0, len(queries)+1) + secondPhaseParticipants := make([]int, 0, len(queries)+1) + + // while merging results in the second phase we will alternate between these two lists + // to avoid having to create new lists all the time + var secondPhaseResultsA []internal.IterEvent + var secondPhaseResultsB []internal.IterEvent + var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating + var secondPhaseHasResultsPending bool + + remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing + batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + firstPhaseTotalPulled := 0 + + exhaust := func(q int) { + exhausted[q] = true + remainingUnexhausted-- + if q == oldest.Q { + oldest = internal.IterEvent{Q: -1} + } + } + + var firstPhaseResults 
[]internal.IterEvent + + for q := range queries { + cursor, err := txn.OpenCursor(queries[q].dbi) + if err != nil { + return nil, err + } + iterators[q] = &iterator{cursor: cursor} + defer cursor.Close() + iterators[q].seek(queries[q].startingPoint) + results[q] = make([]internal.IterEvent, 0, batchSizePerQuery*2) + } + + // fmt.Println("queries", len(queries)) + + for c := 0; ; c++ { + batchSizePerQuery = internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + + // fmt.Println(" iteration", c, "remaining", remainingUnexhausted, "batchsize", batchSizePerQuery) + // we will go through all the iterators in batches until we have pulled all the required results + for q, query := range queries { + if exhausted[q] { + continue + } + if oldest.Q == q && remainingUnexhausted > 1 { + continue + } + // fmt.Println(" query", q, unsafe.Pointer(&results[q]), hex.EncodeToString(query.prefix), len(results[q])) + + it := iterators[q] + pulledThisIteration := 0 + + for { + // we already have a k and a v and an err from the cursor setup, so check and use these + if it.err != nil || + len(it.key) != query.keySize || + !bytes.HasPrefix(it.key, query.prefix) { + // either iteration has errored or we reached the end of this prefix + // fmt.Println(" reached end", it.key, query.keySize, query.prefix) + exhaust(q) + break + } + + // "id" indexes don't contain a timestamp + if query.timestampSize == 4 { + createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:]) + if createdAt < since { + // fmt.Println(" reached since", createdAt, "<", since) + exhaust(q) + break + } + } + + // fetch actual event + val, err := txn.Get(b.rawEventStore, it.valIdx) + if err != nil { + log.Printf( + "lmdb: failed to get %x based on prefix %x, index key %x from raw event store: %s\n", + it.valIdx, query.prefix, it.key, err) + return nil, fmt.Errorf("iteration error: %w", err) + } + + // check it against pubkeys without decoding the entire thing + if extraAuthors != nil && 
!slices.Contains(extraAuthors, [32]byte(val[32:64])) { + it.next() + continue + } + + // check it against kinds without decoding the entire thing + if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(val[132:134])) { + it.next() + continue + } + + // decode the entire thing + event := &nostr.Event{} + if err := bin.Unmarshal(val, event); err != nil { + log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", val[0:32], + query.prefix, query.startingPoint, query.dbi, err) + return nil, fmt.Errorf("event read error: %w", err) + } + + // fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), "author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132])), hex.EncodeToString(it.key), it.valIdx) + + // if there is still a tag to be checked, do it now + if extraTagValues != nil && !event.Tags.ContainsAny(extraTagKey, extraTagValues) { + it.next() + continue + } + + // this event is good to be used + evt := internal.IterEvent{Event: event, Q: q} + // + // + if secondPhase { + // do the process described below at HIWAWVRTP. + // if we've reached here this means we've already passed the `since` check. + // now we have to eliminate the event currently at the `since` threshold. + nextThreshold := firstPhaseResults[len(firstPhaseResults)-2] + if oldest.Event == nil { + // fmt.Println(" b1", evt.ID[0:8]) + // BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE) + // when we don't have the oldest set, we will keep the results + // and not change the cutting point -- it's bad, but hopefully not that bad. 
+ results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt > oldest.CreatedAt { + // fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt, evt.ID[0:8]) + // one of the events we have stored is the actual next threshold + // eliminate last, update since with oldest + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since, evt.ID[0:8]) + // we null the oldest Event as we can't rely on it anymore + // (we'll fall under BWWDHTOE above) until we have a new oldest set. + oldest = internal.IterEvent{Q: -1} + // anything we got that would be above this won't trigger an update to + // the oldest anyway, because it will be discarded as being after the limit. + // + // finally + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt < evt.CreatedAt { + // the next last event in the firstPhaseResults is the next threshold + // fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt, evt.ID[0:8]) + // eliminate last, update since with the antelast + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(nextThreshold.CreatedAt) + // fmt.Println(" new since", since) + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + // update the oldest event + if evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } else { + // fmt.Println(" b4", evt.ID[0:8]) + // oops, _we_ are the next `since` threshold + firstPhaseResults[len(firstPhaseResults)-1] = evt + since = uint32(evt.CreatedAt) + // fmt.Println(" new since", since) + // do not add us to the results to be merged later + // as we're already inhabiting the firstPhaseResults slice + } + } else { + results[q] = append(results[q], evt) + firstPhaseTotalPulled++ + + // 
update the oldest event + if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } + + pulledPerQuery[q]++ + pulledThisIteration++ + if pulledThisIteration > batchSizePerQuery { + // batch filled + it.next() + // fmt.Println(" filled", hex.EncodeToString(it.key), it.valIdx) + break + } + if pulledPerQuery[q] >= limit { + // batch filled + reached limit for this query (which is the global limit) + exhaust(q) + it.next() + break + } + + it.next() + } + } + + // we will do this check if we don't accumulated the requested number of events yet + // fmt.Println("oldest", oldest.Event, "from iter", oldest.Q) + if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) { + // fmt.Println("second phase aggregation!") + // when we are in the second phase we will aggressively aggregate results on every iteration + // + secondBatch = secondBatch[:0] + for s := 0; s < len(secondPhaseParticipants); s++ { + q := secondPhaseParticipants[s] + + if len(results[q]) > 0 { + secondBatch = append(secondBatch, results[q]) + } + + if exhausted[q] { + secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s) + s-- + } + } + + // every time we get here we will alternate between these A and B lists + // combining everything we have into a new partial results list. + // after we've done that we can again set the oldest. 
+ // fmt.Println(" xxx", secondPhaseResultsToggle) + if secondPhaseResultsToggle { + secondBatch = append(secondBatch, secondPhaseResultsB) + secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA) + oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1] + // fmt.Println(" new aggregated a", len(secondPhaseResultsB)) + } else { + secondBatch = append(secondBatch, secondPhaseResultsA) + secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB) + oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1] + // fmt.Println(" new aggregated b", len(secondPhaseResultsB)) + } + secondPhaseResultsToggle = !secondPhaseResultsToggle + + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since) + + // reset the `results` list so we can keep using it + results = results[:len(queries)] + for _, q := range secondPhaseParticipants { + results[q] = results[q][:0] + } + } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 { + // fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted) + + // we will exclude this oldest number as it is not relevant anymore + // (we now want to keep track only of the oldest among the remaining iterators) + oldest = internal.IterEvent{Q: -1} + + // HOW IT WORKS AFTER WE'VE REACHED THIS POINT (HIWAWVRTP) + // now we can combine the results we have and check what is our current oldest event. + // we also discard anything that is after the current cutting point (`limit`). + // so if we have [1,2,3], [10, 15, 20] and [7, 21, 49] but we only want 6 total + // we can just keep [1,2,3,7,10,15] and discard [20, 21, 49], + // and also adjust our `since` parameter to `15`, discarding anything we get after it + // and immediately declaring that iterator exhausted. 
+ // also every time we get result that is more recent than this updated `since` we can + // keep it but also discard the previous since, moving the needle one back -- for example, + // if we get an `8` we can keep it and move the `since` parameter to `10`, discarding `15` + // in the process. + all := make([][]internal.IterEvent, len(results)) + copy(all, results) // we have to use this otherwise internal.MergeSortMultiple will scramble our results slice + firstPhaseResults = internal.MergeSortMultiple(all, limit, nil) + oldest = firstPhaseResults[limit-1] + since = uint32(oldest.CreatedAt) + // fmt.Println("new since", since) + + for q := range queries { + if exhausted[q] { + continue + } + + // we also automatically exhaust any of the iterators that have already passed the + // cutting point (`since`) + if results[q][len(results[q])-1].CreatedAt < oldest.CreatedAt { + exhausted[q] = true + remainingUnexhausted-- + continue + } + + // for all the remaining iterators, + // since we have merged all the events in this `firstPhaseResults` slice, we can empty the + // current `results` slices and reuse them. 
+ results[q] = results[q][:0] + + // build this index of indexes with everybody who remains + secondPhaseParticipants = append(secondPhaseParticipants, q) + } + + // we create these two lists and alternate between them so we don't have to create a + // a new one every time + secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2) + secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2) + + // from now on we won't run this block anymore + secondPhase = true + } + + // fmt.Println("remaining", remainingUnexhausted) + if remainingUnexhausted == 0 { + break + } + } + + // fmt.Println("is secondPhase?", secondPhase) + + var combinedResults []internal.IterEvent + + if secondPhase { + // fmt.Println("ending second phase") + // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff, + // the other will be empty + var secondPhaseResults []internal.IterEvent + // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB)) + if secondPhaseResultsToggle { + secondPhaseResults = secondPhaseResultsB + combinedResults = secondPhaseResultsA[0:limit] // reuse this + // fmt.Println(" using b", len(secondPhaseResultsA)) + } else { + secondPhaseResults = secondPhaseResultsA + combinedResults = secondPhaseResultsB[0:limit] // reuse this + // fmt.Println(" using a", len(secondPhaseResultsA)) + } + + all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults} + combinedResults = internal.MergeSortMultiple(all, limit, combinedResults) + // fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit) + } else { + combinedResults = make([]internal.IterEvent, limit) + combinedResults = internal.MergeSortMultiple(results, limit, combinedResults) + } + + return combinedResults, nil +} diff --git a/eventstore/lmdb/query_planner.go b/eventstore/lmdb/query_planner.go new file mode 100644 index 0000000..cf7658e --- /dev/null +++ b/eventstore/lmdb/query_planner.go @@ -0,0 
// query describes one backwards index scan derived from a nostr filter:
// which index database to walk, the fixed key prefix to match, and how the
// keys in that index are laid out.
type query struct {
	i             int               // position of this query among its sibling queries for the same filter
	dbi           lmdb.DBI          // index database this query iterates over
	prefix        []byte            // fixed key prefix every matching index entry must share
	results       chan *nostr.Event // buffered per-query channel created in prepareQueries -- NOTE(review): not consumed in the query path shown here; confirm it is still needed
	keySize       int               // exact length of a valid key in this index (prefix plus optional timestamp); keys of other lengths are treated as end-of-range
	timestampSize int               // 4 when keys end in a big-endian created_at; 0 for the id index, whose keys carry no timestamp
	startingPoint []byte            // prefix + "until" timestamp: the key the backwards iteration seeks to first
}
index for this as long as we have something else to match with + if goodness < 2 && (len(filter.Authors) > 0 || len(filter.Kinds) > 0) { + goto pubkeyMatching + } + + // only "p" tag has a goodness of 2, so + if goodness == 2 { + // this means we got a "p" tag, so we will use the ptag-kind index + i := 0 + if filter.Kinds != nil { + queries = make([]query, len(tagValues)*len(filter.Kinds)) + for _, value := range tagValues { + if len(value) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + + for _, kind := range filter.Kinds { + k := make([]byte, 8+2) + if _, err := hex.Decode(k[0:8], []byte(value[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + binary.BigEndian.PutUint16(k[8:8+2], uint16(kind)) + queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4, timestampSize: 4} + i++ + } + } + } else { + // even if there are no kinds, in that case we will just return any kind and not care + queries = make([]query, len(tagValues)) + for i, value := range tagValues { + if len(value) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + + k := make([]byte, 8) + if _, err := hex.Decode(k[0:8], []byte(value[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + queries[i] = query{i: i, dbi: b.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4, timestampSize: 4} + } + } + } else { + // otherwise we will use a plain tag index + queries = make([]query, len(tagValues)) + for i, value := range tagValues { + // get key prefix (with full length) and offset where to write the created_at + dbi, k, offset := b.getTagIndexPrefix(value) + // remove the last parts part to get just the prefix we want here + prefix := k[0:offset] + queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4, timestampSize: 4} + i++ + } + + // add an extra kind filter if 
available (only do this on plain tag index, not on ptag-kind index) + if filter.Kinds != nil { + extraKinds = make([][2]byte, len(filter.Kinds)) + for i, kind := range filter.Kinds { + binary.BigEndian.PutUint16(extraKinds[i][0:2], uint16(kind)) + } + } + } + + // add an extra author search if possible + if filter.Authors != nil { + extraAuthors = make([][32]byte, len(filter.Authors)) + for i, pk := range filter.Authors { + hex.Decode(extraAuthors[i][:], []byte(pk)) + } + } + + // add an extra useless tag if available + filter.Tags = internal.CopyMapWithoutKey(filter.Tags, tagKey) + if len(filter.Tags) > 0 { + extraTagKey, extraTagValues, _ = internal.ChooseNarrowestTag(filter) + } + + return queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, nil + } + +pubkeyMatching: + if len(filter.Authors) > 0 { + if len(filter.Kinds) == 0 { + // will use pubkey index + queries = make([]query, len(filter.Authors)) + for i, pubkeyHex := range filter.Authors { + if len(pubkeyHex) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + prefix := make([]byte, 8) + if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + queries[i] = query{i: i, dbi: b.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4} + } + } else { + // will use pubkeyKind index + queries = make([]query, len(filter.Authors)*len(filter.Kinds)) + i := 0 + for _, pubkeyHex := range filter.Authors { + for _, kind := range filter.Kinds { + if len(pubkeyHex) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + prefix := make([]byte, 8+2) + if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind)) + queries[i] = query{i: i, dbi: 
b.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4} + i++ + } + } + } + + // potentially with an extra useless tag filtering + extraTagKey, extraTagValues, _ = internal.ChooseNarrowestTag(filter) + return queries, nil, nil, extraTagKey, extraTagValues, since, nil + } + + if len(filter.Kinds) > 0 { + // will use a kind index + queries = make([]query, len(filter.Kinds)) + for i, kind := range filter.Kinds { + prefix := make([]byte, 2) + binary.BigEndian.PutUint16(prefix[0:2], uint16(kind)) + queries[i] = query{i: i, dbi: b.indexKind, prefix: prefix[0:2], keySize: 2 + 4, timestampSize: 4} + } + + // potentially with an extra useless tag filtering + tagKey, tagValues, _ := internal.ChooseNarrowestTag(filter) + return queries, nil, nil, tagKey, tagValues, since, nil + } + + // if we got here our query will have nothing to filter with + queries = make([]query, 1) + prefix := make([]byte, 0) + queries[0] = query{i: 0, dbi: b.indexCreatedAt, prefix: prefix, keySize: 0 + 4, timestampSize: 4} + return queries, nil, nil, "", nil, since, nil +} diff --git a/eventstore/lmdb/replace.go b/eventstore/lmdb/replace.go new file mode 100644 index 0000000..5aa10d1 --- /dev/null +++ b/eventstore/lmdb/replace.go @@ -0,0 +1,49 @@ +package lmdb + +import ( + "context" + "fmt" + "math" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +func (b *LMDBBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + // sanity checking + if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { + return fmt.Errorf("event with values out of expected boundaries") + } + + return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + // when addressable, add the "d" tag to the filter + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + // now we fetch the 
past events, whatever they are, delete them and then save the new + results, err := b.query(txn, filter, 10) // in theory limit could be just 1 and this should work + if err != nil { + return fmt.Errorf("failed to query past events with %s: %w", filter, err) + } + + shouldStore := true + for _, previous := range results { + if internal.IsOlder(previous.Event, evt) { + if err := b.delete(txn, previous.Event); err != nil { + return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err) + } + } else { + // there is a newer event already stored, so we won't store this + shouldStore = false + } + } + if shouldStore { + return b.save(txn, evt) + } + + return nil + }) +} diff --git a/eventstore/lmdb/save.go b/eventstore/lmdb/save.go new file mode 100644 index 0000000..752ac80 --- /dev/null +++ b/eventstore/lmdb/save.go @@ -0,0 +1,71 @@ +package lmdb + +import ( + "context" + "encoding/hex" + "fmt" + "math" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore" + bin "github.com/fiatjaf/eventstore/internal/binary" + "github.com/nbd-wtf/go-nostr" +) + +func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error { + // sanity checking + if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { + return fmt.Errorf("event with values out of expected boundaries") + } + + return b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + if b.EnableHLLCacheFor != nil { + // modify hyperloglog caches relative to this + useCache, skipSaving := b.EnableHLLCacheFor(evt.Kind) + + if useCache { + err := b.updateHyperLogLogCachedValues(txn, evt) + if err != nil { + return fmt.Errorf("failed to update hll cache: %w", err) + } + if skipSaving { + return nil + } + } + } + + // check if we already have this id + id, _ := hex.DecodeString(evt.ID) + _, err := txn.Get(b.indexId, id) + if operr, ok := err.(*lmdb.OpError); ok && operr.Errno != lmdb.NotFound { + // we will only proceed if we get a NotFound + return eventstore.ErrDupEvent + 
} + + return b.save(txn, evt) + }) +} + +func (b *LMDBBackend) save(txn *lmdb.Txn, evt *nostr.Event) error { + // encode to binary form so we'll save it + bin, err := bin.Marshal(evt) + if err != nil { + return err + } + + idx := b.Serial() + // raw event store + if err := txn.Put(b.rawEventStore, idx, bin, 0); err != nil { + return err + } + + // put indexes + for k := range b.getIndexKeysForEvent(evt) { + err := txn.Put(k.dbi, k.key, idx, 0) + if err != nil { + return err + } + } + + return nil +} diff --git a/eventstore/lmdb/testdata/fuzz/FuzzQuery b/eventstore/lmdb/testdata/fuzz/FuzzQuery new file mode 120000 index 0000000..eed0ba0 --- /dev/null +++ b/eventstore/lmdb/testdata/fuzz/FuzzQuery @@ -0,0 +1 @@ +../../../internal/testdata/fuzz/FuzzQuery \ No newline at end of file diff --git a/eventstore/mmm/betterbinary/codec.go b/eventstore/mmm/betterbinary/codec.go new file mode 100644 index 0000000..325631e --- /dev/null +++ b/eventstore/mmm/betterbinary/codec.go @@ -0,0 +1,139 @@ +package betterbinary + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "math" + + "github.com/nbd-wtf/go-nostr" +) + +const ( + MaxKind = math.MaxUint16 + MaxCreatedAt = math.MaxUint32 + MaxContentSize = math.MaxUint16 + MaxTagCount = math.MaxUint16 + MaxTagItemCount = math.MaxUint8 + MaxTagItemSize = math.MaxUint16 +) + +func Measure(evt nostr.Event) int { + n := 135 // static base + + n += 2 + // tag section length + 2 + // number of tags + len(evt.Tags)*3 // each tag offset + each tag item count + for _, tag := range evt.Tags { + n += len(tag) * 2 // item length for each item in this tag + for _, item := range tag { + n += len(item) // actual tag item + } + } + + // content length and actual content + n += 2 + len(evt.Content) + + return n +} + +func Marshal(evt nostr.Event, buf []byte) error { + buf[0] = 0 + + if evt.Kind > MaxKind { + return fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, MaxKind) + } + binary.LittleEndian.PutUint16(buf[1:3], uint16(evt.Kind)) + + 
if evt.CreatedAt > MaxCreatedAt { + return fmt.Errorf("created_at is too big: %d, max is %d", evt.CreatedAt, MaxCreatedAt) + } + binary.LittleEndian.PutUint32(buf[3:7], uint32(evt.CreatedAt)) + + hex.Decode(buf[7:39], []byte(evt.ID)) + hex.Decode(buf[39:71], []byte(evt.PubKey)) + hex.Decode(buf[71:135], []byte(evt.Sig)) + + tagBase := 135 + // buf[135:137] (tagsSectionLength) will be set later when we know the absolute size of the tags section + + ntags := len(evt.Tags) + if ntags > MaxTagCount { + return fmt.Errorf("can't encode too many tags: %d, max is %d", ntags, MaxTagCount) + } + binary.LittleEndian.PutUint16(buf[137:139], uint16(ntags)) + + tagOffset := 2 + 2 + ntags*2 + for t, tag := range evt.Tags { + binary.LittleEndian.PutUint16(buf[tagBase+2+2+t*2:], uint16(tagOffset)) + + itemCount := len(tag) + if itemCount > MaxTagItemCount { + return fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, MaxTagItemCount) + } + buf[tagBase+tagOffset] = uint8(itemCount) + + itemOffset := 1 + for _, item := range tag { + itemSize := len(item) + if itemSize > MaxTagItemSize { + return fmt.Errorf("tag item is too large: %d, max is %d", itemSize, MaxTagItemSize) + } + + binary.LittleEndian.PutUint16(buf[tagBase+tagOffset+itemOffset:], uint16(itemSize)) + copy(buf[tagBase+tagOffset+itemOffset+2:], []byte(item)) + itemOffset += 2 + len(item) + } + tagOffset += itemOffset + } + + tagsSectionLength := tagOffset + binary.LittleEndian.PutUint16(buf[tagBase:], uint16(tagsSectionLength)) + + // content + if contentLength := len(evt.Content); contentLength > MaxContentSize { + return fmt.Errorf("content is too large: %d, max is %d", contentLength, MaxContentSize) + } else { + binary.LittleEndian.PutUint16(buf[tagBase+tagsSectionLength:], uint16(contentLength)) + } + copy(buf[tagBase+tagsSectionLength+2:], []byte(evt.Content)) + + return nil +} + +func Unmarshal(data []byte, evt *nostr.Event) (err error) { + defer func() { + if r := recover(); r != nil { + 
err = fmt.Errorf("failed to decode binary: %v", r) + } + }() + + evt.Kind = int(binary.LittleEndian.Uint16(data[1:3])) + evt.CreatedAt = nostr.Timestamp(binary.LittleEndian.Uint32(data[3:7])) + evt.ID = hex.EncodeToString(data[7:39]) + evt.PubKey = hex.EncodeToString(data[39:71]) + evt.Sig = hex.EncodeToString(data[71:135]) + + const tagbase = 135 + tagsSectionLength := binary.LittleEndian.Uint16(data[tagbase:]) + ntags := binary.LittleEndian.Uint16(data[tagbase+2:]) + evt.Tags = make(nostr.Tags, ntags) + for t := range evt.Tags { + offset := binary.LittleEndian.Uint16(data[tagbase+4+t*2:]) + nitems := int(data[tagbase+offset]) + tag := make(nostr.Tag, nitems) + + curr := tagbase + offset + 1 + for i := range tag { + length := binary.LittleEndian.Uint16(data[curr:]) + tag[i] = string(data[curr+2 : curr+2+length]) + curr += 2 + length + } + evt.Tags[t] = tag + } + + contentLength := binary.LittleEndian.Uint16(data[tagbase+tagsSectionLength:]) + evt.Content = string(data[tagbase+tagsSectionLength+2 : tagbase+tagsSectionLength+2+contentLength]) + + return err +} diff --git a/eventstore/mmm/betterbinary/codec_test.go b/eventstore/mmm/betterbinary/codec_test.go new file mode 100644 index 0000000..1cefdaf --- /dev/null +++ b/eventstore/mmm/betterbinary/codec_test.go @@ -0,0 +1,182 @@ +package betterbinary + +import ( + "os" + "slices" + "testing" + + "github.com/kr/pretty" + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" +) + +func TestBaseCase(t *testing.T) { + for _, tc := range []struct { + json string + bin []byte + }{ + { + `{"id":"a9663055164ab8b30d9524656370c4bf93393bb051b7edf4556f40c5298dc0c7","pubkey":"ee11a5dff40c19a555f41fe42b48f00e618c91225622ae37b6c2bb67b76c4e49","created_at":1681778790,"kind":1,"sig":"4dfea1a6f73141d5691e43afc3234dbe73016db0fb207cf247e0127cc2591ee6b4be5b462272030a9bde75882aae810f359682b1b6ce6cbb97201141c576db42","content":"He got snowed in"}`, + []byte{ + 0, // versioning byte + 1, 0, // kind 1 + 102, 232, 61, 100, // created 
at 1681778790 + 169, 102, 48, 85, 22, 74, 184, 179, 13, 149, 36, 101, 99, 112, 196, 191, 147, 57, + 59, 176, 81, 183, 237, 244, 85, 111, 64, 197, 41, 141, 192, 199, // id + 238, 17, 165, 223, 244, 12, 25, 165, 85, 244, 31, 228, 43, 72, 240, 14, 97, 140, + 145, 34, 86, 34, 174, 55, 182, 194, 187, 103, 183, 108, 78, 73, // pubkey + 77, 254, 161, 166, 247, 49, 65, 213, 105, 30, 67, 175, 195, 35, 77, 190, 115, 1, + 109, 176, 251, 32, 124, 242, 71, 224, 18, 124, 194, 89, 30, 230, 180, 190, 91, 70, + 34, 114, 3, 10, 155, 222, 117, 136, 42, 174, 129, 15, 53, 150, 130, 177, 182, 206, + 108, 187, 151, 32, 17, 65, 197, 118, 219, 66, // sig + // 135: + 4, 0, // tags section is 4 bytes long + 0, 0, // there are zero tags + // 139: (135+4) + 16, 0, // the content is 16 bytes long + 72, 101, 32, 103, 111, 116, 32, 115, 110, 111, 119, 101, 100, 32, 105, + 110, // "He got snowed in" + // 157: + }, + }, + { + `{"id":"a9663055164ab8b30d9524656370c4bf93393bb051b7edf4556f40c5298dc0c7","pubkey":"ee11a5dff40c19a555f41fe42b48f00e618c91225622ae37b6c2bb67b76c4e49","created_at":1681778790,"kind":1,"sig":"4dfea1a6f73141d5691e43afc3234dbe73016db0fb207cf247e0127cc2591ee6b4be5b462272030a9bde75882aae810f359682b1b6ce6cbb97201141c576db42","content":"He got snowed in","tags":[["client","gossip"],["p","e2ccf7cf20403f3f2a4a55b328f0de3be38558a7d5f33632fdaaefc726c1c8eb"],["e","2c86abcc98f7fd8a6750aab8df6c1863903f107206cc2d72e8afeb6c38357aed","wss://nostr-pub.wellorder.net/","root"]]}`, + []byte{ + 0, // versioning byte + 1, 0, // kind 1 + 102, 232, 61, 100, // created at 1681778790 + 169, 102, 48, 85, 22, 74, 184, 179, 13, 149, 36, 101, 99, 112, 196, 191, 147, 57, + 59, 176, 81, 183, 237, 244, 85, 111, 64, 197, 41, 141, 192, 199, // id + 238, 17, 165, 223, 244, 12, 25, 165, 85, 244, 31, 228, 43, 72, 240, 14, 97, 140, + 145, 34, 86, 34, 174, 55, 182, 194, 187, 103, 183, 108, 78, 73, // pubkey + 77, 254, 161, 166, 247, 49, 65, 213, 105, 30, 67, 175, 195, 35, 77, 190, 115, 1, + 109, 176, 251, 32, 124, 
242, 71, 224, 18, 124, 194, 89, 30, 230, 180, 190, 91, 70, + 34, 114, 3, 10, 155, 222, 117, 136, 42, 174, 129, 15, 53, 150, 130, 177, 182, 206, + 108, 187, 151, 32, 17, 65, 197, 118, 219, 66, // sig + // 135: + 205, 0, // tags section is 205 bytes long + 3, 0, // there are three tags + 10, 0, // first tag is at offset 10 + 27, 0, // second tag is at offset 27 + 97, 0, // third tag is at offset 97 + // 145: (135+10) + 2, // the first tag has 2 strings + 6, 0, // the first string is 6 bytes long + 99, 108, 105, 101, 110, 116, // "client" + 6, 0, // the second string is 6 bytes long + 103, 111, 115, 115, 105, 112, // "gossip" + // 162: (135+27) + 2, // the second tag has two strings + 1, 0, // the first string is 1 char long + 112, // "p" + 64, 0, // the second string is 64 bytes long + 101, 50, 99, 99, 102, 55, 99, 102, 50, 48, 52, 48, 51, 102, 51, 102, 50, 97, 52, + 97, 53, 53, 98, 51, 50, 56, 102, 48, 100, 101, 51, 98, 101, 51, 56, 53, 53, 56, 97, + 55, 100, 53, 102, 51, 51, 54, 51, 50, 102, 100, 97, 97, 101, 102, 99, 55, 50, 54, + 99, 49, 99, 56, 101, + 98, // "e2ccf7cf20403f3f2a4a55b328f0de3be38558a7d5f33632fdaaefc726c1c8eb" + // 232: (135+97) + 4, // the third tag has 4 strings + 1, 0, // the first string is 1 char long + 101, // "e" + 64, 0, // the second string is 64 bytes long + 50, 99, 56, 54, 97, 98, 99, 99, 57, 56, 102, 55, 102, 100, 56, 97, 54, 55, 53, 48, + 97, 97, 98, 56, 100, 102, 54, 99, 49, 56, 54, 51, 57, 48, 51, 102, 49, 48, 55, 50, + 48, 54, 99, 99, 50, 100, 55, 50, 101, 56, 97, 102, 101, 98, 54, 99, 51, 56, 51, 53, + 55, 97, 101, + 100, // "2c86abcc98f7fd8a6750aab8df6c1863903f107206cc2d72e8afeb6c38357aed" + 30, 0, // the third string is 30 bytes long + 119, 115, 115, 58, 47, 47, 110, 111, 115, 116, 114, 45, 112, 117, 98, 46, 119, 101, + 108, 108, 111, 114, 100, 101, 114, 46, 110, 101, 116, + 47, // "wss://nostr-pub.wellorder.net/" + 4, 0, // the fourth string is 4 bytes long + 114, 111, 111, 116, // "root" + // 340: (135+205) + 16, 0, // the 
content is 16 bytes long + 72, 101, 32, 103, 111, 116, 32, 115, 110, 111, 119, 101, 100, 32, 105, + 110, // "He got snowed in" + // 358: + }, + }, + } { + evt := nostr.Event{} + err := easyjson.Unmarshal([]byte(tc.json), &evt) + if err != nil { + t.Fatalf("failed to parse event json: %s", err) + return + } + + size := Measure(evt) + if size != len(tc.bin) { + t.Fatalf("wrong measure. expected %d, got %d", len(tc.bin), size) + return + } + + bin := make([]byte, size) + + if err := Marshal(evt, bin); err != nil { + t.Fatalf("failed to marshal to binary: %s", err) + return + } + + if !slices.Equal(tc.bin, bin) { + pretty.Fdiff(os.Stdout, tc.bin, bin) + t.Fatalf("invalid bytes result -- expected\n%v\ngot\n%v", tc.bin, bin) + return + } + + var res nostr.Event + if err := Unmarshal(bin, &res); err != nil { + t.Fatalf("failed to unmarshal: %s", err) + return + } + + if evt.Content != res.Content || res.ID != evt.ID { + t.Fatalf("unmarshaled wrongly") + return + } + } +} + +func TestRandomEvents(t *testing.T) { + for _, evtj := range normalEvents { + var evt nostr.Event + easyjson.Unmarshal([]byte(evtj), &evt) + bin := make([]byte, Measure(evt)) + Marshal(evt, bin) + + if evt.Content != "" && bin[len(bin)-1] != evt.Content[len(evt.Content)-1] { + t.Fatalf("didn't use all of the measured bytes: %v, %s", bin, evt) + } + + var newevt nostr.Event + Unmarshal(bin, &newevt) + + if newevt.Content != evt.Content || newevt.GetID() != evt.GetID() { + pretty.Fdiff(os.Stdout, evt, newevt) + t.Fatalf("unmarshaled is different") + } + } +} + +var normalEvents = []string{ + `{"id":"99b83b56b5e32d41bb950b53e68c8b9e25cb2c5aad0a91f5a063e1899cd610d7","pubkey":"5ec2d2c42dda8b0a560a145f6ef2eae3be8f9f972ca33aca6720de96572f12b9","created_at":1688572804,"kind":1,"tags":[],"content":"Time: 05/07/23 12:00:03\nUptime: 6 days, 17:50:45\n\nCPU:\n\tUsage: 9.8%\n\tTemperature: 35.67°C\nCore Temps:\n\tCore 0:\t35°C\n\tCore 1:\t36°C\n\tCore 2:\t34°C\n\tCore 3:\t35°C\n\tCore 4:\t39°C\n\tCore 
5:\t35°C\n\nMemory:\n\tTotal: 15.57 GB\n\tUsed: 3.45 GB\n\tPercent Used: 24.3%","sig":"9eff509ed6fc96067ddee9a7d6c7abfe066136c7d82f0b3601c956765402efa9591e6916ca35aa06976c23b1adb2d368bd0f8d21d73e5f7c74d58acd1599c73a"}`, + `{"id":"080c1acd1df07693fd59ad205d14c4d966a1729c6c6773e2b131f5d2356ace77","pubkey":"06a498e5bf0cd756a4941e422713a7e75deca00332cb3736000f3df8616a2367","created_at":1688556260,"kind":30078,"tags":[["d","plebstr"]],"content":"wrQqhrUOy48lYfuAVcJlEMwygDIWi/pn3WFGptIuFfjkGi8ZBsUACsnpWbXg03TOkZqLK8VsmC4By2bQcDaP9Na0DzZK4MBO0At2vDfOyu/lx1nXLoj2r/efAWX4uEYFo3BsyWMeZfWxRltBuZO92OND7p0AUIdcPsTkkQtikHD5TBko2OGlejAUNu7PEDMh+K0Bg3i1W7iNMV2EUYOW37+T0AlHSrQh6eUCpLcLh46oqgeg1ZgtpsJTCgSEQjoY5QLgTXw+N/DuLeiC30BjaBBCTSqFjhemE0MEo5Glg4YrCx8HZxP/KIbWie4rbU+2z02KHSc0CxPv7A0IqPQAfAMjC2pExUqFclVtd9XSPrW0umwFNJ2ljauQfilchOTvPbhMxcAqfRgeFGWpZJmpqQ2IVJkzMPr5f5as8rBIqbQ0uGmZDjyf99FgATYvkXkxBGNFNGkLHmX3aSs2FZP61bSiZzXbuD31l37//huO/Fk+o3eejP0yPZe11tHBMeL75FPfH9sRwQm0UHWVDL45IA2JKAdC0Zt1DQVJZ47usj5Ivj+qmvuOFGgWrukpQhDzsuGoXSi/8acXGmFGas7M+3NE/WX5umNJkPHDcaTtSRTsKLmmNdAlISQ3DQ2mYMJBlZ5U+wBEjw7DY81XcqsEB7g4TdmA0bQx/4M8m0v1UgL03gTYCiH8nA6nqtbp5a3H7DB8YKfQfBn2DMprhyGFREeN1MwYqcLbdHPiibBMKpGphsBi9HBwexm5FiVyPgWjFSI2yqj4nd2f8syX7OrYzBSgyFB5Luq4DXEtpL2BVgtnwPYyUdG3AkwcBYKQcmrxMZdzRVxSmuU6ws9SMBueqpNxzqdoODUFNK/BZ+UZhiOm7+iGoqSeLWpsDNwxupG385ixIv/U7EYqKhwkfekNyA8hExJRsjxFOiZ0YfoGNG42XvpYNLRrqztOM3/95I81Rq6d1e/sBx5MYrdgQRBmyJ4sDRc+1jWTHZTduVmEFfy4DjY88mO+67G2WtiKFUa/KLkgEpqhBCAWalCNkAuSPKxMiqHMSoP45ekERdnqaKqQfjc0tca/lq2OUds3ctIUdjkR35baxgnHIISHhkFlTHwP1KoMGMriMREpCUqZVGZ7EFNbKscJoYAMLrCNp8WfMivgOOxhUW+jgrrrgmGSHDbTK4caeCKV5jCwdhKOg943yEFzFcMe0SAV9iC1ZIt4Qp/U5gzeG4IT0eIVzNDdJ27PMGvEVmAMFIWVh6oRYRxsbAhZJwXrSWDOObiqcIQxuFviOevL+D2khF3r9KMRgqRYJsXHi6Mx7I/QnU9nAdfeOLgu8LF79Yvv6dFjt1DlPmQDuWmJv0v2qe4ybQGKvS2Kdt1S5sGhYYNqG0Vb4Ld8sKg8gagJwwo7d2F9BOyycMhql/qvN9RC/6oPxuDVOJsQBGH2qo+diS8uMsDa/a6spk1T0q3u/vNU1w+bnDNMK9uUxHmTvO5rhg3qW3fwnUQzboWsw6pqsWAFryu3LwtHxsVNBTjlBv6GFodS5U5sPkT324r26rla2stN0Dq
LId1OCLCfUesV9yuYNFmq+bijHThfu/lu86YMa+Lo+GjW/XSzw7So9U8ZVBF8C/+9mFuxFIj38/kUUz93o+aO06kyNrRA2QA7VtX1wkPpPCUix95bYg7FN/P0eJGfjnG6KvMlI7/O0raCQI7UtRCJ8LSCnswRNW9mv2evT1qVX2XCCiYCm7SWa2Scb6cZjK2yVbcSRi6dcn2difKCaPxXUVcL4KZO5dw8xkCxNNNSHghY6zLcH+oAcBh/jU3+i7RODeTCr7GhfLTM7y0+bMK4JX9gHzhOB8dYphNhVR0zYDsejKxytunPh47bMl+i8cV4wiU/OD5+ZZeuUoYd3gJu9abJ1D95Td1D8W8PyhtDOy9n1P7f1MJN0O9egMqiYKioZSd2BkPzCpljL/9e1kZdLlnWjwVX5kF4ksPbwNwlI44Jcv8zGqJSLzva4iCMhNBFR261BGpWwkkAh375l7dwVqC/tPkyGPjWcOVk7/wnBN4QP0ZYDymSwENGh9Ek9xKkfww7TqPtOjSjm6K7lDCiY1Mnt5P4gqec5U8OTMbNV0cd23V2pUQFkN/wrZ1rpcrjFJJFELfmzzTaO/era1o7cm5BHMjVAlf5F3n7ZO4wMWVLHAqmMnofy30VZxPuzWXx8TsyYvoz2WUW5X1vTIBPfzZJVZx97/XNmb/+B9llR59agDUUWO9/D3WFhvnRqE8ZPkJPM8ExHW8/ECdfos652Tv1YAJSTGckmPajrYgd+zfaXPZMgBnovH88zITQ5twAlm/ze+CuMi0dV3IQgwWsZKopeLs0jO8URN703xYj5abFijgzeyhHV3NyYyQTt0FOfCt39NJxEwL2dgLtHzOPh41Nspb1jEphyfpwrATS4y+hyut/ce3cGsCkTJ5aClq0U48DFfnvf1/2xnXE1ZzPSZIm8fSugDy1He/NpMcRC3xKx+T5IRP6kBLrXm+hoktS1r3g5chdRaqm4lwylDE3I7Q/J3k3FexiA4BElxETDRerNH5AiyvoG//UnL3IsXIb3IO9YmW8VxvPf5AhTzds0JkqQf6zJ+Ds3uwPFq6ZbimNB5Jj68a3ueUuQslZ08c5+dLf6HL8UKr3Pmr2IYCFT+HgdhqKo6fnH4U8ORN9zLRLASgT9AmZu8vK3FchyAxXWC0TGCP72dP2RKglhcvuH2j9f8+Adk9GOkPN2eG9zAS7Tky0UEWbVynDAzcfNdh2fiodrJl6u5SPE+WyZR2epvSPQQmp/4T4LmheXwgR3gJABy22pTsmonG5DrorA+6ZMb6nXFlPlG7ioshUV7twvqKv6xKjXvnfJZjJ+GzogBiyHrgAP4UNgAU1JekrJEFtpwr0cxCHEfYJEW7GQytm/IVl4Qo1VO4a2kxkR5+zYJRSjlCFgdN/1kiG3Gs1v7fosAQI+Z+d0ABPXHp1Z+HGbkFFbd96H6C33FwuN1atkNcKZWFsBwSWgfs27mzVtOzF8eu8MdhZ56M2fzexKe66aX2gtpJGlycesQWu03v3TvwzEdsOuDwShbg6KR3SXz+XSGkxpaH8DcKPxvZkMqR1oWa5cDfg8f4gp08t7Lxu6EPfQBnJkB0fxBJy1T+X2E8aNeQFg3QoMWIAGWQZUbuQHtOUNLgIQNNiU6IfSpMFiGG4yaudaq68JLuqEZtTKRVuiLcZaiElY2zFByKk91yikBQAy5H/bnV4PxgJ1g2YflH5JSnRKSA5vhtgQ71rJCg4ZvBvZp/k2LNFVpdu8LcVLROcOJIrC/NlJ+JPt0IbJXyt5WzaIkxVuIbhhZW8KLsVYfNF/M4MCZwRNkz2dXKQ8r7bs6RA8hpz1HZoH1+IroaaPzY5m7F6oJYwx23X4ojkKvhs2mZhR79o5WgHwLXeZBUI2Tvq2i8ax+8YYclIIjy+1PGOzHaFhALHs9vByfBSRMoOlt+oM+mKFs+dWUSeskOuB+t6XsfpJWdIZgLe3pQGvivEDfzwauHiU6lR8JlqvBzucFhZZDfseYF
GwKD9ALhnFZIuDutgtslVTByNqkuEbSvnLY6VlN5LWj+uU0DABspZoWs5AVtcMVp/J7avj22gTeUHYA5DNY9iEllprl6dEtC+I122EqQ8laGikM214Vkq2gOxk5LVCU3UVE0KmbDAwjL/wFV5K5sEZhicckN0cI+J3mNevRg225lL2ho9RXeEI4i+NB9rIXoHNgLiNJKxV4j3y3F09uTVpOvT9Pky5emeDQuCw7ltrlezRw42VvlszA9ys2GP30ChwY34X1kKdQ/HpGSSaAAyoEfOJd17B41BwKpXGlAkkvj/9hVeAA7oEBbTT9bs0K5CoawidvHadvqjQ4fY380Pf59oJjItwUMembJ2k/beZ2d9jGiXb67lmi8aZ/2O6DLS94wgvvoV2hh+IokJ3ofwj8Y3EYMDrdX5OVS04mxwtRKXJsN6mqRo8foNonx3qAdfdHbtuylu0uNSe3mz8O3P8DFJ/Om8/sK9NdBJNdM1V7mTew7phUAFEDeqzsQtUNSiofhGUGupO1xPSC46nFQE2C87tFPi2GN2ihDtSx2Jwi7m7dpeKBkRcwCEGnDPLfMVsaYaLyMFY9hxi/Imwn1Ia2CoGE40cdSRZdKs6T1hWBiceO1w+wJjfBsOiGKD5QMjyqilKVWZJ8gg29wTnmcCEyDxsgXowk+4l1nCNYqfG64GD9q3a7W0Z+OVWFuHvLbLFANEilHYtE5Vc2fXybhMEVpbn9FZBkHdoGnmqhZi7UvwHpNn2oaTdRtecATJeIleWHB092TulO2iMTrPhDU/bemBOJKeoQuE9pLR1Cv4mhs3emUsuVWQgT9+YiyI3k1/JeebZDGth6XfSXnTWExOkWOjnM69BmtnuDlmgY49cp/Bt+oy7iBFxqkM+qLAESJ3uON6h1sl78n/XsWxDFUVKLu0qCWsp5Qc5WRcRjLe/7wUZOAl4PIOL7HWXmeXSPbAoZszRSr/+eXPwppkdeVWuZCHBdTu3xqMeCcmsjbmP6W1e8G9ENkoH/zljqO8R7z1lWH+eRbY62dECrTFt4qoP6rsi6lEpSd10tvQqt5AUvJBCNPOVvp24GGPedT7TOap1quGVHKKQxkCuViUrWXRsVBfIdzj+9/2OlhHGIb2mYrFlL4CwgZmdGQ7tAv2DTt8+5hhhaXZPHkixCxUJ6FPqUwA0gqysOFaOAGXlSocUzF423uyGzIxdT3WYjEmH+D8BVW4XMqMT6gJu6KlKonF6qp8qysBrlwPm1vvLDCF+D5rSEWrT2XLQHdBfJSYrYwrrXJVcWt44E1T5WU27ed++YpDslGI7jXRXEYG9z0XVUlyUbCzdq4CFREQxDxRfHRuRcP7kCaALjt4h/gd1SJlPFtoFuse1GAbuslbf0AzGaV8WcdOq2wwbGcgGtXlNyKARxeE0wt56EMqog4gv9JLFZo/hil0d7dXVIHM/nhz4HOwrzAndLluETqKoUEc41PbNhpkHjA8OzslbqwB3XdB9YCddxo/0ghD0tIvlir3VWAdOUDcL0+VmbQcrUYXsRpP3dgt79RK8+AmJ27VTOZa6stj5wLVejtyx2qAu1bjZSudyhRf+fjg5X8Zs1h9YfszTRyUYIdd9LnHdMHHSvzExixkZQP2Gzjj5lAC4cjQCPX/rOpbNzC+8RmN61Fxd+nP0MnQXnVhaCNRZOBWkXBJxbkxYJXHlRDGI+yiXaPPFlJ+4obxtdhM/0lf4ZUQt+MmzOfyQdl1ohnDfv3cDGxj2pUh6HH6eLDvUHyvy4bQIVRH756VEUpQmD7NsODyjb4LEgWsOxs+dUaRbFfx3NpuB3OOwmCAKKRo9PtAfWIrdLVYWLnQrzdGF9AOWV1yuyoMbj6Of+uYb8Tkst/T06P6DwlgOuSLSesCpaHFCRuguTduj8Wy8AZSbIUSPQAq/yYPbiLogyICR5qjw6Dd3tUFWCfdX18chhIEjZhVxNdFh+AvLhTH4ihBkd4NV26JgPwZuLgvYt8u6NwjPvm
qYZ+W+EizGCRXvFauG/HRyqMaG8pyY0aDhQaI1LNPlckQ51QK/uZShFtHPve/EJ1qkWQq+I6z1kdFYv9vUT6MrBQZV5AnQ2+pV4lZnyYCd++TeMrUq3jJT0Zf9xclRCDtBXKzP6oLgIpeZ5+tHwU5u550Fwdfe74fiexG+zi8jyRg9Yo7Ki6Bfyf/vem3bg6Tna/H0qSkcl/bSG0EmqbMCFQCEg8tS+d5hVruiUXVOAmfDOtKiMG5PzJGZ/Z+O+3VHRTxqqoL9iLbAwBSVStax7fWpZTb9VMAXLN2ngcRsWTE4YNsIldnMyj/s3qo49EA2X9e24Tjd2f8klCPrfXfzwRBFLRIJ38Z7RIXtifc7KFqYrusmymx+Oqz3+d5o/8plz6Baxe1MgjIi0sR0G6IJ0YZN2OB2FT/2e8QeVgcuYiybr12kflQJMnEvRi+NW+WOQ+uIE0wOsVPnnEv7mPtXN6MLE2h8MOAxahKrl/Yej+QdaRcHmmWhoYlHQhuld1+AQ1sH4ePoWdOanBVUeKRmdbnDem7TTJqEJ2LYJvfb1gO/wLMFxI9EqN1Dvpmux/JoREJ7dzNvrbB0U9mhWGNOMPsZqzcKfp2u8YFL++6OqDDGsEgft/IG8psRugEj1O6krn8UfSf5Q9xM3GAC/PLAs2Y78f5OedhQKrdVB1gHhusHcpjxmzDizIlo9liLKMk36Ri2ztJzNJ/c6JiJCfktSZVZFelKD7d4sE/EEfhFoZr4lO+zYw6/+iwQD/NEZHKZWzRqE6tXlnR24V3PPY2Nj+MwpE6tUYvJLc0EbSx/SIBWVGSEgt3ExKX7Q9XZCIKKXZ2s4IC9Dn+w0+/8dqBjPor9cJnnyvR61kusm7CuYKQwQJT5xTtJUZDAGDR5iB4wp2ynJrNEsRFo+NmpuooyKIeE0OGuMValoDeac5TeYXis76MIiHyg1Lw1u3OzT+Kvzek62l0JBUaHjks5sGsaTw0Lt3/Cmh9dHin1D8ES4RlDRORLwF0nlGCSlfgZxZ6gPLtTdx+Xx6TPKwYGB4aps6OJW8A4lEJ/IhfqJSJYG87BKKazdstDTb772RF80doNffHrzgMJj35vkINwziEKj7z7XWAFW/+CwPO6dz+5NTVQ/ewxM1iIfAqYPzD66xdUN3/xFhErljI1BaM4HIwu7pVA6hATSyhCSZ1y8Q6J8+w+cx2gd3Jd07di2meCbXM3NeN3ICXpR2p+XsuKf0kTb1BRperzzOJBHlka1sYsgVn55HIT6wUNq0uAKOkMCAaa275wc7qUdwrk3eqwcLAvG58daxBKKQHwU5v/vafOr6rdFvkzY3s849wW7Tr7vJyMTOD/PWRwWBSHvOgBAt7sbGtx+WGNJAXJziy9JKCWJzRR4Os2Y01svbIX6Ipu/YCyBluaNPJpH3ep1XjMJzIs8XA9PcTSE88z7LsnGyaCYdCJPyFv/NSSbXXOtmPAharqAJ3Ut/1TXiJrxsmZmrWu7TIp1/vEoqFAJOOXNF98GS4WB8EA7ob6fwuBpQP667VkqB6jL5UB5c6JwlH+o28AihbXHi5J7enwvonQnBX5+gvecGw84YI5s2B6XA12gT8c0XGCpE2k5o9//AuQqCUUXpSZHoLUI1knxL5tW06/tCQm71eNW5pck8z0aeQsgjunEGs3QgnQgR6dqYt8/G14z+NM3x+N0QrEYkR10ifoQIrE8CoUduuQF8v0GlBZ1jF1JkYiuuR3+TgW96JaG/KdAW9nxlN63h17B0jHqpcIlYleA4gfZ5H3+7h015wS3v8uLAuzYehLghGQTvIenZAR+GQLxaGXnC/Pv9GzNCYSTY8NPfwrK55tWJCcVL4a3TZV2vWNlFLCsCpKXvj6KdoOqvc27MjHaVbRzYb4+fTFTiVwEv308qZCeAigBVHtG5T+Q1U7dmrAXtMuEMMYXQKq6ndzz76nipEDoeBDcIYc7WxpYU1B7fcyP5166J2Se78fGzRzwKcpNcfFOT
5vlh/G+6ZdUOE9UdGfRLXAexYgyiosluDtvs+FJaPErFANhf5XdWC+Q4DOPzzzqIGF/TjCdf5cwpV1sn/e9P5FhZ3rGwX0INwMwSWGKqBbbBRZq6ewWDK9wY5p6vsjKNEY3gbdes1oOszlPAt0kbGPy/j2S4KctI/iopVReS3prTYe1yGpRI0YTwV+FTINszBihunvlvNTTNrheiRF37mYT7L0cpIhF4IlABHPmLnDN0QPbZ7owvsQdqcZpMOLLnI3kRfyb2tFpx03aPHhsP3aI+M97ITKDnV5nBTQIuSN9sxzM2rQ9f98u/L6cStrvXIx1EUukqP/RpCDcLq8k/nFSEkSca+VP7yAtSE2YQNAykrFmG/jBpuP2tFpLvU+5YX58BN0YJQ7nuCz+HObF8yKyceR+YJW8lxRVgovryZKVx0vZWdx9gG6PemsNMtiYZQQqWxfBFTJXJY22LlM6Sd+03UhmWEypHHHuzVvDCEUExcjHIGQsNtTtAo6ywm6A/wtSl1u6PtrsT0PbkH5FK7bYe+vMcXSFY5vzhs1atJCNoB/GMZUHLthQ/kkzPJD4HJ12VCvP5+j6pyQNFFZF1Ry9x40qM7k0xV8ZI2INg1MqFohUDrwnctoCobRaqqDdrso1u29tboRPUHQilSBVZwk7oTHYxQKOnmKyt9RZ63EJhN3vZMm9rL0tll3tfDdGDCGXld3gPlO/iTKuPzVFwUIdZJRgsXfzML9lK2oYXH1hJ4YdG8d5SliFne4XiLpMX9vwQs8ssVVtW5zFyJ3SdEky3NC1ZYKjscZOjwBNHuq48Z/Ou2UfYMkl77zdiRnLgKR6n7D9sCUIxbhCYj+mTgkBOvFhEe/4MxwbMurjiECwT5VAPmShkYK+rtCzy1C/eOgMe1hxOsxQfTqQ/pOT/xgm+8DoE4g2pjReKQQ89NjjCFRIiSV9ouun1sJKM39G8sLp8DJ6ZJRjLuw2hegZKPPPY9FCTJ1U28aVUmqtIHVp8ve+f04W/2museeiIbSq+F9QEzgNdsGUoVQiK9W93qppIS+CMc8NSog/HFujBhf/lJmEZiZlLNvKfl9kLquwl6PIZtHP25/ZprzLNSfHlwgixqLbMMck1qupbzdjHkniKvOMz4QmpDsRz5X3cPpp+kDYjjJT24ZYLfqf6N/hZJUFeyD/ADh0K0asKiSsKCg6sOK0IHTnEuUhjRg0cJ+nkomS2cnAo3L1UkW4h0JXngujuyycOduGl0TMrEBbYkOGlHRd8jOHgL28Omii3wgkLCfnEvaxXWTI7ZUBya/tzNUhG1a2+FM0Rurgtotgnl3xqJoDMAfEMExOcPxojWbhauKvI0hUCDukr2pfaHbHvzcooKbOBhp9ZuTuIhFHM00TxuuOlVd9vuN8P4JXRP2ml6125FpjehUOEAdEUY0QFG3KJhb4BzCqOkbn/11lehrNkyZyo5LfXWK0P5dOtydEPUD2r6cjJiiydsJ75O/nFvl5F8YHF0cg09QkISW1Sqq0EXdbMq877F/HMALMnY4bAhhDJRvNFFSZo0NQIbXm16eiqX0a+in4A6acjkypAPMva0metG33QhmcyVuWX5djn+cU9K9zdcbU9k/QFg+n9WfTWfPrVX0w2f/GqDNMpFe4NbnoA9cFLP/w2wiiKMua1jrHZwcIPDpyMOstqy2/OOn5y0WgdQ4b+zW+o9lnamcM9/0KyLXNBAOYZuwm6PRQ2GMiRo3FdzG74V6EtgcdshRLZewyrdPZ0hfyg5GcLxRQZiBlWhvAR9FrnzxZmTZCvGpyq0jdC+LSfrcnQwKkEkWDwAMXZOuIy5CnPSkmcvdAFXvFHUGNhMUdzC1Dk/iPkNZBwFrozm4LNu40g8F7brYAB/vie4Oz/sygGirZV1VyRc2SJ7lIML9tPE9xwF5+sYqcFQaOD1DwAXCbf+lAKX/Y317eartcmRo7axmBAn0tSg57IkoUxF8T4MGhWgH5uwwDpW1isArsno5Wv
b4jdq9fy9Iz9vnV+dhDkdjbZgxwt2M0AJzfbUnxyjmsY4wNjAAgCCSVHCS4OIP+OxZo1EgJ/BL1IWnTwWJiwDWYzRhz6pLXmStWd+FY9h3BuK4yJ1vMmGgD5Lac0Lve7iWNDwmW6eUYS/gNf2LnXMYcwfj7/unCHlWydgnYudqnykwuHX6oFdNbUR4vXji3ts77an8MkTOwDLQ8u5x2wxOVkA171f9XvcB+Du0+ZBwVUNfIOUefVpZ4R61siMmn/1U/UL86KsUzcbkNwUwY71EgOCC4HbcBoAdK3egNcyqh5Lu32wNnTYl7bytIjF68OhHfxYLGZ3YW1wZJjA0upXGvLh9eH/R/ezmXOSGX4b9Lvz9M1cpkm6V60YlYFJ6FqJ/x7AEZTQx3oD9XK1MlRvcegB7T1Zb/KQAp/c74kapGtvFGM3Zz+ZmzKExNHCj31eBQNU8sOsSXPYCzF2S1Aq8Xb3BALJ0UzyOGyf05uFxpZt1sQ4ntvChJ71+pNAMx6o6qzV3X//bSiQnmcP1UmohsYyEXDis6XmZcDGR7RBd5DUN93GqOqLCf7RDydKw6z8/id4M/U8inN96JmUq2FNB4oduPJYAf4iHpDVW2LJovLqsKYNbEbmsvJ6EnCim5jNy3QNeJhRc9wtjtAYX6ynJ+DTSbqw52tsBqqbDFUL1eP52JUUiFXc2hP1JT6Ri3oisMeg993ee+Sx9iK4j7FIRixUCVLtJaZZlXZ/PNNvncv8U6elMdfMP2T57uZemWTL4CZ6vSVmmLdU8ZUGq7cVaB/vobTg9rqk5+wSXkDmZx6YwqqaxECx1Cdo5Ap9dFj5C8DW1CTd1CIk2VYuhkVVJt3NT5vwzHgtfMAO11WHc0u1ypjYDpky0aFCbvIJiDST13sIm2RyN3qsQsw9oaedWyda77/nH+6zLgwQ+rFFf+ielyTJZrGAMsT78LMpLuDviLeNNkNeCC/kMj6GbNi0YQ+kzL1mqN6IgCoeNFfpgVuFWvjKiSFSY7VYUKCpo55r5ozOCx/BngoyKW/SkgVUeo6JlkS5QQ7xU2hrOVfXkTKjA/pVcmXBjIGVcuxPOOSdmoTyHZogofvtslzlV7W2iV7Z8pyjWeA9DSFV/O4frhMtsskUgxFRdyZGTJHMNIMSja9bWl22huBEiML+k5Gq2ZNa72tdz6R10Kz34JPG3InC/yJ9bl8i3gnSB/ZXyj68hv671Mvi+WAGRumOl5pJSUdffRCzU3HExxBqMR9/uAXQ/n8WHgA/PP+rimvWCVYyGhAj6KqmyNmDX+exll8BjievPZ7J6EBGTra3+DnaXdetVwKNm0IKJNHi5dVXQRTsfOga679GPUWPn2aJH78oqljmKz34BvkzFZP3ZfziEO2RfblI8ddQgqkpVbtp/1dqTA2Ux8eN3b5WhhSDasj1iyIW+Xut4N94aXeqrfDbFQTq/60fS565bF8wonHu2xQFv4cUJtnoVqP+3E0Hls6YkjI4igYlkybddADG4nPM/Utwrd1v4qBqMOCpJOUzHwFVHvNhhWkkEONYTRYb/Vg7n+RojmeQ1R3TDuPHbi13kmPe5kPuy4UwhLRtkihc21fdpUgS/3Er43LF6MdpKSlxuvUcqedl+CGL4CGl6MgZ7XNBmscZDFhPpqmndzpy4Wrc7lA465hb0fU+ABSBwMLPbr+a9kEs8f8O27aljaSxowahclX69/GPsu6mFsi7+BdJwr3nCpwJ9b0gFlUkxchZdmonUNmHlESY7sKqOkPap+XYP+AeXM+dS4+x/FEV1Li1bRDsSAPR7w9BIBxa5Rv/kiUQ5qdotOjs+x2aC6nzPyLSMxxeIR4IKfVSyvuQdsO8GUovE3bCsMNtrU6wby+htuxVXzFNr/XnmVM7RoPOVMF5tjHdAwpG2Cj2pZtXKxcVv9luVwo9JYj384X85oZFmECdoDb/I5u+NK/PvsF6X9sIqC8xFY0kXKTwzJZOppV/z/7fYMeV3bV5cxh
sLR/RAoxgj+KOWAITgcFEsCykeNS7coFHnzv5/qqpE/aGzVJfhgfGmUNdCSNjSnLnNadRklYdT7QkaU8rPyVnTaBi+8Fs9dn6rTzNwvrcgBqOIIDNwMLuEG6WAjLnL4w83a1+zGT6aG0XtIetUoHThttfAO2zUXjgQihHgugfwqYJY3rIUwjEp8Ww3Y0LVdriTdY0oO/HZwixeikTR3C7UAMh9iCus2vYFbHiWarLmuR0DRQTMzGjYsnZUKqvLi0nOKpLEUJqrU/DK/cbMMXj6ojizQ2rierOBY2Q4xGPfA3LDGd0hKAg19uQC+j0nhfJzENwmgs2+jhPUmssA5PA6MX4lzYgwK2zhUOBWoHdlFuz8SSkIGN1psDb0RU7oUgTlOI5MPzp27BAVqVRHOXDhht72XVPsNXuCvgKnw1No9Onf3gsjD6gVLcVM3SYE0VBJ3gSxG9aYThlBEE0hvOd2o2GvQv7YVURahplRpJjadrFD7F8xIWaknoiUTcRqkp2qbf3MRj82tYuYLwJiEOPqBS6WtIuddU9qy0lHttYAE51EVjskD/V4yt6KSNkni879c+KzUiGkGHCNwatAzhIUMxJbcWiEwmUMxNclO+DRANJPbzlpIM1T7mpjKCC8NY2a0EnMt9uywT3rRc30keGxEfuLeHhONojYSdJ3ZILtMf9+i3FX5boQFGtOW72IpBAGjB3vQwZG+hCJki0r3c3Uc9OWkIRT+313gzberjPI1q1c8Xk+Baa5UXQmB2LxN3mD20mS+G+XXF7t3uvXzpKARdyFhGxBzSgFtS55OiUXgaqm5GfQVA6G6eCPWvRTv0hMvkmx1VOaS3WUfdqngNHoVaT9hJslUmENIjWQzZHUTWS0g8fdIk5brrNoYtf83xVSAEyk7KvCducCqb7mTu8dlPrq2BmHtty8Lk+a/cw+hu9CFxYzUk6lDYgwk8g6ygn9PykadjBWiDFuKkdmwSwH1jfQ3t9tooehS8S4BNRM5SGOv6BwQKLSOe0nFp1L7JD0ucMENmEDgFhSzSfVDsVaOsEm3O1eHiE84t8FZF1IqpaGU2xdA6ykU3o7NSWcd7jO2KUJjmxLzJRq7Ro5y8MvQOoZ+6R+cm+FcMjnop4ixuS5B7QySVxrLO4ZfJ3iq9hSes8kS5bM+tSOWOQbj/kvy4epQhDyTgj16wjXCPIgMwNDjSy9Rx4yTDbBWjac8jLATc2TyQG7h/2xFPHcJAuqHg2Wm/Q/5lwLusWUSyKI73nhZNHGN6fHOe8KHiuHbhC5oGf8jI8GRx/sAyEJz2yMv9c8bq1yN7uhgdMRPINc6h7ZXceofzJfJTh5zJxDuHC215Mai/aWHT5lfdZgONSGc2Nik34qkVvNqfvPmZ+ckL7TlgFENx/2LTHY7gggnsCeCTt4xE9GhRF6Mlk56ZlnF6mdKuqjQ5uPTj5OgWt3p0JDaiRN37JNZ4XUIwj0AxBj+bnB0CgpXVGa40znMz41gQbhdrP+zI5LulJhumQyJMS4EJoDefkU9T4mmPexDSAsPPADjSZCPwyj0zM4yhMeWi5PG3vF5GDfrJNYE7xgR2GczOkIRLcedfOb795rmnKdwOwWViLbnWvLi0QTuo7RW4Wmt2YwI0J7RI3805pqsjnvZ5+LDPTsafmle2WQIdYsnd1iuYah0sXfImBUMMjN4eHolrza79zU2WdD7IDjML/yBWaOPhD8/Pb5aqzlOObbQlSS4sg6PFn2A39osDWSt3d7NKkSvBq+6aDo5gBL1C2vLeZCnznkLZXJl19ex+8hia3e+KGNoGb8A65y7OKrTbZCtZvF9l5K+4XGrH6o2ePm+S+lCrGK9OEN2qfktEHMY0Sr6IjFJZ28UEUSljahkMHSyNct869m4igTQBuo3UlRxysYHC/7CUEhdf6KxJELAsYl5Q=?iv=ILkIKiNWJ1xZhh69TAiCOQ==","sig":"31e6b022f1b7133a97490faebeb75f08ba
230100df36ad11440bb8547c83cb42d741d8fc2bfee7880f33e864d354092532fe4a9b6191245a01ff65ea00f244c1"}`, + `{"id":"55ef38277352859c9e70a70e17e565652d5ece390ef05225104bf6f846410f0f","pubkey":"e81ca829c9bd368cc584844078f570c105e59d9392d19ce71bb9f34c1ac633f3","created_at":1688556088,"kind":1,"tags":[["e","29d57dd3bff6fde72141efcf55a09da0e4cb4a41785aa4f7c1411f8505af72b7","","reply"],["p","1e2d080673f959a5d82357d5e2aa5011778af634c33e4207cc54e7df943c798c"]],"content":"Is today the opportunity?","sig":"e9575aa169dbe38c249d7fedae70d1bed9bebca8522793a3d98ab2a12ef3849f85c87a3af2f24557296ef049f7b1f5ff09c5a1d812487ab26fa669d0093840bb"}`, + `{"id":"221e4c29c3ea93ddcd2298aaf5a0f5a7c628afb79d005cbb415cef2af8a2bb77","pubkey":"e81ca829c9bd368cc584844078f570c105e59d9392d19ce71bb9f34c1ac633f3","created_at":1688556080,"kind":6,"tags":[["e","29d57dd3bff6fde72141efcf55a09da0e4cb4a41785aa4f7c1411f8505af72b7"],["p","1e2d080673f959a5d82357d5e2aa5011778af634c33e4207cc54e7df943c798c"]],"content":"{\"content\":\"There will always be a another opportunity to buy more Bitcoin. On our way to 1 Whole Bitcoin…. 
#bitcoin #dip #nostr #plebchain\\n\\n\\n\\nhttps://nostrcheck.me/media/public/nostrcheck.me_2617026328114791421688555844.webp \",\"created_at\":1688555863,\"id\":\"29d57dd3bff6fde72141efcf55a09da0e4cb4a41785aa4f7c1411f8505af72b7\",\"kind\":1,\"pubkey\":\"1e2d080673f959a5d82357d5e2aa5011778af634c33e4207cc54e7df943c798c\",\"sig\":\"5d60fad4103a82934b9fde378b36b67db811b624da70c57f5ff1b50a11e0d606de606e1593a2d7446ed7ab2fc56bb13d89280f9336f6a74c40eb98f9d274bd81\",\"tags\":[[\"t\",\"bitcoin\"],[\"t\",\"dip\"],[\"t\",\"nostr\"],[\"t\",\"plebchain\"]]}","sig":"6cbeaae55176f424520cb13bfa5287e67438b3950653159c914bf7ce838097c29a4e3b95f84610cc8d211b5dc76872482b9cd0cfe09ba5bc84eae71d974a30a9"}`, + `{"id":"2dc1a37fce7815aba8a1750801f86c1cd35145bba6cfc35cce2c9c96eef32e5f","pubkey":"7ca66d4166b16f54a16868191ba1c6386a976624f4634f3896d9b6740a388ca3","created_at":1688556074,"kind":1,"tags":[["q","d913924e45928baf48b6b8fce440ebb7ccd177bc0979350923f5375aa42ceda6"],["imeta","url https://nostr.build/av/43715004b4a8ab944a45160869b9f01b1733f453817b4aacf938f563142aa735.mov","blurhash eaDv1LD*ICtkxV}uNGO9nmniVvt5ovaOWCEz$jw1XQX8Ioxas.R*jb","dim 720x1280"]],"content":"Lord knows their Magic 8 Ball is useless https://nostr.build/av/43715004b4a8ab944a45160869b9f01b1733f453817b4aacf938f563142aa735.mov nostr:note1myfeynj9j2967j9khr7wgs8tklxdzaaup9un2zfr75m44fpvaknq0qhsgt","sig":"e3d6f7d2deea211299f22d97be779629f66c31ee6a84382e04503817f2ecf16dacdfe4f535da378b508f917c6c73bcfb32b60510ed4edca32f7247fae4ae7ff6"}`, + `{"id":"989a336e2b5f35080afa97b72bfe88f42381c9e624d1849417f364e06b2221b0","pubkey":"634bd19e5c87db216555c814bf88e66ace175805291a6be90b15ac3b2247da9b","created_at":1688557054,"kind":1,"tags":[],"content":"あーあーあーあー、てすてす","sig":"d1ab7eeb73779f2a5bb6a3339aa5afb16afd3347b663823f135f5343c2eea9a4e337565f97e7a4dac34bf75f227489a27f3321fd740c1a426968fb5a76c99717"}`, + 
`{"id":"0d6cf58fe2878c050973bb26e678090258c716c456008aa6d849de555fa788b3","pubkey":"e472cba86ba9df4a48605371a42e90117036cbc1f9919865809346e59064b28f","created_at":1688557024,"kind":1,"tags":[],"content":"strfryのstreamとsyncの違いか今の所よくわからない…","sig":"e7de14d5b6f62c44c3f24838d23e388feabaf2500144e5ca2630adf34bc4e7f512c4f7303109ba9fd4c803d47bd8a48bdacc2e29aa1701c8c6dbfbf3dc9240da"}`, + `{"id":"c290be21ddf6188436bf544d5625246de2dde22eb17ab41f40b6b8aa9bee9c98","pubkey":"4d39c23b3b03bf99494df5f3a149c7908ae1bc7416807fdd6b34a31886eaae25","created_at":1688556176,"kind":1,"tags":[],"content":"独裁かはわからんけど、ぽーまんさんはキャッチミーイフユーキャンの詐欺師みたいな感じ","sig":"b8e10a7df4718f0738c0bbc59b7f25401027fa436dc00f0afdcb979bd253050376bbaea1a6ec5fa246be935d6cd5f72d8010e8f800c79a9867f00f5b1e083a14"}`, + `{"id":"0ad438f0a34756ecb1bf4d1792dc42a5b0141a39d944dfdd6737e883815a65dc","pubkey":"6a3cdfe891cddc33228a52cd7b27eca17e630569c93c24d70dc1cc01ce45881f","created_at":1688556173,"kind":1,"tags":[],"content":"hallucination やめて","sig":"ddbcc08b16f88532ccc739ab7dfa112fb462aafbeecb859a1b1b511ae9c2eb46872505aec58fe7e8b38639e558f0e9e0a13adf1b2f89d3a96f890acb3cd5c40f"}`, + `{"id":"ef1aea4c78f3de5cdd07dfe632e83adef34b3ac0c26afba60852ecd9800adc16","pubkey":"634bd19e5c87db216555c814bf88e66ace175805291a6be90b15ac3b2247da9b","created_at":1688556039,"kind":1,"tags":[],"content":"※日本リレーの relay-jp.nostr.wirednet.jp は何もいじらないので、継続して利用可能です","sig":"94eba6e0a242cf8987e1d8d782968b9e341e4f66278b937fa4da33c708e1f6eb82652796785eb20b21f9c18c0534a568b088297b6bef65729192ea04485b7740"}`, + `{"id":"d2c2cee862a4c7c903ecaf129e2458132b3b4134ae3135f71ba4b84798ccdd3f","pubkey":"634bd19e5c87db216555c814bf88e66ace175805291a6be90b15ac3b2247da9b","created_at":1688555969,"kind":1,"tags":[],"content":"relay.nostr.wirednet.jp をちょっとメンテナンスしますー\n一時的に過去のデータにはアクセス出来なくなります。(そのうち復活させる)","sig":"9c0749183db90cac31778523424453ba53532f7537233053fb1629428a4844bc9e69efdb2a2ac75b3e6f10fd28a34c366d79fa86f68a3fba36fea2bcd82d5c9f"}`, + 
`{"id":"4296bfa40427b9cb3e078da9c12de7af57e238caf77ace9b517ecd99ad7f38d8","pubkey":"046284c5d3cc859f58b1ff58d2bdbf22eb6f41a633e97f503a569cc1fe886322","created_at":1688555517,"kind":1,"tags":[],"content":"ブンブンピーブピー","sig":"40426c3677dd61132558e58ec2e0d306a7581a73e7cbcd8fcf447b0da1580b782c12461d4105939faa4caf95864354dba25fe5b10aa794ccc7f68adb2d12bb01"}`, + `{"id":"abd1d0c9300b7745bfada6147ceb5b4d9d09ab23925e55c53b835347fdd0cb17","pubkey":"634bd19e5c87db216555c814bf88e66ace175805291a6be90b15ac3b2247da9b","created_at":1688554980,"kind":1,"tags":[],"content":"Threadsには旅立たないかなー。","sig":"4f0243d5380a1757d78a772bb27386d2c2b54926b514f4568e717ed9cfe6d87f8d299a9b34d6bbd90241deabde17a3bf514f3195b4f4c4183429387bdc6f179d"}`, + `{"id":"4db06f7e522db1d5166f5455e193690a3e79f256ffa27df09aeede7f70fd87f1","pubkey":"2748ffc20bf0378ace2b32d4e9ca11fceb07fbef335a7124b5368b6338daf18b","created_at":1688554800,"kind":1,"tags":[],"content":"ζ*'ヮ')ζ<ウッウー!! 🕗\n--------\n2023年 7月5日 (水) 20:00:00","sig":"16aa8eda88e42711cd2b77f5611cb0f171493d36d58c167513afa3be3bbfb3f3ddc7cbb6a20a8b8e011c0b61befbc8d6e8b012f49619f9a15d77410e849df185"}`, + `{"id":"ebd8dd36f274ddf91959bf1225bb4c0353d187b373d91e92e1f971365d556420","pubkey":"634bd19e5c87db216555c814bf88e66ace175805291a6be90b15ac3b2247da9b","created_at":1688554184,"kind":1,"tags":[],"content":"あーーーーーーー、 relay.nostr.wirednet.jp ののぞき窓。\nログインボタンを非表示にしてるんだけど、キーボードショートカットだけ生きてることに気付いた。(※作者です)","sig":"21fc8e74b995bd185031fac03b85e3a1b431f79de26658f00c50e404769ac431ca53f151c3dc9a90435b2e70f4a2ac199c84fcb7ca2858c45665d99f9f9bae0a"}`, + `{"id":"e2aec1b7e297329203f67b61f214c2b745a3bc1590f299ca250a1633714c829c","pubkey":"b6ac413652c8431478cb6177722f822f0f7af774a274fc5574872407834c3253","created_at":1688553478,"kind":1,"tags":[],"content":"やー今日も疲れたなー!\n大将!お勧めでイソシアネートとポリオールね!\nあ、6:4でよろしく!","sig":"12ba5dc9ff18f4ce995941f6de3bfaf8e3636afde37a06a4d3478c930ae22e2f79690e6f0682d532541222746aeb5f6dda29251cd7c31e71d7e206199b04bab4"}`, + 
`{"id":"e4e86256ed64514bcb3350cf8b631ef84b4aeafcdb164cea5096c893ead6a0a1","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1688574304,"kind":1,"tags":[],"content":"\b\f\ueeee","sig":"c61a4971facc4899109e1a28b73cbd27f8807fedcff87cfa1d8f5e9b709feab75e3a62a96fc75b5d2a2f42443d5ca35daa6c3d724cd6e6133b9c4a1ef072c1e9"}`, + `{"id":"650573b38c32eb08087c16fd5bf734c1b10c35be4366f5b3c8a0476569682b48","pubkey":"f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0","created_at":1702213369,"kind":3,"tags":[["p","0fcba340409f2eaad5b859aad412cb326b515a5f11d5585199906eae0a1ea948"],["p","e1ff3bfdd4e40315959b08b4fcc8245eaa514637e1d4ec2ae166b743341be1af"],["p","e623bb2e90351b30818de33debd506aa9eae04d8268be65ceb2dcc1ef6881765"],["p","39a8b17475be0db44e313f9fd032ffde183c8abd6498e4932a873330d2cd4868"],["p","460c25e682fda7832b52d1f22d3d22b3176d972f60dcdc3212ed8c92ef85065c"],["p","e8c1ca03a46d97184bfcd9125a5c9674a867bd1beaebe47c77d4eaec6c5ee874"],["p","4707c8fa26c025f3694dc0a62d45fe597f7819deb8a45abb894f22cffbecb777"],["p","58dc4e185bc8dfeb4dbc71e3818cc5a3e666b3bb92303aa4d8572cf52626c6f3"],["p","74dcec31fd3b8cfd960bc5a35ecbeeb8b9cee8eb81f6e8da4c8067553709248d"],["p","84dee6e676e5bb67b4ad4e042cf70cbd8681155db535942fcc6a0533858a7240"],["p","430f76d2cab692d81be33395066231f8560c97281f28393ed9c9607a16dc5268"],["p","254c6c9043dee77294e7095eedf34e3068705f5a68219e7411e19ec13f4f3963"],["p","8664ff363efcd36a154efdcbc629a4d1e4c511f9114e1d35de73fff31cb783b3"],["p","8aec8cfb20ce774c08e8b790d973da9916c625faadf74d8b6c8995ab2b8ce992"],["p","9b605c669b3f02bf593d6d945e25de246998808c34d46b194259451c1b7b488e"],["p","0dde402c4b53857520ec9481869e5d34641d0f749204db9c1ea8869c0ade6766"],["p","6b06b54d5ebd78d46a83d59185e5f253d2c0a3ab4371bfadced961473184d5c1"],["p","ca20c9531929c1b9aa5dd31a1b952a2025310118e5775d01ad244129b744e1b1"],["p","d3af49435bb13c695232fd8ca8c9e73db8c65630553baef83f9544e81cef9be7"],["p","8ed150f8a138ebee0faf7bd859847af0abc97a74f85c6d081d9de3dcb3
1085c9"],["p","b377757fa3efd9d4f56170bd08508872b13680a000be9b19f3c0f6fea3d861bc"],["p","af90923637f1e0cd2cce418dd1722ec6537dbbed535e71ca882e804ca18d3954"],["p","37c4e186f730439249cf08fee7b58186ccae9e4dd12f35bf58f9b4267de9109b"],["p","b2e953ee76cf442384792ea47a385268970ae0165bae3edfb9f48d877ed5a9f6"],["p","6f6b50bb6fc7353a5bf3f54ef34d421ff81731e16d7f52d888833c6aa848e5a1"],["p","04ea4f8350f1562aa1d60dc070561f5bb8386a11d1a00570fd7440da210e1713"],["p","17f2f12ce60237bfa71bcccc70b4062db190dd2d6d349b9323e9ec93b2a8dccb"],["p","d03f364a17a70354cc6c8cbf59a607e550fe8f57a02697791e9eb35be12a5e44"],["p","711876aa62348eda76c2182203f4b2ba6d154072a7f66fb9950cd434ced08600"],["p","6f0ec447e0da5ad4b9a3a2aef3e56b24601ca2b46ad7b23381d1941002923274"],["p","79c2cae114ea28a981e7559b4fe7854a473521a8d22a66bbab9fa248eb820ff6"],["p","7d3581015632d288712433d865352dd45d780ffa5ba14eeaa758e6a416c07d37"],["p","f7108e642c3966c68dbe54bd25509be2e1a745b1fce08d25e646d5db9c50b5c0"],["p","66d7101a0ded94f06f07577e536e289ecd33d020d5f4fbbfb320d4eb17d17c7c"],["p","645681b9d067b1a362c4bee8ddff987d2466d49905c26cb8fec5e6fb73af5c84"],["p","51c059f3cc1802997fe1c4e60d6315c22079a025823256160ac7bfc1bca7c2d7"],["p","a6e3fee826e7da976917ef76692a2b8915ce6af8f330b5d73620511ea1e557ee"],["p","c93406ed82c231019cf1d96700884fdedf1f7d5a32fa368b10b260cc6918f4a1"],["p","50d94fc2d8580c682b071a542f8b1e31a200b0508bab95a33bef0855df281d63"],["p","00000000827ffaa94bfea288c3dfce4422c794fbb96625b6b31e9049f729d700"],["p","20d88bae0c38e6407279e6a83350a931e714f0135e013ea4a1b14f936b7fead5"],["p","3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24"],["p","6e468422dfb74a5738702a8823b9b28168abab8655faacb6853cd0ee15deee93"],["p","2067810159470dce4c95ecb96d27bd01fe3030a446134ddddda29eb72c7d580d"],["p","3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d"],["p","c49d52a573366792b9a6e4851587c28042fb24fa5625c6d67b8c95c8751aca15"],["p","69aeace80672c08ef7729a03e597ed4e9dd5ddaa7c457349d55d12c043e8a7ab"],["p","32e1827635450e
bb3c5a7d12c1f8e7b2b514439ac10a67eef3d9fd9c5c68e245"],["p","63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed"],["p","eab0e756d32b80bcd464f3d844b8040303075a13eabc3599a762c9ac7ab91f4f"],["p","eaf1a13a032ce649bc60f290a000531c4d525f6a7a28f74326972c4438682f56"],["p","5e7ae588d7d11eac4c25906e6da807e68c6498f49a38e4692be5a089616ceb18"],["p","0fe0b18b4dbf0e0aa40fcd47209b2a49b3431fc453b460efcf45ca0bd16bd6ac"],["p","85080d3bad70ccdcd7f74c29a44f55bb85cbcd3dd0cbb957da1d215bdb931204"],["p","e7424ad457e512fdf4764a56bf6d428a06a13a1006af1fb8e0fe32f6d03265c7"],["p","834ef5926f563b89852965932bf88513595a23ad9d8b026446b454a23b1a7bb4"],["p","52b4a076bcbbbdc3a1aefa3735816cf74993b1b8db202b01c883c58be7fad8bd"],["p","a976156de0384616921e32bfc8314cc647d33843af649d2d91faabb2450b808d"],["p","ee0304bae0d4679bb34347ce3b1b80482262b9812bd0c0d5e19a5e2445043b75"],["p","7fa56f5d6962ab1e3cd424e758c3002b8665f7b0d8dcee9fe9e288d7751ac194"],["p","4918eb332a41b71ba9a74b1dc64276cfff592e55107b93baae38af3520e55975"],["p","34d2f5274f1958fcd2cb2463dabeaddf8a21f84ace4241da888023bf05cc8095"],["p","0114bb11dd8eb89bfb40669509b2a5a473d27126e27acae58257f2fd7cd95776"],["p","020f2d21ae09bf35fcdfb65decf1478b846f5f728ab30c5eaabcd6d081a81c3e"],["p","045745ac0e90a436141a3addd95575c2ead47b613f45287283e5802ff7fd99fd"],["p","064de2497ce621aee2a5b4b926a08b1ca01bce9da85b0c714e883e119375140c"],["p","06bf93843b7cc4f43669fd64627bd0e47e75d34106ca55fea60b93fa7322d63c"],["p","0c28a25357c76ac5ac3714eddc25d81fe98134df13351ab526fc2479cc306e65"],["p","0c371f5ed95076613443e8331c4b60828ed67bcdefaa1698fb5ce9d7b3285ffb"],["p","0c3849bc387593eab66792356d65c44852d1a35bf022a9b2d7cfcf50d4e6a146"],["p","1577e4599dd10c863498fe3c20bd82aafaf829a595ce83c5cf8ac3463531b09b"],["p","187aac66ef6f0598f5cb736c1757073b67a8db75b4907be0d56eda42daa81e6e"],["p","18b2ebab655ab3931dfa0346e7dc1c77ce28387a68956b9d4f0650a2cc0646e0"],["p","1a4ec27b7539c4ddef2c45afeae679af6c42c4fe9b3a0d08d1426d72490e9f32"],["p","23948d2fbac3e2097e902da16dd1b4f1005d16d8485
319f00240d828ee6c35f7"],["p","24202e533d2ef4da8acc01fa218bd0e2a85105210e8ab53ed1f3e2c270f33db9"],["p","29fbc05acee671fb579182ca33b0e41b455bb1f9564b90a3d8f2f39dee3f2779"],["p","32bea35c961e2469424c6a3d05a6f379e9d699822b9c325088d649b119e52f24"],["p","338ef72e3deebda385aedea5e89b87ec35a7d296d4a9b642bb2c1ad926007db7"],["p","35f25abceda5f71685dd378f02167cc51dd19313660951c40266a5dc3b8ad0f5"],["p","3b6a3d3bb3358836a64d1c80292b96e7698ec35a2e5ca451defa6bd3af3eeb84"],["p","3efdaebb1d8923ebd99c9e7ace3b4194ab45512e2be79c1b7d68d9243e0d2681"],["p","40e10350fed534e5226b73761925030134d9f85306ee1db5cfbd663118034e84"],["p","44f85003a8d6f05cd966516267e8ca5111df15933e185bb80e9f5246acf3375f"],["p","457e17b7ea97a845a0d1fa8feda9976596678e3a8af46dc6671d40e050ce857d"],["p","4657dfe8965be8980a93072bcfb5e59a65124406db0f819215ee78ba47934b3e"],["p","46fcbe3065eaf1ae7811465924e48923363ff3f526bd6f73d7c184b16bd8ce4d"],["p","4e3016e742f44520e9077a20f898179b026bc53d02c9a74b0a027b7d087a7012"],["p","51b826cccd92569a6582e20982fd883fccfa78ad03e0241f7abec1830d7a2565"],["p","59fbee7369df7713dbbfa9bbdb0892c62eba929232615c6ff2787da384cb770f"],["p","5a8e581f16a012e24d2a640152ad562058cb065e1df28e907c1bfa82c150c8ba"],["p","5c04fb11b79bb4ec9f8c59da5abc8c529f98c34ada48bf7f38caf62b97e0384a"],["p","5cc29169f09efdfc8cf63e3458c6938f9d9d68af02d7f39d74a6882b48d7ede4"],["p","5df21e8ec11e21e7b710ac7d6c94427407ae69e93a7fcf0d0a3ee2fac4fdc84b"],["p","623341a4a92b31d0738b39ee6be2bd3916a2326c283cd40ba46eb319d46e09a3"],["p","6389be6491e7b693e9f368ece88fcd145f07c068d2c1bbae4247b9b5ef439d32"],["p","64a8961fe8536fea89b357f192002720f8110498fa0256f8adda635b9e3e35e8"],["p","677b3a764ee6cc24d4def8f848913469868ccd4b5cff88cccbf3aa3701e9e7c7"],["p","69a0a0910b49a1dbfbc4e4f10df22b5806af5403a228267638f2e908c968228d"],["p","6ef322e8d90d39b8675d4fa27dc1d3d45c1f983e53fe751047ed7322da349dbb"],["p","7acce9b3da22ceedc511a15cb730c898235ab551623955314b003e9f33e8b10c"],["p","7b394902eeadb8370931f1903d00569545e84113fb6a09634664763be232009c"],["p",
"7ca66d4166b16f54a16868191ba1c6386a976624f4634f3896d9b6740a388ca3"],["p","82aa6958505fd4a7ecedf8df4009291044ee6f1c8c4a8c39e1099d69c94d0851"],["p","861f8c873a208385c5a9fe0fa9b4a57fc6b62770dd0563c3a93064da07f37139"],["p","8c241ca3c9061cce3f32afa39878ba59cb411a106eaf2f782d826d458995e698"],["p","8fce46c6d2dc4236b24a23382a02d3400548fdf6d286da701914280f4d6dbe73"],["p","9733f4b4559a5228ddc33d3b21164094903b8cbfde7f9cbd9bee5b4ac44e8fe6"],["p","98315132d6ab8cfe404f3a8046b8336d545f1494b163b6ee6a6391c5aec248c9"],["p","9989500413fb756d8437912cc32be0730dbe1bfc6b5d2eef759e1456c239f905"],["p","99d34baec4517212be4ef5c52362f9a690143f708cc8886b6adab211154aaeee"],["p","9c163c7351f8832b08b56cbb2e095960d1c5060dd6b0e461e813f0f07459119e"],["p","a341f45ff9758f570a21b000c17d4e53a3a497c8397f26c0e6d61e5acffc7a98"],["p","a5e93aef8e820cbc7ab7b6205f854b87aed4b48c5f6b30fbbeba5c99e40dcf3f"],["p","aa55a479ad6934d0fd78f3dbd88515cd1ca0d7a110812e711380d59df7598935"],["p","ab3abefbae4593867feb5f5ac50fe98451c581b17fea79b0d10e3384fa033e1a"],["p","b81dcdd29c4b11eccf0799ebddde348f2449efa612c6ce521b87313233af9d32"],["p","b9aee15fc0559afdebf2069159ffdeaa24592198e637d292d0e66f7f8f6df3ea"],["p","be1d89794bf92de5dd64c1e60f6a2c70c140abac9932418fee30c5c637fe9479"],["p","c83723d33fa86c8f01b254b1dcaaa025b2ca659320950d044d22c41b5d1daf29"],["p","d0b327c688aaf250a7b62f7f2a92d1bce0424ae90aa664891f9b90071ae748c2"],["p","d4843f4c280abba3d43d84ed7924b2567d7c166f5e72985b9f06d355601b5d78"],["p","d5bd2aedd006cf7eda5f39a84c0c1bb98e5f9ac97094631b0ef533d67a0d3896"],["p","d8a2c33f2e2ff3a9d4ff2a5593f3d5a59e9167fa5ded063d0e49891776611e0c"],["p","dcdd29d5f71beaa1c362e6aa384dd073fdcca1155caccd29f82d43acf8be7598"],["p","de7ecd1e2976a6adb2ffa5f4db81a7d812c8bb6698aa00dcf1e76adb55efd645"],["p","df58986e1190c8f350a3769b766d20f4d9caa23d2dbc4b6b7188eb353dd16dab"],["p","e33fe65f1fde44c6dc17eeb38fdad0fceaf1cae8722084332ed1e32496291d42"],["p","e75da451e9e743b43cfb9a0d7f30c4df7272e7e53b03e999e17cedc7b8eb40b8"],["p","ea429a67b2d57bd931b291a65827
71abddf1be7583c0d49538f3ba4b156f81fc"],["p","ecb3fd820e9a7a78bb919e5e7eb4bf256471389e896041c5e7d4d6fd4828e9dd"],["p","edd132ddb63503de7bb2f6960d18f90dae70d376258e0dbfdd7fa739ca2aaa91"],["p","ef151c7a380f40a75d7d1493ac347b6777a9d9b5fa0aa3cddb47fc78fab69a8b"],["p","eff26639ec9a8098b77109021e2626d2c6402f1291b0f3d83e1911cf749caef1"],["p","f2ab3689b2c8996451eb9cfa0d0c7ad9ecd566a174cdbf27f586f611bd510174"],["p","f2c96c97f6419a538f84cf3fa72e2194605e1848096e6e5170cce5b76799d400"],["p","fcf70a45cfa817eaa813b9ba8a375d713d3169f4a27f3dcac3d49112df67d37e"],["p","8222016b518f6424a3ce2857bb3ee93e2df2c98ca8afa5accb89653cc74e9866"],["p","cb59f0345b1ef0cf3d3c31ff28376f5b28db38daa8226ce6d03bab16d47dbc83"],["p","2d0732f0a98f3c0ed235296153d06e6518a99315d624670a9d92ff4d3d7fa9f8"],["p","7bfda8851f0bebfaad675be189f78532e95579270dd9827d6a6df66d7a768195"],["p","5abcdf4d9e26ea23b827ec580c3666c13f056f54f54c199472e47b0d60623c3a"],["p","4a0cbe797a3dec633408209e014596a0935dfa986897a36de0f7e6b49a9d10c7"],["p","c2c20ec0a555959713ca4c404c4d2cc80e6cb906f5b64217070612a0cae29c62"],["p","29544f8f3f79f65828b1704370774e87cb80a944c20e5d78adef7731e157b689"],["p","ea2e3c814d08a378f8a5b8faecb2884d05855975c5ca4b5c25e2d6f936286f14"],["p","3fa2504f693c7f0fe71bded634339ae5383a8b14b4164d7c4935830b048dce12"],["p","319a588a77cd798b358724234b534bff3f3c294b4f6512bde94d070da93237c9"],["p","7ecd3fe6353ec4c53672793e81445c2a319ccf0a298a91d77adcfa386b52f30d"],["p","532d830dffe09c13e75e8b145c825718fc12b0003f61d61e9077721c7fff93cb"],["p","d61f3bc5b3eb4400efdae6169a5c17cabf3246b514361de939ce4a1a0da6ef4a"],["p","bd7163c3a05eda1bd2789e886a58d052ae6739d1796db1d0420609584e12e30e"],["p","687431378b0677f75f57d82ba3a6333b57d50c7e45b740af04074b7e637a4e5d"],["p","c48e29f04b482cc01ca1f9ef8c86ef8318c059e0e9353235162f080f26e14c11"],["p","0e1eeb6e6eb38112ff2bc4ef96f5356723f8b43df6921be4f7a80fbdc69a10ac"],["p","63d699329db8e38569f2589b6a3aa7b0f231cec83a7789e29f0a396781bd7d72"],["p","68425139b06dabe49cb6f1396efa064b64739504b7d2ce7350edaf752
cb79fca"],["p","23249c4d0e3dec5e29240dfc248ef9b5944558441e3363139c68fb3f587f1b3c"],["p","a05d4b701627f56efb90f71d21704a6949b70adec1a6b23210d7bb0a3a2ca6a9"],["p","2f35fc6a9707ed52108083f288e5c7864485666bd69b7f924242ccf5b83cf494"],["p","89e14be49ed0073da83b678279cd29ba5ad86cf000b6a3d1a4c3dc4aa4fdd02c"],["p","b9360cd808b24ecbfd03575f3d637b1e62ca9fea415ac67b6c5b11ef15f28d06"],["p","94e268f4aca4cc14613e1a6d50dab40882b9f08a31d7f6ba81604429b1bbba0e"],["p","9a2dda390fdbc8f3932efea3952195dfeb0d99e3968b66c36f5f372ee61af460"],["p","4a38463c2a75e68c24416e7720a3b3befbb0ea6872d5a04692c39e18e8f2dcac"],["p","72936ba9f4f21f1563e2e5001aade6cce3acc162a4d99a823d231dc641b9e3b4"],["p","c1fc7771f5fa418fd3ac49221a18f19b42ccb7a663da8f04cbbf6c08c80d20b1"],["p","0000006a13e10fb648049b5e78632a0c2bf09eaf6a9d55d081b82baf86c951be"],["p","b17c59874dc05d7f6ec975bce04770c8b7fa9d37f3ad0096fdb76c9385d68928"],["p","000000dd7a2e54c77a521237a516eefb1d41df39047a9c64882d05bc84c9d666"],["p","369e357c8275e2bdf198c34bd85f665b6052b631ce8350ce980792bc8966c169"],["p","592295cf2b09a7f9555f43adb734cbee8a84ee892ed3f9336e6a09b6413a0db9"],["p","113c545faac244be3f90c9404e6b090bef1b4865819bcd419c1d9bbd1dca3092"],["p","4bb1cc354560bf907f8714870f34c1df07c920768938a62c1ae40d57260134b2"],["p","6ed65feee1baee7cf57dca1d30b485599cda231c95f828c0c94559b39af599d7"],["p","c5f8bbbc7ca6c140cb20d6fa8cba36866ada6fce408168ae627d9007722c5ebf"],["p","5b0183ab6c3e322bf4d41c6b3aef98562a144847b7499543727c5539a114563e"],["p","acedd3597025cb13b84f9a89643645aeb61a3b4a3af8d7ac01f8553171bf17c5"],["p","693c2832de939b4af8ccd842b17f05df2edd551e59989d3c4ef9a44957b2f1fb"],["p","e69c3fa8a2113a07e1f8fb909a3a1cd3b30cb4d349a9d74577637f536f38abff"],["p","2cad5a4855a23027276a510a2d14d7ee4d19b915f3447a89cf2e8dfd0b4aeeec"],["p","d40023204289dae3ad69ecf5e64893988e108f7cbf3fca6df9786cc275d4627e"],["p","2cdeb9aab4913f1d59c5c882c9847712ce05bc4f7786c09e28b4a562e72b8503"],["p","435d0addfa045737179f6f2d6ba975acf3c5736845b97f05406be1bca9b3bb5b"],["p","90b9bec747896
88e515125596ab6350bfe646176ac75742275063922c5fea010"],["p","1989034e56b8f606c724f45a12ce84a11841621aaf7182a1f6564380b9c4276b"],["p","8fb140b4e8ddef97ce4b821d247278a1a4353362623f64021484b372f948000c"],["p","cfe3b4316d905335b6ce056ba0ec230b587a334381e82bf9a02a184f2d068f8d"],["p","149e07e619eedd32e47568d3197e36e270f4b5b048c6f652bf56fe90667dbf89"],["p","eb3a338922741d7014d78ade8c53f138534b6603b032a8289b1cc9b645adfa83"],["p","a9f8b3f2ac19cc06d5194dd1ac9314d4741a09777444986553926d9165181647"],["p","c7d32972e398d4d20cd69b1a8451956cc14a2e9065ad1a8fda185c202698937b"],["p","180a6d42c7d64f8c3958d9d10dd5a4117eaaacea8e7f980781e9a53136cf5693"],["p","b65f7930b335304ad851271c5b3877c93cac06f73e3374cc775225fd9da0f725"],["p","418a17eb8c7a403fb15334264f85fb7a35d33e1cfc7d6bed610931d1b64b2b3a"],["p","4308ff20ced73871b69ed8bcada2a051bc3a05610d1183c54d67b47cd0f7035e"],["p","c4f076133f1cb058327a3ed6d27bacbe6f8880179073f7f3eda87093de21dfb4"],["p","c6eefbf62fd2d58922cf78d39d8b9bcdbd7292449041a0e27acf6bacd1717ba3"],["p","c89cf36deea286da912d4145f7140c73495d77e2cfedfb652158daa7c771f2f8"],["p","e6618db6961dc7b91478e0fa78c4c1b6699009981526693bd5e273972550860c"],["p","582d2a4639fa1d074ff32053fba4ba7d6f0e6bf4c6794eb728c222b838f0c575"],["p","75da94027ad408bc2faffeb1e67d71babe8d78d89c3620da212303b877a65b5c"],["p","8c71091d5b7dea833eae73bb07e28f12cc456dc791aa7f213ad175fb5a1d6fe6"],["p","54fc669ccc03a47b3d95a9111bfddc590863d26a398c7149d2d349683b8451c8"],["p","978c8f26ea9b3c58bfd4c8ddfde83741a6c2496fab72774109fe46819ca49708"],["p","871ac06f097cf8f0e9ceccdf0a9f21c5d8a0261edb17b1f59a39b5b377f75d44"],["p","d7536260b88cf7e5cc3fca4f82240f342bc098762cf3b855d5d765e78c95ddcb"],["p","66a6db0e988d13c4a84e2b325fa58ca14f8bbf140728c566d1381e1a99474cab"],["p","d307643547703537dfdef811c3dea96f1f9e84c8249e200353425924a9908cf8"],["p","c4d2e2629ac9577cd36689a267b7ac8dba9a520ad44639699394c58582029433"],["p","b9e76546ba06456ed301d9e52bc49fa48e70a6bf2282be7a1ae72947612023dc"],["p","8be2fd2cf7cce65a56f0820b022125e9ab4044c7dc
5e444e2c0c0eab7501b0d7"],["p","72f9755501e1a4464f7277d86120f67e7f7ec3a84ef6813cc7606bf5e0870ff3"],["p","2aea307f6c48e95ebcc42de3ad2be88e5566ff659c90ac0234edb416bc0787aa"],["p","84580515242fc91b6bec5988b6f43e46f05c2de55612e0ec41cecdb4a2059f18"],["p","717d4b62ed661dec51fb39962728a424dd00f8edb68da2c52851b79a0ad25465"],["p","76c71aae3a491f1d9eec47cba17e229cda4113a0bbb6e6ae1776d7643e29cafa"],["p","1743058db7078661b94aaf4286429d97ee5257d14a86d6bfa54cb0482b876fb0"],["p","20c9c5730c48008e964107182286dab42cb8c943b0da9c59438b542760533f06"],["p","296842eaaed9be5ae0668da09fe48aac0521c4af859ad547d93145e5ac34c17e"],["p","497e2ad9609d900f7b2fab27b27ba478b7a4a578809f52288fd47c5e431a7e3f"],["p","4075b1664a065b8aab93908438888daef4825b92600abc152b850034be55405c"],["p","16bd5ce84b9e75aff00f06d71f9467e62da38813168da48b8eeb6bade5fb9393"],["p","43baaf0c28e6cfb195b17ee083e19eb3a4afdfac54d9b6baf170270ed193e34c"],["p","971615b70ad9ec896f8d5ba0f2d01652f1dfe5f9ced81ac9469ca7facefad68b"],["p","470be0e81485a5ff4d430dab3c7b26c5c74fa5223370a63d7710907a619c49d7"],["p","ad2fcf2d1c717758e5ac2d2fe9ac23f387acdebba5c79dc41934885aa8ec8ff6"],["p","5be6446aa8a31c11b3b453bf8dafc9b346ff328d1fa11a0fa02a1e6461f6a9b1"],["p","8fe53b37518e3dbe9bab26d912292001d8b882de9456b7b08b615f912dc8bf4a"],["p","0f51985097dcf1bda4dc174a92a4da3a65c7ccd3cb97f4a443e861c4f4d4db1f"],["p","3ce95e3bb7bf6d3ab84de5721413ec016050226420461f15e524489d9bfc28b1"],["p","83f3d191a47d2d03e7f574adaf4edb0440b492785266fb3af44093d23e558d17"],["p","c03f8d7c5f158d51dd14b1db3cbe8ed8b7f6b934e77d1917682d4f517b13e5e4"],["p","4379e76bfa76a80b8db9ea759211d90bb3e67b2202f8880cc4f5ffe2065061ad"],["p","92cbe5861cfc5213dd89f0a6f6084486f85e6f03cfeb70a13f455938116433b8"],["p","2edbcea694d164629854a52583458fd6d965b161e3c48b57d3aff01940558884"],["p","3941f72c64c6c7b4a2a0a3d209f758500aaa517307eee8d0d416186ec48767be"],["p","df173277182f3155d37b330211ba1de4a81500c02d195e964f91be774ec96708"],["p","521630479ced897cce77d73e1fcd9a704d4f3028ca580d78d905d4312d339d34"],["p"
,"33c47c54d9cca7afe9274cda812297a0d5918faa63fc786207c96b9a924bcd58"],["p","a8171781fd9e90ede3ea44ddca5d3abf828fe8eedeb0f3abb0dd3e563562e1fc"],["p","8781706462080b2f1f62f42f9ae426e7b571dd5d4f0cce4249e8a6f0bdc694d6"],["p","8a981f1ae3fab3300b548c4f20654cb0f1d350498c4b66849b73e8546001dca0"],["p","93a43823aa8fe8d8b4f6c4249aad91ecfd84708bcd76d38e68a91239c8ef459c"],["p","76ea2cfb8ca5cf7683d63c850a4d2db7214d3a850bc021301e3b84f360905955"],["p","9661c86de40de72f84c047c98a0b7e88ecf9a47a0ec906eca16c260a81c10052"],["p","d2704392769c20d67a153fa77a8557ab071ef27aafc29cf6b46faf582e0595f2"],["p","2250f69694c2a43929e77e5de0f6a61ae5e37a1ee6d6a3baef1706ed9901248b"],["p","87e3d38d3cb63d9f4c43d4a71062b42dc7a01af2d662be5eeb7d211ed153c744"],["p","60aa407564c268bfdab075a736f4641b4f53415ad75465875cc3032f076f5eec"],["p","3903a3348bb1cdaea9e6ee262284ed547891c15d02d9949846d5556aa0935bcc"],["p","5b12cc0c996685cf44aa28f992330566fec1d3b64e46fae76239360ab8ac8d33"],["p","7fcb8acc15835e07fd8b064130236243c4a798346114f358628163eac84e8a30"],["p","385cec32516d8b9149be96b583a4912e87776a0fca246281875b9c5aa5193faa"],["p","62559d88c94220e4e74cf312ce895212f2afe0d37149ba468ecf3855281d7346"],["p","f47d575f2c441f579acda9d032950f36c266961302e9e6c12c585f2c496724b2"],["p","372da077d6353430f343d5853d85311b3fd27018d5a83b8c1b397b92518ec7ac"],["p","deab79dafa1c2be4b4a6d3aca1357b6caa0b744bf46ad529a5ae464288579e68"],["p","3801b810302319202a3ded5474ee8fc484a0f56dc182a4e8ee9e30c7c6c14915"],["p","42cd5d2f904503217a57d9302bf12943bfee915a05f51f7d69edc65fa9b292f8"],["p","38ccd6fad4e93e4a3cbaa74a361c7e00c1b34e2b941a18d4e6429f4dc06e260b"],["p","c027a5c368d76620072b15ba36b8502a5477fc4f316163e7ff7c95046df9f109"],["p","7b991f776d04d87cb5d4259688187a520f6afc16b2b9ad26dac6b8ee76c2840d"],["p","a2ca85ffc3b2c8a6112e41b7ce5c1beaf88583918aca06c8d63138eb1e7c4d6b"],["p","c0cd5aab24c06850e95dcb9b7b6506540c51cadd999d18a2d637b4282e16a2b5"],["p","e07773a92a610a28da20748fdd98bfb5af694b0cad085224801265594a98108a"],["p","a6e8f8a2b548d9593fc30b09cdd
5cbbe8a782d53b15b0f9f6bd7951875bee5d5"],["p","67eb726f7bb8e316418cd46cfa170d580345e51adbc186f8f7aa0d4380579350"],["p","65594f279a789982b55c02a38c92a99b986f891d2814c5f553d1bbfe3e23853d"],["p","803a613997a26e8714116f99aa1f98e8589cb6116e1aaa1fc9c389984fcd9bb8"],["p","21f2c717256a79aa1ad4cbf217759a2cc560e33692050cac4afd5d13b98c6c2b"],["p","73cee26b0ba3c05fe676ad47a3f07f791c358b39217463519a66df74072f3bb6"],["p","8a044409cee04f124a49db9411bc183519573f1beb31c82980867d1232125ee7"],["p","23d49394612585706c72908a5e3904f95177ea087b032ddbfcd2862304c7d983"],["p","5df413d4c5e5035ff508fd99b38b21ea9a0ac0b9ecc34f3312aba9aa2add4f5b"],["p","61ee5dcafd2da0afbc1bfc689f45bb7096247e16747f2cf7d15891dc533c5998"],["p","28b76e7d30e6812259551c7edc6cd2aa7ce0568d25a0133d2e0b2f8d10bf6097"],["p","c3cdf0195ff6fa46a4302428384173db22704e39855cb92ef7fa4f811b15b2e2"],["p","cbb2f023b6aa09626d51d2f4ea99fa9138ea80ec7d5ffdce9feef8dcd6352031"],["p","b5ba65fbb0221a32b6c14400f505cfdd3651d43938a248a9265a516ec0c54240"],["p","3d842afecd5e293f28b6627933704a3fb8ce153aa91d790ab11f6a752d44a42d"],["p","3d82e8f6cc6c096544bf33e4875a03f5600acff7ec467e150a1cc7712d214dc9"],["p","8d8dc47f14255f06bcb07903daa2d44230a3b7e0c04092f08930fe1f7b2efac1"],["p","480ec1a7516406090dc042ddf67780ef30f26f3a864e83b417c053a5a611c838"],["p","f28be20326c6779b2f8bfa75a865d0fa4af384e9c6c99dc6a803e542f9d2085e"],["p","765d9522752f573aeeae5087f01e5ed8b64c3ca7ffaae004a3b784c93f026d1b"],["p","64286647ab922754ddc5c7e96e6f6bf259fcd64686806a6b6aac11755f6c7296"],["p","ec14b4cbd56f5818a9be8561e3f82dd9ddf778cd1d1de10e2c01e5a965a8a338"],["p","5c50661b092cc786b55cb5d69149b129bdcc8e05d7238cc653de9f465ed69f91"],["p","b804f68e970d94d446e7db3ed9791146d6a2aa252d9587a666d12fc02fb0fa1f"],["p","956f7b9664256862e0d1214745d269c71205a427a09e9fd5e7760bc401cab45d"],["p","ecad7a30a24bd09fd0f009e38e5b5f81e41d43e36d7f353edb0a6c2272fd87f4"],["p","39647228efddf22f84a7ce36d111b378ce5f3d15b8401347855e04b24c8e4e04"],["p","7997cee42f2d677aeeae8e5cd674a583f7e031f067b8785b21ea52c2
37e3a5d1"],["p","4ca4f5533e40da5e0508796d409e6bb35a50b26fc304345617ab017183d83ac0"],["p","d376c4df7ee3ac69dcc88bedaee04e545c6ba190d2a710f05fa2c960f6bde9f3"],["p","b2dd40097e4d04b1a56fb3b65fc1d1aaf2929ad30fd842c74d68b9908744495b"],["p","a536ab1f7f3c0133baadbdf472b1ac7ad4b774ed432c1989284193572788bca0"],["p","4523be58d395b1b196a9b8c82b038b6895cb02b683d0c253a955068dba1facd0"],["p","cbbfb8734acdd9b02903b16d802a8de1790b09de6c139bc995d75bb122f19895"],["p","e4b67f9f7c0a1cce1c24ca9196f8e1446fcce17fdef5d5eb46a3929433ea4d91"],["p","9b6d95b76a01191a4c778185681ed7f3bced2fffa8e41516ec78240b213285f5"],["p","17538dc2a62769d09443f18c37cbe358fab5bbf981173542aa7c5ff171ed77c4"],["p","ae7b06b64340cc070d11fd85a3a5ef55bae9f3ea5782d617ffa88bc2ab657f72"],["p","f6f30bb15f46869271c245e352921fffc1eef5776d286e3e4894e0ae905a1ad5"],["p","e40bcc3e12921ec232fe66528e2ba5d5cd4e0688e4bfa083a486a97fdaadceaf"],["p","a4dbfdc6e7e27e33b04e8009cf15dd1df35d62a9b258e70c38166871a577c47a"],["p","5a6440553acacb4f820127802f1ca1b0a66e70783ad70a9f7ba81c392107e5d5"],["p","2b52df38e2325a92381b0182b2a9c0d2cd50bf6c4819a270887e1856f9bc9579"],["p","32da0d4511aae226c714edd38d519e268244f35178beae1f5377d38f0ff77442"],["p","66e92d77cf0f668a2f404d4c270a41fb8b0b78b21450b3166e8f1fbaa0f60b87"],["p","921128ada38c463ed8137cfdf798979f39cba14077724f95e649c53c99ee77db"],["p","e88a691e98d9987c964521dff60025f60700378a4879180dcbbb4a5027850411"],["p","e2ccf7cf20403f3f2a4a55b328f0de3be38558a7d5f33632fdaaefc726c1c8eb"],["p","6bd4f9d97fb67fe03ca6055b411291752467fd049dd66b23c842696193b2cad5"],["p","c31e22c3715c1bde5608b7e0d04904f22f5fc453ba1806d21c9f2382e1e58c6c"],["p","0ffb0df6e0193519592e8fdc4e638bd560308c56dd1b3b3f83eea09f24d39020"],["p","9aaf0593552ec164a663747291a5a9f5e325f76ca7767bf1962e78083939494f"],["p","82606831017c96aff924507d18741988399f40da4f4738768437f24b79c0c2f1"],["p","da3bdecc5e31a3bd3e9e8ecf54b36040a168a7bf35dbddf5d25634cfb6d1f930"],["p","8867bed93e89c93d0d8ac98b2443c5554799edb9190346946b12e03f13664450"],["p","3c0a6d434e28
f68a4b31d38ece15b33c1518aa58299b7d315426d716c5493ed4"],["p","871c6968069ae0afc0f21bfe1910b489f5619494c5a7e06434c168fd45d89532"],["p","1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"],["p","fb7b958399ec0a0dfe5b56090e6dd56fc5215e8cde9585f5bc234356f09a7d32"],["p","64bfa9abffe5b18d0731eed57b38173adc2ba89bf87c168da90517f021e722b5"],["p","8047df981a97dd41b48f554ac00e90bd62348fe65384c88ef29032d752857143"],["p","000000005e9dda01479c76c5f4fccbaebe4e7856e02f8e85adba05ad62ad6927"],["p","e6c3340cf1385c48dd1967c28b70234e7528245b3c089a601b3e42176ef7d160"],["p","7b3f7803750746f455413a221f80965eecb69ef308f2ead1da89cc2c8912e968"],["p","8391d6d55db48a45df2f44f6149c4b037e654f175404f338ba54f798cd69fe68"],["p","2ec8bde62533599f222786c5f6087c7e10209f8306b470a602c60b78cd7fb94e"],["p","58d0d3b005c51b7e0b932a548bf6b7c7fe76c397248fe88da12ffaf67480c6b5"],["p","d49d746504eebd8cf374735980fcf69b6bb958f71ada40e33d127668f82c1014"],["p","dd81a8bacbab0b5c3007d1672fb8301383b4e9583d431835985057223eb298a5"],["p","3af187bfe00920d87068a6bcdffc48f4b241d1d82594cd395119a30891041654"],["p","70a4d29b7f5d3695d9c7b31ffc04afba16079cf6e2237189bb7fa354fa317309"],["p","ae60eb949586948b516c8221ed7500e3b4f134ae0806e3fdffdeacf96b358117"],["p","feb4e6277e69aedd3b6c0f2c1904976397f30b8f003b201a4908ea3fa7a7bfe8"],["p","2ef0fccfd5a55e36bc8be3a525c1ce97f20eabaa94e69d82febfd641d9480c35"],["p","e54c21c7ca38bbbf57a6b9fce46e5b33eda927da6dc90cf65239a2214d7e9087"],["p","492bf025bc7394a95e83dd64995669bcf0d909536e30ef6cd73c86c53e34ff10"],["p","e6c282d1a1a1bdc7254b1b6932df32c516a2f7f1036d199b37b9e13129a3af26"],["p","c02bedab495a8d73e23192fa161a0b8344821f48446811b1e810fac65b584f49"],["p","4c3ca21cd9edb94db4153e80daa549a9336aa674bfff2029ee171d0b38861f30"],["p","7cc328a08ddb2afdf9f9be77beff4c83489ff979721827d628a542f32a247c0e"],["p","cc76679480a4504b963a3809cba60b458ebf068c62713621dda94b527860447d"],["p","72d3b3c8d52ae5ea0a37728f74551e732317a64f047ba0834f63cde4da7e22a8"],["p","0904cd8792f87042bae46ff1d24516dbd4ee3d3fc
df9d8f52d7016a5100b8c70"],["p","2d9873b25bf2dda6141684d44d5eb76af59f167788a58e363ab1671fefee87f2"],["p","e20f5c24d81039642ebee50f553cf33d5b8eed185eccd27d149b5e8fc733ac78"],["p","06c2b1612e31b284b2f7841083e09d4e1d4a67b466733a0978da3cf4c4cb74ea"],["p","be8baa6b437c12ab20bb43e7bcf66121e09e76fe0aff341c93946e4c2e4490a4"],["p","e034d654802d7cfaa2d41a952801054114e09ad6a352b28288e23075ca919814"],["p","c9dd2f54ad27e1975436d9789117d760d76d45c923f0902b05432a5f440aa5db"],["p","4c2f8edb279e8b553c44132a9c88c53af971c5e226a6f2a128b7e250c6fd066a"],["p","1756aaa0ed77a552c2b9d2d9f8bc33925e68f7a5fabc4b0fa4d06c0d436853c1"],["p","95c7f867802a9c7cfb1d2b243be152234864e2cf9f60f467ae32bf1cf05f89fb"],["p","ff27d01cb1e56fb58580306c7ba76bb037bf211c5b573c56e4e70ca858755af0"],["p","a723805cda67251191c8786f4da58f797e6977582301354ba8e91bcb0342dc9c"],["p","3d2e51508699f98f0f2bdbe7a45b673c687fe6420f466dc296d90b908d51d594"],["p","13bdb64432e556238e23e2d785edcb56339aad4b83782cafb86b577cbcfa46d9"],["p","538ddc49e1a781d8a608adad12d2017bb3152b68820483fd8fc3739b6204829c"],["p","385eacfa42fc0831b4975983c485e0c7c55ed0f5e4f56d79fa7a7151fb0a06d7"],["p","fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52"],["p","3356de61b39647931ce8b2140b2bab837e0810c0ef515bbe92de0248040b8bdd"],["p","8450da93568724f32b101b08a18118938c311332d4a54c87ad564e70918c434c"],["p","11b2d93b26d7e56fb57f0afce0d33bfa7fb35b913e4c0aeb7706464befb9ca97"],["p","50c59a1cb233d08d5a1fb493f520c6b5d7f77a2ba42e4666801a3e366b0a027e"],["p","eeadea6cbb5018a190f0117857de513cc271d24c947d56cd82c54a6b64ae47a4"],["p","75f457569d7027f819de92e8bb13795c0febe9750dc3fb1b5c42aeb502d0841d"],["p","ffd375eb40eb486656a028edbc83825f58ff0d5c4a1ba22fe7745d284529ed08"],["p","9eb45a0cff1f66e6bc58178f4e0a53484807484c1b7e51f3582a56fed52248c2"],["p","1f2c17bd3bcaf12f9c7e78fe798eeea59c1b22e1ee036694d5dc2886ddfa35d7"],["p","deba262b2d87f7ed1252241e607bd1bbf42e67354992f89e7536d65d7a19e423"],["p","1739d937dc8c0c7370aa27585938c119e25c41f6c441a5d34c6d38503e3136ef"],["p
","e77ec5911d3de5ab21c5dbb3e54e050cf583d9bad007c614f9ea73ceb91e22df"],["p","943c9d0e5120601823611d5579bc349c30ddbace4906970cddbaf801b8f12613"],["p","a9d2ae96da6f7c75ab7819c85c2d3bad55fce325aef00237881d7d2ed801e11c"],["p","133cd2fe9dc3c4d166f4d8924ee5186a30e99f6ae74cd0c2458dd01acc12db5f"],["p","50ea483ddffeeed3231c6f41fddfe8fb71f891fa736de46e3e06f748bbdeb307"],["p","6beb9b9791362595b2c39b8102253eae2b1e19a71d03a510104ad25c324a0939"],["p","7560e065bdfe91872a336b4b15dacd2445257f429364c10efc38e6e7d8ffc1ff"],["p","4eb88310d6b4ed95c6d66a395b3d3cf559b85faec8f7691dafd405a92e055d6d"],["p","6f3a43a2a356c5b58bc472f42ecf2824345f1508092449c8dd320fe33a1bc2d7"],["p","2dc735e382d707520354126b9f954ab46e8dc7cbbd0db53d210479d73d1dd5c4"],["p","d4338b7c3306491cfdf54914d1a52b80a965685f7361311eae5f3eaff1d23a5b"],["p","35c4d017370298b107b7251509c0df85628118da5b2e21974f1149311e46bc39"],["p","784097c4d8521468de52e91c0395c08ab1f1d2560b039fe095402ef532245370"],["p","648c0f5302c75f38382a4d2c85a482b927cc61b2828a0794e36c6cc796de86a6"],["p","1b5ee74df1f13eb85d54d36bc19a4180f44ce9aceeba2ea110f9ea79bb1aae6e"],["p","466ac208863d019ea9d97fa62698e3a7703a5a52386cdbf541fdbdc7dcfc63e3"],["p","6d3d8fe28eee088ef8c4ead4db04415e5ae174fc4f3081d03ab8299c4063a6cf"],["p","1fd43567d182cea764bc43e7f1e434e28a5f534024016415364fed9a81f8977b"],["p","7c584092ed68fad2a8e5104ff4a9a6fdac3d82397f16688115a973d3994ecff5"],["p","3cfa816bb4892fa6be993ac72a9fcdbb089bdea0c5d9011fd204d154545fa2d9"],["p","4058c03cd10c9f9fa8b5b89be8588ee85f832b1ec41f5f2359e734255c3b5750"],["p","74214fce7752e5f09c8983815e72021f4a1e7b7a75d0a71fd147a900d1d17d91"],["p","813c2662366a12f6337b951c048552fd3c4894e403cab701634dcd803786dc09"],["p","fe3373e56901dcd41e1d3c058d541b772fe0339fae63c292b7255d0cbcfbbe20"],["p","285d4ca25cbe209832aa15a4b94353b877a2fe6c3b94dee1a4c8bc36770304db"],["p","bd1e19980e2c91e6dc657e92c25762ca882eb9272d2579e221f037f93788de91"],["p","922945779f93fd0b3759f1157e3d9fa20f3fd24c4b8f2bcf520cacf649af776d"],["p","1004bb54927fe6cd8ce421d163
f95ee228fd113e0f73b02398abf81de00a49bd"],["p","8a8ea365c2215ab47391dcf29cd952ba632c11d5d7cc3011e7a84224e9f4f569"],["p","1bc2e23bbbf8ace7de552d3206b753d2511fac600a971e509231f4688a05ecb3"],["p","20893557000c1fb5913ac94106d695c746e07f8fbab8d451b704761dc443e146"],["p","56a6b75373c8f7b93c53bcae86d8ffbaba9f2a1b38122054fcdb7f3bf645b727"],["p","787bf36bce3124a472a9e9107f697bf04fb6a92bab053aca97879901f98fa4cb"],["p","3db7ca42a8e83c04bd52ce836a9818b6e9052e9e3aa85d22c2b3c715f121f96f"],["p","1e52c554676021e891a9ddf1a09f5ddc7ff3d11e5c4796910bff2d8121771908"],["p","56899e6a55c14771a45a88cb90a802623a0e3211ea1447057e2c9871796ce57c"],["p","15300dbf5bc7edf2999836b359efe3fc3d6d5f4fb78b49557786ff63598b8792"],["p","f0fb31d1810a9f95df3d178fcd67ca0b09879ad11e8689e56962cd839fb8ead4"],["p","44e8f823d92a78b0e7da0f30c067a0cc1e832fa18dba28d009477a978c7d569e"],["p","efc37e97fa4fad679e464b7a6184009b7cc7605aceb0c5f56b464d2b986a60f0"],["p","f96c3d76497074c4c83a7b3823380e77dc73d5a9494fd2e053e4a1453e17824b"],["p","cf473ebe9736ba689c718de3d5ef38909bca57db3c38e3f9de7f5dadfc88ed6f"],["p","1dfa6d98c8852567545980c2b9b9b498ed3bd3f6e9a7f10b2811581bfd096fc1"],["p","9969d2eac150709d1dca0d42be38617a09ad14a7d7220d4c9c7cf06307c02fd9"],["p","62fe02416353e9ac019c21f99b8288f53d1d29ea2d860653a67690d747d6e4ec"],["p","9ce71f1506ccf4b99f234af49bd6202be883a80f95a155c6e9a1c36fd7e780c7"],["p","dbbb5c7d5d20094ea6ca9a0f59ae373cc0190163312724bbcb2425be04f57823"],["p","b9a537523bba2fcdae857d90d8a760de4f2139c9f90d986f747ce7d0ec0d173d"],["p","8e432ad14d3955d0863b975778f0c8817ef88c9f119d626da1a3face584bda73"],["p","d21982fa3e38f21b2a7e4a5c781f8e469347f0f3ddbcd3fd199fbad407448bb8"],["p","22f22e98a035b8ff97d4cc4c5b34e65ccecae258430eeca46b17b1e8aabb8a5e"],["p","ddff07845a831e9c5e08cad7571e484268926c220013f5bbab12ed5bcbe0ea05"],["p","d3070b8d93883c80d8a578387ca33ae064dc30218ad04e83bf4edb277c881fbf"],["p","cfdd3f3d6b02c20aa5684e53455ab9896ac1db451bd660040067714f0dcd152e"],["p","126103bfddc8df256b6e0abfd7f3797c80dcc4ea88f7c2f87dd4104
220b4d65f"],["p","852c84ec56934835c298614627595df59e8db721db88aa2b36bf031ce1c42543"],["p","3e1f9f86effed965a05c943171ce8224214a7c1a764abad495f255b19db47370"],["p","6771e897a555ac5be4b997db1f0ffeaf0a779763da7253c11b8fc4748d8173cd"],["p","10bdc222a7e7562ae8e7418cdda17d788183be1ca880acc637fde04ec3df85f0"],["p","b11c0331dc196952eac7edba149e8a2067463420cb64a72692ba8cfae7dc8443"],["p","444eaf65c2d35511fd0dbdbb0fe3c74139fcd144de00d22d57819ae5fdc76ae8"],["p","1e7fd24d076a6088d9af4e43644ff112a9b2be5842bc2715d6727cb485a588fb"],["p","fe7f6bc6f7338b76bbf80db402ade65953e20b2f23e66e898204b63cc42539a3"],["p","8f79fe359293552d6cac527b93ed28e2f7f9ea2ad9781d25cf92d36fecb68088"],["p","b12b632c887f0c871d140d37bcb6e7c1e1a80264d0b7de8255aa1951d9e1ff79"],["p","28ca019b78b494c25a9da2d645975a8501c7e99b11302e5cbe748ee593fcb2cc"],["p","47bfd5caed665500772533e2e3190c0db4b6d3941d1509b5fc1f3a4cd37d1fdf"],["p","e62f419a0e16607b96ff10ecb00249af7d4b69c7d121e4b04130c61cc998c32e"],["p","49e3ca8a4e680a8914b8736f72470c9e91f609905b01650c75d3e189a49f2172"],["p","0d687fae4919b7826ed59443f86e16874104e958421a065d85c06f67b9a8f897"],["p","4a961e50e01811d177b24ad4eb2eca7c5baf974944fbdaab811161937caedbde"],["p","53922a6a03fb01d7251b6c49594c4b5935b5df52d4da5706fb98fc483e3f493e"],["p","dc4cd086cd7ce5b1832adf4fdd1211289880d2c7e295bcb0e684c01acee77c06"],["p","58cd5b2b45d2388e4b8657aa7e805c0d5efa967bcc7e76aea97ef0eb4c95d5c6"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","20986fb83e775d96d188ca5c9df10ce6d613e0eb7e5768a0f0b12b37cdac21b3"],["p","38dbb9b07d93861d40620ad62d44b1a8e8785df0997eeb4454f12d217048cd5c"],["p","4aecd26331c402e0b440dfb06cf2da6a363ec28949175d37352f7db9b4658542"],["p","cee1f88adb2f82457ffd58549a6d2c9957af9ec65665c615317a532648be9c86"],["p","bcea2b98506d1d5dd2cc0455a402701e342c76d70f46e38739aadde77ccef3c9"],["p","4cf5136be3f8f2cf6d08541cd5517b2e48d2eaed85ee82ee7f4c994ea576978c"],["p","7579076d9aff0a4cfdefa7e2045f2486c7e5d8bc63bfc6b45397233e1bbfcb19"],["p","04c915daefe
e38317fa734444acee390a8269fe5810b2241e5e6dd343dfbecc9"],["p","df476caf4888bf5d99c6a710ea6ae943d3e693d29cdc75c4eff1cfb634839bb8"],["p","b9ceaeeb4178a549e8b0570f348b2caa4bef8933fe3323d45e3875c01919a2c2"],["p","4cdbf5bcd7f015a3ebc6853e6566732f9c11357b6e43d6b2edce742fbe9847f4"],["p","50c5c98ccc31ca9f1ef56a547afc4cb48195fe5603d4f7874a221db965867c8e"],["p","d411848a42a11ad2747c439b00fc881120a4121e04917d38bebd156212e2f4ad"],["p","58ead82fa15b550094f7f5fe4804e0fe75b779dbef2e9b20511eccd69e6d08f9"],["p","82341f882b6eabcd2ba7f1ef90aad961cf074af15b9ef44a09f9d2a8fbfbe6a2"],["p","54d8f90d7b86ba313a08423c8d730d0dbd6e17b26db5186c2c0c473c8c91201f"],["p","40b9c85fffeafc1cadf8c30a4e5c88660ff6e4971a0dc723d5ab674b5e61b451"],["p","266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5"],["p","c6f7077f1699d50cf92a9652bfebffac05fc6842b9ee391089d959b8ad5d48fd"],["p","e83b66a8ed2d37c07d1abea6e1b000a15549c69508fa4c5875556d52b0526c2b"],["p","d2362fadfb6a1dd1414762a058538f6b6c424d9cdb874b81513e985485656057"],["p","4590384fa571a14421340d379e7a58965103b104ccc25ed1a9b43099f6931f64"],["p","a9434ee165ed01b286becfc2771ef1705d3537d051b387288898cc00d5c885be"],["p","6fda5bce2882176bfbabfab503a1b5281329582d71c4be84bbb567e65c1a791f"],["p","ee6ea13ab9fe5c4a68eaf9b1a34fe014a66b40117c50ee2a614f4cda959b6e74"],["p","a5ec1fa80adb7ac0a6356a1409f0ed2c6f5ac95c0f1d65710a2eda2fc7ceffe3"],["p","7bf37af96df12d121a0e7c05418acfcc806b512e93ce54d08c6a388de2433f84"],["p","7d4417d5df435a97b8f55c8f2e7e2ef533e2371ce5e1cffd595c179a3eaf36d4"],["p","173a851fb394588411d543370d29af3b3ba28784298c065d2f7ed6df9093aff2"],["p","9349d012686caab46f6bfefd2f4c361c52e14b1cde1cd027476e0ae6d3e98946"],["p","fa36cc11955c14fe2630d324bd4cb26bfcb65f33ae80bf31c5df4616034288a6"],["p","5c120c778430df1dc11fb29275380459bdae29cd4a48226d68256029e37c90ca"],["p","f28ce9dc1b2ba21e4ffbc2ee2c7e0313e3e004604905be85baaacd1c8dc3e934"],["p","73650fd65d3680880033da775a94c366135b1690bc034dfa2c811c16623e5aec"],["p","6bc58cf504fdfe8ab6ae4f0bd0ae5cf2b0ad8595
19ad9bc9691cb4a04d46cebf"],["p","eee8f90244589abc852b024493a077522157057e6d565788d8d09473b81d14a9"],["p","6c6e3e05e1c9d2aae0ed2431544aea411771dd9d81017539af0fd818b2389f28"],["p","d4da76773d3eb8ac89b864eaf34530b5da1dc80e59d5c2d3028d35803be0ed84"],["p","acbc4d7fd608e07e8980f0bc37a81c576f171204f3c348919ff172175e05f3f6"],["p","5bf3b508cf12e9da97577606fe14cbe2ed4dc01c1781fea852682bd327e07477"],["p","0e05cb33ce37bfadfc26a5406e082f84550f63f992df5256dfd08ac62082a99e"],["p","81d38469313088cce52b8a860711c21e7408860286bb3834a4d74fab717cde2e"],["p","4fc467d1d8089a84407846ad6747397663c27f798225fe9749b426aacf4d83cf"],["p","508c8b17e19abf4e090bc0fbf252bfe003d7cc8cd7752df321514f6c393d1f1e"],["p","9356d008ab226ab6b08ab3c73900546b2836c48c7527f0e5332c09201a3e6dc8"],["p","762a3c15c6fa90911bf13d50fc3a29f1663dc1f04b4397a89eef604f622ecd60"],["p","d10f938e98e9d20ccd7972fdc61554a875a67008dd6e8b11988b1e2e01f44889"],["p","1dac57913c60f528500fc31907e0a2cd8da4a9d3c0e95a685754f3757d826128"],["p","77c2a730a1921d9baefb3a433fcc2771a8ccda9f8aa461d10ebfff8039e0ca1d"],["p","4bc7b2f1b14c47c5c87dcdfe458f5cef54e46526ac6e811ccdf18e4204a37ca9"],["p","9770fb48aa3861dd393eb857e740f2df6f18e0ead43bad1d30c65e5c198200a6"],["p","fdb873a485e43f7d48366302ab02ef80c20d4a2b0c58c3765eb7a48167199be1"],["p","b6e30dda349c5d461df01294e0f7d5ffe43832fb71be73b972672cd79b210867"],["p","404211c6e9763f49c8c0d8622e6fbf216550b748674247400a2527e0f95e3a79"],["p","8e27ffb5c9bb8cdd0131ade6efa49d56d401b5424d9fdf9a63e074d527b0715c"],["p","516add19a861a2f429ccc883fe73243179d5298248c827d68fafe822e697c014"],["p","e8f0ecf0c478acdc21e787e577d0e00bd4458d1308af5c43b1e8eaa8088940c3"],["p","77ce56f89d1228f7ff3743ce1ad1b254857b9008564727ebd5a1f317362f6ca7"],["p","6b4a29bbd43d1d0eeead384f512dbb591ce9407d27dba48ad54b00d9d2e1972b"],["p","c35ff8c340449f0d68af1aec4844bb44a9c0b8c1dd4f4d4efbc65e12039a348a"],["p","668ceee55475f595ec0e8ef44c64bf9da8d9dd6008ac54dcd24b2501930b960e"],["p","93518f91dfa51d8acf39217cdcd3d2ccd178433cb9e72368544aacd7412cb50c"],["
p","ace91a4ff2dc43e45e59d1065b99fe94bd3c17ce8ded88cfa1d4a1dce6a61853"],["p","9a3301a9229e40f60a53a05fa2035523dab68c57269194afa3b3711c5fd7ef8c"],["p","61066504617ee79387021e18c89fb79d1ddbc3e7bff19cf2298f40466f8715e9"],["p","e844b39d850acbb13bba1a20057250fe6b3deff5f1ecc95b6a99dc35adafb6a2"],["p","f0ae8aa99b960d90aa6c7d3c41d928961d026a82737dab33f7a59893e39fa63c"],["p","a147fff6a69171ea7a2d93a46804365bf33d6e1039be2d70020deb5ec905fb54"],["p","db8c123f05445f79d7fefbf53ee0e4acedde7427b942a5c3505f45f6645ca373"],["p","43565bbf0ed8f87dc9e997d5476b95f07e797d952e37eab54d93e69b11779743"],["p","35829e97cdd9cd66ea852babd6b2f1efb029f082ff50ca66263109e2e7e7d504"],["p","3ec97185b3251b43e7ebac2d720123daa17ebb3640b879657c41e440daffb897"],["p","3e6740a3773c8b99f26f75c7529d7996d54d9a1fa7423db34807cfd0aa0f89a5"],["p","466cf4d2117be42022bd6573efd6e9cc863b5c493db6a0db80e5d3b977487370"],["p","d6d4922ba0f6577b266951677e2b7f19954e04ed55e2e6428b90b4db742b9470"],["p","4d8e327543efbe13ef4f49e43922a40258ac60ededcee062a568f18845a09a04"],["p","ebd908f8addc76bc17c9430a4045c570e6a8733efecb560adaf4ed4affe4e070"],["p","0b26f590631b0fa3048d103410e484e5e22e2d5a8eceaceda9d91b38f81dd1a8"],["p","f81916ce7290ccd77945bce3c61921ff39c1a9be2f43c1829085eb26f2d78c16"],["p","1d189b2ac68e0293985f8781a0a1e548103ef70b836b34b1e43efd99c743e503"],["p","60868a25f8d84a0c250c83e4ffac0eb8d21454d0ab084a7c6ff67310f6a020ce"],["p","c37c1b0f84a576194e1b518f1c3e7cc89c12d445900a9a511177af65ebff7368"],["p","b12ae9dd8025d11855b22cef7f7d3557b9e67d380e058811ebf752f958d175d7"],["p","974d0f476f175adf26ceddb29f460368536343561b9b76582fe9859483f01878"],["p","13020be82513fc7ee129bd3955ecd4ccda95fecaebbe06ba559e56231d1b1742"],["p","0d0547de422dfbc821247725bfc761c3efd11da98f6fef0ec3dc213465155c5a"],["p","9d7d214c58fdc67b0884669abfd700cfd7c173b29a0c58ee29fb9506b8b64efa"],["p","a765974c5c35ea5e2ebe5399ca1ed276ea3a36681641bf8b19aeb460b5488223"],["p","3a322c39eb9b532ba6c1d0039df85dddb43a28b563fc85430c5585dd09d80d0b"],["p","66fdcf7c587d0e5dc61348571
b53b50d7a8e3d7534a960f0224c8aa2a52e2912"],["p","5d34f3ded332d1d8ae01fc7690fcbe75def9ef90f37abd6d88224af826ecd3b3"],["p","c9bdb692cef1f403336be7e0a79f8436e6fbc325a0d2e8746c4b7342234e27a4"],["p","d36e8083fa7b36daee646cb8b3f99feaa3d89e5a396508741f003e21ac0b6bec"],["p","41fd944c0015fb6e357bf5dbff8178326436ece7ae07a46a0d7f0250325208bc"],["p","3f1f611ebf95162da08e73795ddfbd3a2ebfe5b3edc0495356032af3fd251aa2"],["p","64f776a8fe0b0c6b51c5de0984aa3d62ab3861d67b7be18d3ded1c0afcf7866f"],["p","590e15b38be0560309d02cf9dd203cb2d61d87272dd708af3d5a10526f29feb9"],["p","d7c3a4c24f04dfcd4622edd08b881e04d8657310e1e289eb181c100efe8d547a"],["p","bb192f4eae419de467d86c5659deb8a942f0aee0e6eab3c97e73eb990aaeadd7"],["p","fe641e7b70a4b44f406c8aa6d33abb2ca8c0f9686c83033a2abc46a2784102db"],["p","2779f3d9f42c7dee17f0e6bcdcf89a8f9d592d19e3b1bbd27ef1cffd1a7f98d1"],["p","ddfbb06a722e51933cd37e4ecdb30b1864f262f9bb5bd6c2d95cbeefc728f096"],["p","508f28656b8db436153d5239de5034abc0351b8c90ac33e6b156d1fea64b2960"],["p","09d49f47081c1a06f04afda62988e3253247a8c96c1d4ef025dc7619dbc23942"],["p","4d4fb5ff0afb8c04e6c6e03f51281b664576f985e5bc34a3a7ee310a1e821f47"],["p","449b4d6f4873ec3adda13505eaa58caae1b105186ba47857c03690ae69cb7b25"],["p","cb03caccaab163c1ecd16ad400303a570b488fb339cabb565169212956020612"],["p","6c1e6166f1d631a60e9ce708bb2570cf0fa4f6d31c0e110df5585ed1b2fa34ef"],["p","b1a621aab2578958d80b2c418ccfbf53674321d7e795b96c885f26496bcded2b"],["p","a1f66e86a8864c0cf172a41d0505369a0808fefe5691931dddf27eb51d62b916"],["p","ae44681bb75c03a96f3af62e88b6d80de6d3f223f2d9459a31823e37bd27918d"],["p","cee901130e5733b721af6c5a0566e9aa9dcf523eee73b8927c832611bb59d321"],["p","2a483906003f87a53cfa0cfaae68a68976b30c9fa12b564f28e60836c7f7c5e5"],["p","c21b1a6cdb247ccbd938dcb16b15a4fa382d00ffd7b12d5cbbad172a0cd4d170"],["p","5a3de28ffd09d7506cff0a2672dbdb1f836307bcff0217cc144f48e19eea3fff"],["p","284954984e19bf621b1a85687806890d2e27ddd0e6c58c14e41e9e0a7362acce"],["p","26bd32c67232bdf16d05e763ec67d883015eb99fd1269025224c20
c6cfdb0158"],["p","ed5fa5e37f5db8d8d6db05b2cb04960c4f5c0bbb4a5ca5569d2b29cc284a57d4"],["p","b676ded7c768d66a757aa3967b1243d90bf57afb09d1044d3219d8d424e4aea0"],["p","d3f06d830e33927f422f9d00c5180b6a071f8e024573c5aad5c5a3f17ff53dc3"],["p","dce5977d76e7378cde2a59f06bcdb1dcab3522831825d89bc778bdae7354d072"],["p","1211516620ad5bf795939c925c51b71cdffa64dd688e303816cd0e9180f99cb4"],["p","4d04dacfbb2d6586187af3fc1c282f6780e47109820b819af2c17177eeef1614"],["p","98af38d531fe95b764a461c21ac4429e68134005b59d6d54c29012ed40608e89"],["p","1bc70a0148b3f316da33fe3c89f23e3e71ac4ff998027ec712b905cd24f6a411"],["p","3a60b545d01979e4f28f51d7de9b887e9b719be71a699b060e08c9888d96fd75"],["p","b133bfc57bed61c391d4e8f953b906c7f1709c438d91c75fb6daf79449d5789d"],["p","e1f9ba320bff4c50e9faad444432f8f3a9bb0924bb88c95691fc65ef97634d07"],["p","870744363b1a5986d6773b5706dde258c039f6d34a5ffc270915033a6a67c82c"],["p","0a777389e7a0beedba4b06b75b8d0ee38a564b0eb05a92b465b6e484a0a7a492"],["p","21b419102da8fc0ba90484aec934bf55b7abcf75eedb39124e8d75e491f41a5e"],["p","acf2efa933d96b3be77830b4d8da09fb187b0202f668c6ec9dc64632dfad02f3"],["p","5b1510033bf42452dca2d2a93ffd43041f8e5243af44b79cd78a806b035c1ed2"],["p","2b82e9e9a0c9307c539b69e27942e46c97c8828c0810921a31a76efb81aae425"],["p","964b8c967e05d4ec27d13d99f994f164bb3c6574532e15a47f25229cb0487148"],["p","02d9f5676fffc339ffe94dfab38bebe21ce117c6f1509d9922a82d454f420da2"],["p","5b72ee50d6f11a2d1d68c6b76aa01eb9164d429ced93a747bb1d045fc3311826"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","c2827524936dedad5f623bcf8a04d201f3fd3ed7d4912a190dbeef685f45b2f7"],["p","b6960fcbc7c04536bc98f55c48d9f9fee55983e7f763c124453af728af82311d"],["p","8bd3c3f0c4aed0224ea16bcbb8279238c9f1b29212a4d0f0c67199e416087870"],["p","f1d6ebc997
563b4c5f4bae0936dabcbec0dd91f4dd10b2913dd4869deaa4adee"],["p","95b3e86657e547ac5023958185324d79447e2a72a474a64f18529dfd82159caa"],["p","18d06103ace2304261596adc644dfbbe79e26e20d38135af0ed26d435095ce0e"],["p","d7264f22bed60bda3540c0b05928983ba7e0bc1331a25bf85eea4abacf4eb343"],["p","a2a52fda7b7de24766227d6d750b9652ccf5655752532418604d002eeec4aca7"],["p","c8ee83e8df8bfcdae83feeb5d2607a848242e6131a52480ca7fd03262d496a32"],["p","6d7ebcad7e5d83a9a8d0eff8995b2b37959bc34564967fe7962e6427c5ad5f33"],["p","69a80567e79b6b9bc7282ad595512df0b804784616bedb623c122fad420a2635"],["p","edfe66a5eb79c8fb4f6b4b76a27b8ba66efd1b100ff8e9a7899ef9a182dc7630"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["p","f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0"],["a","34550:f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0:Art"],["p","5004fd183cc91d4ae52e06748da2dcd1f9929b0d992a47169d1cccebc384e7d9"],["a","34550:266815e0c9210dfa324c6cba3573b14bee49da4209a9456f9484e5106cd408a5:noStrudel","wss://nos.lol"],["p","04c960497af618ae18f5147b3e5c309ef3d8a6251768a1c0820e02c93768cc3b"],["p","ca86d55ab426d52de1732413d69b9b1b063a7980968b8b69635a89a562af1bec"],["p","2c24e1af571fb4ccfeca3981649c1b09c695cd83b129709eb3b41c7ad2854899"],["p","c31c463a0538044a4ef38eb249badfb251acbae372775b7d73f4849af25e24b7"],["p","46d0f6305b38ec45050aedb211981230b370a675f09bca0f9ef1ee20149d4ad9"]],"content":"","sig":"689c9d91372937460c039c4f1bb68b8828c97e9c570c66fcca9a48e8f6acc893102b544821f48d090822114574a438d517096035ba072adcf833576a4588de78"}`, + 
`{"id":"650573b38c32eb08087c16fd5bf734c1b10c35be4366f5b3c8a0476569682b48","pubkey":"f4db5270bd991b17bea1e6d035f45dee392919c29474bbac10342d223c74e0d0","created_at":1702213369,"kind":3,"tags":[["p","0fcba340409f2eaad5b859aad412cb326b515a5f11d5585199906eae0a1ea948"],["p","e1ff3bfdd4e40315959b08b4fcc8245eaa514637e1d4ec2ae166b743341be1af"],["p","e623bb2e90351b30818de33debd506aa9eae04d8268be65ceb2dcc1ef6881765"],["p","39a8b17475be0db44e313f9fd032ffde183c8abd6498e4932a873330d2cd4868"],["p","460c25e682fda7832b52d1f22d3d22b3176d972f60dcdc3212ed8c92ef85065c"],["p","e8c1ca03a46d97184bfcd9125a5c9674a867bd1beaebe47c77d4eaec6c5ee874"],["p","4707c8fa26c025f3694dc0a62d45fe597f7819deb8a45abb894f22cffbecb777"],["p","58dc4e185bc8dfeb4dbc71e3818cc5a3e666b3bb92303aa4d8572cf52626c6f3"],["p","74dcec31fd3b8cfd960bc5a35ecbeeb8b9cee8eb81f6e8da4c8067553709248d"],["p","84dee6e676e5bb67b4ad4e042cf70cbd8681155db535942fcc6a0533858a7240"],["p","430f76d2cab692d81be33395066231f8560c97281f28393ed9c9607a16dc5268"],["p","254c6c9043dee77294e7095eedf34e3068705f5a68219e7411e19ec13f4f3963"],["p","8664ff363efcd36a154efdcbc629a4d1e4c511f9114e1d35de73fff31cb783b3"],["p","8aec8cfb20ce774c08e8b790d973da9916c625faadf74d8b6c8995ab2b8ce992"],["p","9b605c669b3f02bf593d6d945e25de246998808c34d46b194259451c1b7b488e"],["p","0dde402c4b53857520ec9481869e5d34641d0f749204db9c1ea8869c0ade6766"],["p","6b06b54d5ebd78d46a83d59185e5f253d2c0a3ab4371bfadced961473184d5c1"],["p","ca20c9531929c1b9aa5dd31a1b952a2025310118e5775d01ad244129b744e1b1"],["p","d3af49435bb13c695232fd8ca8c9e73db8c65630553baef83f9544e81cef9be7"],["p","8ed150f8a138ebee0faf7bd859847af0abc97a74f85c6d081d9de3dcb31085c9"],["p","b377757fa3efd9d4f56170bd08508872b13680a000be9b19f3c0f6fea3d861bc"],["p","af90923637f1e0cd2cce418dd1722ec6537dbbed535e71ca882e804ca18d3954"],["p","37c4e186f730439249cf08fee7b58186ccae9e4dd12f35bf58f9b4267de9109b"],["p","b2e953ee76cf442384792ea47a385268970ae0165bae3edfb9f48d877ed5a9f6"],["p","6f6b50bb6fc7353a5bf3f54ef34d421ff81731e16d7f52d8888
33c6aa848e5a1"],["p","04ea4f8350f1562aa1d60dc070561f5bb8386a11d1a00570fd7440da210e1713"],["p","17f2f12ce60237bfa71bcccc70b4062db190dd2d6d349b9323e9ec93b2a8dccb"],["p","d03f364a17a70354cc6c8cbf59a607e550fe8f57a02697791e9eb35be12a5e44"],["p","711876aa62348eda76c2182203f4b2ba6d154072a7f66fb9950cd434ced08600"],["p","6f0ec447e0da5ad4b9a3a2aef3e56b24601ca2b46ad7b23381d1941002923274"],["p","79c2cae114ea28a981e7559b4fe7854a473521a8d22a66bbab9fa248eb820ff6"],["p","7d3581015632d288712433d865352dd45d780ffa5ba14eeaa758e6a416c07d37"],["p","f7108e642c3966c68dbe54bd25509be2e1a745b1fce08d25e646d5db9c50b5c0"],["p","66d7101a0ded94f06f07577e536e289ecd33d020d5f4fbbfb320d4eb17d17c7c"],["p","645681b9d067b1a362c4bee8ddff987d2466d49905c26cb8fec5e6fb73af5c84"],["p","51c059f3cc1802997fe1c4e60d6315c22079a025823256160ac7bfc1bca7c2d7"],["p","a6e3fee826e7da976917ef76692a2b8915ce6af8f330b5d73620511ea1e557ee"],["p","c93406ed82c231019cf1d96700884fdedf1f7d5a32fa368b10b260cc6918f4a1"],["p","50d94fc2d8580c682b071a542f8b1e31a200b0508bab95a33bef0855df281d63"],["p","00000000827ffaa94bfea288c3dfce4422c794fbb96625b6b31e9049f729d700"],["p","20d88bae0c38e6407279e6a83350a931e714f0135e013ea4a1b14f936b7fead5"],["p","3f770d65d3a764a9c5cb503ae123e62ec7598ad035d836e2a810f3877a745b24"],["p","6e468422dfb74a5738702a8823b9b28168abab8655faacb6853cd0ee15deee93"],["p","2067810159470dce4c95ecb96d27bd01fe3030a446134ddddda29eb72c7d580d"],["p","3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d"],["p","c49d52a573366792b9a6e4851587c28042fb24fa5625c6d67b8c95c8751aca15"],["p","69aeace80672c08ef7729a03e597ed4e9dd5ddaa7c457349d55d12c043e8a7ab"],["p","32e1827635450ebb3c5a7d12c1f8e7b2b514439ac10a67eef3d9fd9c5c68e245"],["p","63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed"],["p","eab0e756d32b80bcd464f3d844b8040303075a13eabc3599a762c9ac7ab91f4f"],["p","eaf1a13a032ce649bc60f290a000531c4d525f6a7a28f74326972c4438682f56"],["p","5e7ae588d7d11eac4c25906e6da807e68c6498f49a38e4692be5a089616ceb18"],["p","0fe0b18
b4dbf0e0aa40fcd47209b2a49b3431fc453b460efcf45ca0bd16bd6ac"],["p","85080d3bad70ccdcd7f74c29a44f55bb85cbcd3dd0cbb957da1d215bdb931204"],["p","e7424ad457e512fdf4764a56bf6d428a06a13a1006af1fb8e0fe32f6d03265c7"],["p","834ef5926f563b89852965932bf88513595a23ad9d8b026446b454a23b1a7bb4"],["p","52b4a076bcbbbdc3a1aefa3735816cf74993b1b8db202b01c883c58be7fad8bd"],["p","a976156de0384616921e32bfc8314cc647d33843af649d2d91faabb2450b808d"],["p","ee0304bae0d4679bb34347ce3b1b80482262b9812bd0c0d5e19a5e2445043b75"],["p","7fa56f5d6962ab1e3cd424e758c3002b8665f7b0d8dcee9fe9e288d7751ac194"],["p","4918eb332a41b71ba9a74b1dc64276cfff592e55107b93baae38af3520e55975"],["p","34d2f5274f1958fcd2cb2463dabeaddf8a21f84ace4241da888023bf05cc8095"],["p","0114bb11dd8eb89bfb40669509b2a5a473d27126e27acae58257f2fd7cd95776"],["p","020f2d21ae09bf35fcdfb65decf1478b846f5f728ab30c5eaabcd6d081a81c3e"],["p","045745ac0e90a436141a3addd95575c2ead47b613f45287283e5802ff7fd99fd"],["p","064de2497ce621aee2a5b4b926a08b1ca01bce9da85b0c714e883e119375140c"],["p","06bf93843b7cc4f43669fd64627bd0e47e75d34106ca55fea60b93fa7322d63c"],["p","0c28a25357c76ac5ac3714eddc25d81fe98134df13351ab526fc2479cc306e65"],["p","0c371f5ed95076613443e8331c4b60828ed67bcdefaa1698fb5ce9d7b3285ffb"],["p","0c3849bc387593eab66792356d65c44852d1a35bf022a9b2d7cfcf50d4e6a146"],["p","1577e4599dd10c863498fe3c20bd82aafaf829a595ce83c5cf8ac3463531b09b"],["p","187aac66ef6f0598f5cb736c1757073b67a8db75b4907be0d56eda42daa81e6e"],["p","18b2ebab655ab3931dfa0346e7dc1c77ce28387a68956b9d4f0650a2cc0646e0"],["p","1a4ec27b7539c4ddef2c45afeae679af6c42c4fe9b3a0d08d1426d72490e9f32"],["p","23948d2fbac3e2097e902da16dd1b4f1005d16d8485319f00240d828ee6c35f7"],["p","24202e533d2ef4da8acc01fa218bd0e2a85105210e8ab53ed1f3e2c270f33db9"],["p","29fbc05acee671fb579182ca33b0e41b455bb1f9564b90a3d8f2f39dee3f2779"],["p","32bea35c961e2469424c6a3d05a6f379e9d699822b9c325088d649b119e52f24"],["p","338ef72e3deebda385aedea5e89b87ec35a7d296d4a9b642bb2c1ad926007db7"],["p","35f25abceda5f71685dd378f02167cc51dd1
9313660951c40266a5dc3b8ad0f5"],["p","3b6a3d3bb3358836a64d1c80292b96e7698ec35a2e5ca451defa6bd3af3eeb84"],["p","3efdaebb1d8923ebd99c9e7ace3b4194ab45512e2be79c1b7d68d9243e0d2681"],["p","40e10350fed534e5226b73761925030134d9f85306ee1db5cfbd663118034e84"],["p","44f85003a8d6f05cd966516267e8ca5111df15933e185bb80e9f5246acf3375f"],["p","457e17b7ea97a845a0d1fa8feda9976596678e3a8af46dc6671d40e050ce857d"],["p","4657dfe8965be8980a93072bcfb5e59a65124406db0f819215ee78ba47934b3e"],["p","46fcbe3065eaf1ae7811465924e48923363ff3f526bd6f73d7c184b16bd8ce4d"],["p","4e3016e742f44520e9077a20f898179b026bc53d02c9a74b0a027b7d087a7012"],["p","51b826cccd92569a6582e20982fd883fccfa78ad03e0241f7abec1830d7a2565"],["p","59fbee7369df7713dbbfa9bbdb0892c62eba929232615c6ff2787da384cb770f"],["p","5a8e581f16a012e24d2a640152ad562058cb065e1df28e907c1bfa82c150c8ba"],["p","5c04fb11b79bb4ec9f8c59da5abc8c529f98c34ada48bf7f38caf62b97e0384a"],["p","5cc29169f09efdfc8cf63e3458c6938f9d9d68af02d7f39d74a6882b48d7ede4"],["p","5df21e8ec11e21e7b710ac7d6c94427407ae69e93a7fcf0d0a3ee2fac4fdc84b"],["p","623341a4a92b31d0738b39ee6be2bd3916a2326c283cd40ba46eb319d46e09a3"],["p","6389be6491e7b693e9f368ece88fcd145f07c068d2c1bbae4247b9b5ef439d32"],["p","64a8961fe8536fea89b357f192002720f8110498fa0256f8adda635b9e3e35e8"],["p","677b3a764ee6cc24d4def8f848913469868ccd4b5cff88cccbf3aa3701e9e7c7"],["p","69a0a0910b49a1dbfbc4e4f10df22b5806af5403a228267638f2e908c968228d"],["p","6ef322e8d90d39b8675d4fa27dc1d3d45c1f983e53fe751047ed7322da349dbb"],["p","7acce9b3da22ceedc511a15cb730c898235ab551623955314b003e9f33e8b10c"],["p","7b394902eeadb8370931f1903d00569545e84113fb6a09634664763be232009c"],["p","7ca66d4166b16f54a16868191ba1c6386a976624f4634f3896d9b6740a388ca3"]],"content":"","sig":"689c9d91372937460c039c4f1bb68b8828c97e9c570c66fcca9a48e8f6acc893102b544821f48d090822114574a438d517096035ba072adcf833576a4588de78"}`, +} diff --git a/eventstore/mmm/betterbinary/filtering.go b/eventstore/mmm/betterbinary/filtering.go new file mode 100644 index 
// TagMatches reports whether the binary-encoded event evtb contains at least
// one tag whose first item is the single-letter key and whose second item
// equals any of vals.
//
// It works without decoding the event: for each candidate value it builds the
// exact serialized byte sequence such a tag would have on disk
// ({1, 0, key[0], len(val), 0, val...} — layout mirrored from the marshaler)
// and compares it against the raw bytes of every tag with two or more items.
// Only the first byte of key is used, so keys are assumed to be one letter.
//
// evtb is assumed to be well-formed output of Marshal: the tag count is read
// as a little-endian uint16 at offset 137, followed by one uint16 offset per
// tag, each relative to base 135.
func TagMatches(evtb []byte, key string, vals []string) bool {
	if len(key) == 0 {
		// no key byte to compare against; an empty key can never match
		// (and indexing key[0] would panic)
		return false
	}

	// precompute the serialized form each matching tag would have
	matches := make([][]byte, 0, len(vals))
	for _, val := range vals {
		match := append([]byte{1, 0, key[0], uint8(len(val)), 0}, val...)
		matches = append(matches, match)
	}

	ntags := binary.LittleEndian.Uint16(evtb[137:])
	for t := uint16(0); t < ntags; t++ {
		offset := int(binary.LittleEndian.Uint16(evtb[139+t*2:]))
		nitems := evtb[135+offset]
		if nitems >= 2 {
			start := 135 + offset + 1
			for _, match := range matches {
				end := start + len(match)
				// bounds check: a candidate longer than the remaining buffer
				// is rejected instead of panicking with a slice-out-of-range
				if end <= len(evtb) && slices.Equal(evtb[start:end], match) {
					return true
				}
			}
		}
	}
	return false
}

// KindMatches reports whether the binary-encoded event evtb has the given
// kind, stored as a little-endian uint16 at offset 1.
func KindMatches(evtb []byte, kind uint16) bool {
	return binary.LittleEndian.Uint16(evtb[1:3]) == kind
}
`{"id":"a9663055164ab8b30d9524656370c4bf93393bb051b7edf4556f40c5298dc0c7","pubkey":"ee11a5dff40c19a555f41fe42b48f00e618c91225622ae37b6c2bb67b76c4e49","created_at":1681778790,"kind":1,"sig":"4dfea1a6f73141d5691e43afc3234dbe73016db0fb207cf247e0127cc2591ee6b4be5b462272030a9bde75882aae810f359682b1b6ce6cbb97201141c576db42","content":"He got snowed in","tags":[["client","gossip"],["p","e2ccf7cf20403f3f2a4a55b328f0de3be38558a7d5f33632fdaaefc726c1c8eb"],["e","2c86abcc98f7fd8a6750aab8df6c1863903f107206cc2d72e8afeb6c38357aed","wss://nostr-pub.wellorder.net/","root"]]}`, + "e", + []string{"2c86abcc98f7fd8a6750aab8df6c1863903f107206cc2d72e8afeb6c38357aed"}, + true, + }, + { + `{"id":"3f551da67788c7aae15360d025595dc2d391f10bb7e759ee5d9b2ad7d64392e4","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1712715433,"kind":1,"tags":[["-"],["askdasds"],["t","spam"],["t","nada"]],"content":"ggsgsgsg","sig":"43431f4cf8bd015305c2d484841e5730d261beeb375a86c57a61df3d26e820ce8d6712d2a3c89e3f2298597f14abf58079954e9e658ba59bfc2d7ce6384f25c7"}`, + "t", + []string{"nothing", "nada"}, + true, + }, + { + `{"id":"3f551da67788c7aae15360d025595dc2d391f10bb7e759ee5d9b2ad7d64392e4","pubkey":"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798","created_at":1712715433,"kind":1,"tags":[["-"],["askdasds"],["t","spam"],["t","nada"]],"content":"ggsgsgsg","sig":"43431f4cf8bd015305c2d484841e5730d261beeb375a86c57a61df3d26e820ce8d6712d2a3c89e3f2298597f14abf58079954e9e658ba59bfc2d7ce6384f25c7"}`, + "z", + []string{"nothing", "nada"}, + false, + }, + } { + var evt nostr.Event + easyjson.Unmarshal([]byte(tc.json), &evt) + bin := make([]byte, Measure(evt)) + Marshal(evt, bin) + + if res := TagMatches(bin, tc.tagKey, tc.tagValues); res != tc.matches { + t.Fatalf("matched incorrectly: %v=>%v over %s was %v, expected %v", tc.tagKey, tc.tagValues, tc.json, res, tc.matches) + } + } +} diff --git a/eventstore/mmm/count.go b/eventstore/mmm/count.go new file mode 100644 
index 0000000..7567a58 --- /dev/null +++ b/eventstore/mmm/count.go @@ -0,0 +1,91 @@ +package mmm + +import ( + "bytes" + "context" + "encoding/binary" + "slices" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/mmm/betterbinary" + "github.com/nbd-wtf/go-nostr" +) + +func (il *IndexingLayer) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + var count int64 = 0 + + queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter) + if err != nil { + return 0, err + } + + err = il.lmdbEnv.View(func(txn *lmdb.Txn) error { + // actually iterate + for _, q := range queries { + cursor, err := txn.OpenCursor(q.dbi) + if err != nil { + continue + } + + it := &iterator{cursor: cursor} + it.seek(q.startingPoint) + + for { + // we already have a k and a v and an err from the cursor setup, so check and use these + if it.err != nil || + len(it.key) != q.keySize || + !bytes.HasPrefix(it.key, q.prefix) { + // either iteration has errored or we reached the end of this prefix + break // stop this cursor and move to the next one + } + + // "id" indexes don't contain a timestamp + if q.timestampSize == 4 { + createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:]) + if createdAt < since { + break + } + } + + if extraAuthors == nil && extraKinds == nil && extraTagValues == nil { + count++ + } else { + // fetch actual event + pos := positionFromBytes(it.posb) + bin := il.mmmm.mmapf[pos.start : pos.start+uint64(pos.size)] + + // check it against pubkeys without decoding the entire thing + if extraAuthors != nil && !slices.Contains(extraAuthors, [32]byte(bin[39:71])) { + it.next() + continue + } + + // check it against kinds without decoding the entire thing + if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(bin[1:3])) { + it.next() + continue + } + + // decode the entire thing (TODO: do a conditional decode while also checking the extra tag) + event := &nostr.Event{} + if err := 
betterbinary.Unmarshal(bin, event); err != nil { + it.next() + continue + } + + // if there is still a tag to be checked, do it now + if !event.Tags.ContainsAny(extraTagKey, extraTagValues) { + it.next() + continue + } + + count++ + } + } + } + + return nil + }) + + return count, err +} diff --git a/eventstore/mmm/delete.go b/eventstore/mmm/delete.go new file mode 100644 index 0000000..2f08c2c --- /dev/null +++ b/eventstore/mmm/delete.go @@ -0,0 +1,78 @@ +package mmm + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "slices" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/nbd-wtf/go-nostr" +) + +func (il *IndexingLayer) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + return il.mmmm.lmdbEnv.Update(func(mmmtxn *lmdb.Txn) error { + return il.lmdbEnv.Update(func(iltxn *lmdb.Txn) error { + return il.delete(mmmtxn, iltxn, evt) + }) + }) +} + +func (il *IndexingLayer) delete(mmmtxn *lmdb.Txn, iltxn *lmdb.Txn, evt *nostr.Event) error { + zeroRefs := false + b := il.mmmm + + b.Logger.Debug().Str("layer", il.name).Uint16("il", il.id).Msg("deleting") + + // first in the mmmm txn we check if we have the event still + idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2]) + val, err := mmmtxn.Get(b.indexId, idPrefix8) + if err != nil { + if lmdb.IsNotFound(err) { + // we already do not have this anywhere + return nil + } + return fmt.Errorf("failed to check if we have the event %x: %w", idPrefix8, err) + } + + // we have this, but do we have it in the current layer? + // val is [posb][il_idx][il_idx...] 
+ pos := positionFromBytes(val[0:12]) + + // check references + currentLayer := binary.BigEndian.AppendUint16(nil, il.id) + for i := 12; i < len(val); i += 2 { + if slices.Equal(val[i:i+2], currentLayer) { + // we will remove the current layer if it's found + nextval := make([]byte, len(val)-2) + copy(nextval, val[0:i]) + copy(nextval[i:], val[i+2:]) + + if err := mmmtxn.Put(b.indexId, idPrefix8, nextval, 0); err != nil { + return fmt.Errorf("failed to update references for %x: %w", idPrefix8, err) + } + + // if there are no more layers we will delete everything later + zeroRefs = len(nextval) == 12 + + break + } + } + + // calculate all index keys we have for this event and delete them + for k := range il.getIndexKeysForEvent(evt) { + if err := iltxn.Del(k.dbi, k.key, val[0:12]); err != nil && !lmdb.IsNotFound(err) { + return fmt.Errorf("index entry %v/%x deletion failed: %w", k.dbi, k.key, err) + } + } + + // if there are no more refs we delete the event from the id index and mmap + if zeroRefs { + if err := b.purge(mmmtxn, idPrefix8, pos); err != nil { + panic(err) + } + } + + return nil +} diff --git a/eventstore/mmm/freeranges.go b/eventstore/mmm/freeranges.go new file mode 100644 index 0000000..fc3a734 --- /dev/null +++ b/eventstore/mmm/freeranges.go @@ -0,0 +1,68 @@ +package mmm + +import ( + "fmt" + "slices" + + "github.com/PowerDNS/lmdb-go/lmdb" +) + +func (b *MultiMmapManager) mergeNewFreeRange(pos position) (isAtEnd bool) { + // before adding check if we can merge this with some other range + // (to merge means to delete the previous and add a new one) + toDelete := make([]int, 0, 2) + for f, fr := range b.freeRanges { + if pos.start+uint64(pos.size) == fr.start { + // [new_pos_to_be_freed][existing_fr] -> merge! + toDelete = append(toDelete, f) + pos.size = pos.size + fr.size + } else if fr.start+uint64(fr.size) == pos.start { + // [existing_fr][new_pos_to_be_freed] -> merge! 
+ toDelete = append(toDelete, f) + pos.start = fr.start + pos.size = fr.size + pos.size + } + } + slices.SortFunc(toDelete, func(a, b int) int { return b - a }) + for _, idx := range toDelete { + b.freeRanges = slices.Delete(b.freeRanges, idx, idx+1) + } + + // when we're at the end of a file we just delete everything and don't add new free ranges + // the caller will truncate the mmap file and adjust the position accordingly + if pos.start+uint64(pos.size) == b.mmapfEnd { + return true + } + + b.addNewFreeRange(pos) + return false +} + +func (b *MultiMmapManager) addNewFreeRange(pos position) { + // update freeranges slice in memory + idx, _ := slices.BinarySearchFunc(b.freeRanges, pos, func(item, target position) int { + if item.size > target.size { + return 1 + } else if target.size > item.size { + return -1 + } else if item.start > target.start { + return 1 + } else { + return -1 + } + }) + b.freeRanges = slices.Insert(b.freeRanges, idx, pos) +} + +func (b *MultiMmapManager) saveFreeRanges(txn *lmdb.Txn) error { + // save to database + valReserve, err := txn.PutReserve(b.stuff, FREERANGES_KEY, len(b.freeRanges)*12, 0) + if err != nil { + return fmt.Errorf("on put freeranges: %w", err) + } + for f, fr := range b.freeRanges { + bytesFromPosition(valReserve[f*12:], fr) + } + + return nil +} diff --git a/eventstore/mmm/fuzz_test.go b/eventstore/mmm/fuzz_test.go new file mode 100644 index 0000000..6fa501e --- /dev/null +++ b/eventstore/mmm/fuzz_test.go @@ -0,0 +1,191 @@ +package mmm + +import ( + "context" + "fmt" + "math/rand/v2" + "os" + "slices" + "testing" + + "github.com/nbd-wtf/go-nostr" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +func FuzzTest(f *testing.F) { + f.Add(0, uint(84), uint(10), uint(5)) + f.Fuzz(func(t *testing.T, seed int, nlayers, nevents, ndeletes uint) { + nlayers = nlayers%23 + 1 + nevents = nevents%10000 + 1 + ndeletes = ndeletes % nevents + + // create a temporary directory for the test + tmpDir, err := 
os.MkdirTemp("", "mmm-test-*")
+		require.NoError(t, err)
+		defer os.RemoveAll(tmpDir)
+
+		logger := zerolog.Nop()
+		rnd := rand.New(rand.NewPCG(uint64(seed), 0))
+
+		// initialize MMM
+		mmm := &MultiMmapManager{
+			Dir:    tmpDir,
+			Logger: &logger,
+		}
+
+		err = mmm.Init()
+		require.NoError(t, err)
+		defer mmm.Close()
+
+		for i := range nlayers {
+			name := string([]byte{97 + byte(i)})
+			err = mmm.EnsureLayer(name, &IndexingLayer{
+				MaxLimit: 1000,
+			})
+			require.NoError(t, err, "layer %s/%d", name, i)
+		}
+
+		// create test events
+		ctx := context.Background()
+		sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb"
+		storedIds := make([]string, 0, nevents)
+		nTags := make(map[string]int)
+		storedByLayer := make(map[string][]string)
+
+		// create n events with random combinations of tags
+		for i := 0; i < int(nevents); i++ {
+			tags := nostr.Tags{}
+			// randomly add 1-nlayers tags
+			numTags := 1 + (i % int(nlayers))
+			usedTags := make(map[string]bool)
+
+			for j := 0; j < numTags; j++ {
+				tag := string([]byte{97 + byte((i+j)%int(nlayers))})
+				if !usedTags[tag] {
+					tags = append(tags, nostr.Tag{"t", tag})
+					usedTags[tag] = true
+				}
+			}
+
+			evt := &nostr.Event{
+				CreatedAt: nostr.Timestamp(i),
+				Kind:      i, // hack to query by serial id
+				Tags:      tags,
+				Content:   fmt.Sprintf("test content %d", i),
+			}
+			evt.Sign(sk)
+
+			for _, layer := range mmm.layers {
+				if evt.Tags.FindWithValue("t", layer.name) != nil {
+					err := layer.SaveEvent(ctx, evt)
+					require.NoError(t, err)
+					storedByLayer[layer.name] = append(storedByLayer[layer.name], evt.ID)
+				}
+			}
+
+			storedIds = append(storedIds, evt.ID)
+			nTags[evt.ID] = len(evt.Tags)
+		}
+
+		// verify each layer has the correct events
+		for _, layer := range mmm.layers {
+			results, err := layer.QueryEvents(ctx, nostr.Filter{})
+			require.NoError(t, err)
+
+			count := 0
+			for evt := range results {
+				require.True(t, evt.Tags.ContainsAny("t", []string{layer.name}))
+				count++
+			}
+			require.Equal(t, count, len(storedByLayer[layer.name]))
+ } + + // randomly select n events to delete from random layers + deleted := make(map[string][]*IndexingLayer) + + for range ndeletes { + id := storedIds[rnd.Int()%len(storedIds)] + layer := mmm.layers[rnd.Int()%len(mmm.layers)] + + evt, layers := mmm.GetByID(id) + + if slices.Contains(deleted[id], layer) { + // already deleted from this layer + require.NotContains(t, layers, layer) + } else if evt != nil && evt.Tags.FindWithValue("t", layer.name) != nil { + require.Contains(t, layers, layer) + + // delete now + layer.DeleteEvent(ctx, evt) + deleted[id] = append(deleted[id], layer) + } else { + // was never saved to this in the first place + require.NotContains(t, layers, layer) + } + } + + for id, deletedlayers := range deleted { + evt, foundlayers := mmm.GetByID(id) + + for _, layer := range deletedlayers { + require.NotContains(t, foundlayers, layer) + } + for _, layer := range foundlayers { + require.NotNil(t, evt.Tags.FindWithValue("t", layer.name)) + } + + if nTags[id] == len(deletedlayers) && evt != nil { + deletedlayersnames := make([]string, len(deletedlayers)) + for i, layer := range deletedlayers { + deletedlayersnames[i] = layer.name + } + + t.Fatalf("id %s has %d tags %v, should have been deleted from %v, but wasn't: %s", + id, nTags[id], evt.Tags, deletedlayersnames, evt) + } else if nTags[id] > len(deletedlayers) { + t.Fatalf("id %s should still be available as it had %d tags and was only deleted from %v, but isn't", + id, nTags[id], deletedlayers) + } + + if evt != nil { + for _, layer := range mmm.layers { + // verify event still accessible from other layers + if slices.Contains(foundlayers, layer) { + ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack + require.NoError(t, err) + fetched := <-ch + require.NotNil(t, fetched) + } else { + // and not accessible from this layer we just deleted + ch, err := layer.QueryEvents(ctx, nostr.Filter{Kinds: []int{evt.Kind}}) // hack + require.NoError(t, err) + fetched := <-ch + 
require.Nil(t, fetched) + } + } + } + } + + // now delete a layer and events that only exist in that layer should vanish + layer := mmm.layers[rnd.Int()%len(mmm.layers)] + ch, err := layer.QueryEvents(ctx, nostr.Filter{}) + require.NoError(t, err) + + eventsThatShouldVanish := make([]string, 0, nevents/2) + for evt := range ch { + if len(evt.Tags) == 1+len(deleted[evt.ID]) { + eventsThatShouldVanish = append(eventsThatShouldVanish, evt.ID) + } + } + + err = mmm.DropLayer(layer.name) + require.NoError(t, err) + + for _, id := range eventsThatShouldVanish { + v, ils := mmm.GetByID(id) + require.Nil(t, v) + require.Empty(t, ils) + } + }) +} diff --git a/eventstore/mmm/helpers.go b/eventstore/mmm/helpers.go new file mode 100644 index 0000000..04545d7 --- /dev/null +++ b/eventstore/mmm/helpers.go @@ -0,0 +1,165 @@ +package mmm + +import ( + "encoding/binary" + "encoding/hex" + "iter" + "slices" + "strconv" + "strings" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/nbd-wtf/go-nostr" +) + +// this iterator always goes backwards +type iterator struct { + cursor *lmdb.Cursor + key []byte + posb []byte + err error +} + +func (it *iterator) seek(key []byte) { + if _, _, errsr := it.cursor.Get(key, nil, lmdb.SetRange); errsr != nil { + if operr, ok := errsr.(*lmdb.OpError); !ok || operr.Errno != lmdb.NotFound { + // in this case it's really an error + panic(operr) + } else { + // we're at the end and we just want notes before this, + // so we just need to set the cursor the last key, this is not a real error + it.key, it.posb, it.err = it.cursor.Get(nil, nil, lmdb.Last) + } + } else { + // move one back as the first step + it.key, it.posb, it.err = it.cursor.Get(nil, nil, lmdb.Prev) + } +} + +func (it *iterator) next() { + // move one back (we'll look into k and v and err in the next iteration) + it.key, it.posb, it.err = it.cursor.Get(nil, nil, lmdb.Prev) +} + +type key struct { + dbi lmdb.DBI + key []byte +} + +func (il *IndexingLayer) getIndexKeysForEvent(evt 
*nostr.Event) iter.Seq[key] { + return func(yield func(key) bool) { + { + // ~ by pubkey+date + k := make([]byte, 8+4) + hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + binary.BigEndian.PutUint32(k[8:8+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: il.indexPubkey, key: k[0 : 8+4]}) { + return + } + } + + { + // ~ by kind+date + k := make([]byte, 2+4) + binary.BigEndian.PutUint16(k[0:2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[2:2+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: il.indexKind, key: k[0 : 2+4]}) { + return + } + } + + { + // ~ by pubkey+kind+date + k := make([]byte, 8+2+4) + hex.Decode(k[0:8], []byte(evt.PubKey[0:8*2])) + binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt)) + if !yield(key{dbi: il.indexPubkeyKind, key: k[0 : 8+2+4]}) { + return + } + } + + // ~ by tagvalue+date + // ~ by p-tag+kind+date + for i, tag := range evt.Tags { + if len(tag) < 2 || len(tag[0]) != 1 || len(tag[1]) == 0 || len(tag[1]) > 100 { + // not indexable + continue + } + firstIndex := slices.IndexFunc(evt.Tags, func(t nostr.Tag) bool { return len(t) >= 2 && t[1] == tag[1] }) + if firstIndex != i { + // duplicate + continue + } + + // get key prefix (with full length) and offset where to write the created_at + dbi, k, offset := il.getTagIndexPrefix(tag[1]) + binary.BigEndian.PutUint32(k[offset:], uint32(evt.CreatedAt)) + if !yield(key{dbi: dbi, key: k}) { + return + } + + // now the p-tag+kind+date + if dbi == il.indexTag32 && tag[0] == "p" { + k := make([]byte, 8+2+4) + hex.Decode(k[0:8], []byte(tag[1][0:8*2])) + binary.BigEndian.PutUint16(k[8:8+2], uint16(evt.Kind)) + binary.BigEndian.PutUint32(k[8+2:8+2+4], uint32(evt.CreatedAt)) + dbi := il.indexPTagKind + if !yield(key{dbi: dbi, key: k[0 : 8+2+4]}) { + return + } + } + } + + { + // ~ by date only + k := make([]byte, 4) + binary.BigEndian.PutUint32(k[0:4], uint32(evt.CreatedAt)) + if !yield(key{dbi: il.indexCreatedAt, key: k[0:4]}) { + return 
+ } + } + } +} + +func (il *IndexingLayer) getTagIndexPrefix(tagValue string) (lmdb.DBI, []byte, int) { + var k []byte // the key with full length for created_at and idx at the end, but not filled with these + var offset int // the offset -- i.e. where the prefix ends and the created_at and idx would start + var dbi lmdb.DBI + + // if it's 32 bytes as hex, save it as bytes + if len(tagValue) == 64 { + // but we actually only use the first 8 bytes + k = make([]byte, 8+4) + if _, err := hex.Decode(k[0:8], []byte(tagValue[0:8*2])); err == nil { + offset = 8 + dbi = il.indexTag32 + return dbi, k[0 : 8+4], offset + } + } + + // if it looks like an "a" tag, index it in this special format + spl := strings.Split(tagValue, ":") + if len(spl) == 3 && len(spl[1]) == 64 { + k = make([]byte, 2+8+30) + if _, err := hex.Decode(k[2:2+8], []byte(tagValue[0:8*2])); err == nil { + if kind, err := strconv.ParseUint(spl[0], 10, 16); err == nil { + k[0] = byte(kind >> 8) + k[1] = byte(kind) + // limit "d" identifier to 30 bytes (so we don't have to grow our byte slice) + n := copy(k[2+8:2+8+30], spl[2]) + offset = 2 + 8 + n + return dbi, k[0 : offset+4], offset + } + } + } + + // index whatever else as utf-8, but limit it to 40 bytes + k = make([]byte, 40+4) + n := copy(k[0:40], tagValue) + offset = n + dbi = il.indexTag + + return dbi, k[0 : n+4], offset +} diff --git a/eventstore/mmm/indexinglayer.go b/eventstore/mmm/indexinglayer.go new file mode 100644 index 0000000..ac74e2e --- /dev/null +++ b/eventstore/mmm/indexinglayer.go @@ -0,0 +1,200 @@ +package mmm + +import ( + "context" + "encoding/binary" + "fmt" + "os" + "path/filepath" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +var _ eventstore.Store = (*IndexingLayer)(nil) + +type IndexingLayer struct { + isInitialized bool + name string + + ShouldIndex func(context.Context, *nostr.Event) bool + MaxLimit int + + mmmm *MultiMmapManager + + // this is stored in the 
knownLayers db as a value, and used to keep track of which layer owns each event + id uint16 + + lmdbEnv *lmdb.Env + + indexCreatedAt lmdb.DBI + indexKind lmdb.DBI + indexPubkey lmdb.DBI + indexPubkeyKind lmdb.DBI + indexTag lmdb.DBI + indexTag32 lmdb.DBI + indexTagAddr lmdb.DBI + indexPTagKind lmdb.DBI +} + +type IndexingLayers []*IndexingLayer + +func (ils IndexingLayers) ByID(ilid uint16) *IndexingLayer { + for _, il := range ils { + if il.id == ilid { + return il + } + } + return nil +} + +const multiIndexCreationFlags uint = lmdb.Create | lmdb.DupSort + +func (il *IndexingLayer) Init() error { + if il.isInitialized { + return nil + } + il.isInitialized = true + + path := filepath.Join(il.mmmm.Dir, il.name) + + if il.MaxLimit == 0 { + il.MaxLimit = 500 + } + + // open lmdb + env, err := lmdb.NewEnv() + if err != nil { + return err + } + + env.SetMaxDBs(8) + env.SetMaxReaders(1000) + env.SetMapSize(1 << 38) // ~273GB + + // create directory if it doesn't exist and open it + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + err = env.Open(path, lmdb.NoTLS, 0644) + if err != nil { + return err + } + il.lmdbEnv = env + + // open each db + if err := il.lmdbEnv.Update(func(txn *lmdb.Txn) error { + if dbi, err := txn.OpenDBI("created_at", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexCreatedAt = dbi + } + if dbi, err := txn.OpenDBI("kind", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexKind = dbi + } + if dbi, err := txn.OpenDBI("pubkey", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexPubkey = dbi + } + if dbi, err := txn.OpenDBI("pubkeyKind", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexPubkeyKind = dbi + } + if dbi, err := txn.OpenDBI("tag", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexTag = dbi + } + if dbi, err := txn.OpenDBI("tag32", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexTag32 
= dbi + } + if dbi, err := txn.OpenDBI("tagaddr", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexTagAddr = dbi + } + if dbi, err := txn.OpenDBI("ptagKind", multiIndexCreationFlags); err != nil { + return err + } else { + il.indexPTagKind = dbi + } + return nil + }); err != nil { + return err + } + + return nil +} + +func (il *IndexingLayer) Name() string { return il.name } + +func (il *IndexingLayer) runThroughEvents(txn *lmdb.Txn) error { + ctx := context.Background() + b := il.mmmm + + // run through all events we have and see if this new index wants them + cursor, err := txn.OpenCursor(b.indexId) + if err != nil { + return fmt.Errorf("when opening cursor on %v: %w", b.indexId, err) + } + defer cursor.Close() + + for { + idPrefix8, val, err := cursor.Get(nil, nil, lmdb.Next) + if lmdb.IsNotFound(err) { + break + } + if err != nil { + return fmt.Errorf("when moving the cursor: %w", err) + } + + update := false + + posb := val[0:12] + pos := positionFromBytes(posb) + evt := &nostr.Event{} + if err := b.loadEvent(pos, evt); err != nil { + return fmt.Errorf("when loading event from mmap: %w", err) + } + + if il.ShouldIndex != nil && il.ShouldIndex(ctx, evt) { + // add the current reference + val = binary.BigEndian.AppendUint16(val, il.id) + + // if we were already updating to remove the reference + // now that we've added the reference back we don't really have to update + update = !update + + // actually index + if err := il.lmdbEnv.Update(func(iltxn *lmdb.Txn) error { + for k := range il.getIndexKeysForEvent(evt) { + if err := iltxn.Put(k.dbi, k.key, posb, 0); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("failed to index: %w", err) + } + } + + if update { + if err := txn.Put(b.indexId, idPrefix8, val, 0); err != nil { + return fmt.Errorf("failed to put updated index+refs: %w", err) + } + } + } + return nil +} + +func (il *IndexingLayer) Close() { + il.lmdbEnv.Close() +} diff --git 
a/eventstore/mmm/mmmm.go b/eventstore/mmm/mmmm.go new file mode 100644 index 0000000..b271221 --- /dev/null +++ b/eventstore/mmm/mmmm.go @@ -0,0 +1,335 @@ +package mmm + +import ( + "encoding/binary" + "fmt" + "os" + "path/filepath" + "slices" + "sync" + "syscall" + "unsafe" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/mmm/betterbinary" + "github.com/nbd-wtf/go-nostr" + "github.com/rs/zerolog" +) + +type mmap []byte + +func (_ mmap) String() string { return "" } + +type MultiMmapManager struct { + Dir string + Logger *zerolog.Logger + + layers IndexingLayers + + mmapfPath string + mmapf mmap + mmapfEnd uint64 + + lmdbEnv *lmdb.Env + stuff lmdb.DBI + knownLayers lmdb.DBI + indexId lmdb.DBI + + freeRanges []position + + mutex sync.Mutex +} + +func (b *MultiMmapManager) String() string { + return fmt.Sprintf("", b.Dir, len(b.layers), unsafe.Pointer(b)) +} + +const ( + MMAP_INFINITE_SIZE = 1 << 40 + maxuint16 = 65535 + maxuint32 = 4294967295 +) + +var FREERANGES_KEY = []byte{'F'} + +func (b *MultiMmapManager) Init() error { + // create directory if it doesn't exist + dbpath := filepath.Join(b.Dir, "mmmm") + if err := os.MkdirAll(dbpath, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", dbpath, err) + } + + // open a huge mmapped file + b.mmapfPath = filepath.Join(b.Dir, "events") + file, err := os.OpenFile(b.mmapfPath, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return fmt.Errorf("failed to open events file at %s: %w", b.mmapfPath, err) + } + mmapf, err := syscall.Mmap(int(file.Fd()), 0, MMAP_INFINITE_SIZE, + syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return fmt.Errorf("failed to mmap events file at %s: %w", b.mmapfPath, err) + } + b.mmapf = mmapf + + if stat, err := os.Stat(b.mmapfPath); err != nil { + return err + } else { + b.mmapfEnd = uint64(stat.Size()) + } + + // open lmdb + env, err := lmdb.NewEnv() + if err != nil { + return err + } + + env.SetMaxDBs(3) + 
env.SetMaxReaders(1000) + env.SetMapSize(1 << 38) // ~273GB + + err = env.Open(dbpath, lmdb.NoTLS, 0644) + if err != nil { + return fmt.Errorf("failed to open lmdb at %s: %w", dbpath, err) + } + b.lmdbEnv = env + + if err := b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + if dbi, err := txn.OpenDBI("stuff", lmdb.Create); err != nil { + return err + } else { + b.stuff = dbi + } + + // this just keeps track of all the layers we know (just their names) + // they will be instantiated by the application after their name is read from the database. + // new layers created at runtime will be saved here. + if dbi, err := txn.OpenDBI("layers", lmdb.Create); err != nil { + return err + } else { + b.knownLayers = dbi + } + + // this is a global index of events by id that also keeps references + // to all the layers that may be indexing them -- such that whenever + // an event is deleted from all layers it can be deleted from global + if dbi, err := txn.OpenDBI("id-references", lmdb.Create); err != nil { + return err + } else { + b.indexId = dbi + } + + // load all free ranges into memory + { + data, err := txn.Get(b.stuff, FREERANGES_KEY) + if err != nil && !lmdb.IsNotFound(err) { + return fmt.Errorf("on freeranges: %w", err) + } + b.freeRanges = make([]position, len(data)/12) + logOp := b.Logger.Debug() + for f := range b.freeRanges { + pos := positionFromBytes(data[f*12 : (f+1)*12]) + b.freeRanges[f] = pos + if pos.size > 20 { + logOp = logOp.Uint32(fmt.Sprintf("%d", pos.start), pos.size) + } + } + slices.SortFunc(b.freeRanges, func(a, b position) int { return int(a.size - b.size) }) + logOp.Msg("loaded free ranges") + } + + return nil + }); err != nil { + return fmt.Errorf("failed to open and load db data: %w", err) + } + + return nil +} + +func (b *MultiMmapManager) EnsureLayer(name string, il *IndexingLayer) error { + b.mutex.Lock() + defer b.mutex.Unlock() + + il.mmmm = b + il.name = name + + err := b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + txn.RawRead = true + + 
nameb := []byte(name) + if idv, err := txn.Get(b.knownLayers, nameb); lmdb.IsNotFound(err) { + if id, err := b.getNextAvailableLayerId(txn); err != nil { + return fmt.Errorf("failed to reserve a layer id for %s: %w", name, err) + } else { + il.id = id + } + + if err := il.Init(); err != nil { + return fmt.Errorf("failed to init new layer %s: %w", name, err) + } + + if err := il.runThroughEvents(txn); err != nil { + return fmt.Errorf("failed to run %s through events: %w", name, err) + } + return txn.Put(b.knownLayers, []byte(name), binary.BigEndian.AppendUint16(nil, il.id), 0) + } else if err == nil { + il.id = binary.BigEndian.Uint16(idv) + + if err := il.Init(); err != nil { + return fmt.Errorf("failed to init old layer %s: %w", name, err) + } + + return nil + } else { + return err + } + }) + if err != nil { + return err + } + + b.layers = append(b.layers, il) + return nil +} + +func (b *MultiMmapManager) DropLayer(name string) error { + b.mutex.Lock() + defer b.mutex.Unlock() + + // get layer reference + idx := slices.IndexFunc(b.layers, func(il *IndexingLayer) bool { return il.name == name }) + if idx == -1 { + return fmt.Errorf("layer '%s' doesn't exist", name) + } + il := b.layers[idx] + + // remove layer references + err := b.lmdbEnv.Update(func(txn *lmdb.Txn) error { + if err := b.removeAllReferencesFromLayer(txn, il.id); err != nil { + return err + } + + return txn.Del(b.knownLayers, []byte(il.name), nil) + }) + if err != nil { + return err + } + + // delete everything (the indexes) from this layer db actually + err = il.lmdbEnv.Update(func(txn *lmdb.Txn) error { + for _, dbi := range []lmdb.DBI{ + il.indexCreatedAt, + il.indexKind, + il.indexPubkey, + il.indexPubkeyKind, + il.indexTag, + il.indexTag32, + il.indexTagAddr, + il.indexPTagKind, + } { + if err := txn.Drop(dbi, true); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + + return il.lmdbEnv.Close() +} + +func (b *MultiMmapManager) 
removeAllReferencesFromLayer(txn *lmdb.Txn, layerId uint16) error { + cursor, err := txn.OpenCursor(b.indexId) + if err != nil { + return fmt.Errorf("when opening cursor on %v: %w", b.indexId, err) + } + defer cursor.Close() + + for { + idPrefix8, val, err := cursor.Get(nil, nil, lmdb.Next) + if lmdb.IsNotFound(err) { + break + } + if err != nil { + return fmt.Errorf("when moving the cursor: %w", err) + } + + var zeroRefs bool + var update bool + + needle := binary.BigEndian.AppendUint16(nil, layerId) + for s := 12; s < len(val); s += 2 { + if slices.Equal(val[s:s+2], needle) { + // swap delete + copy(val[s:s+2], val[len(val)-2:]) + val = val[0 : len(val)-2] + + update = true + + // we must erase this event if its references reach zero + zeroRefs = len(val) == 12 + + break + } + } + + if zeroRefs { + posb := val[0:12] + pos := positionFromBytes(posb) + + if err := b.purge(txn, idPrefix8, pos); err != nil { + return fmt.Errorf("failed to purge unreferenced event %x: %w", idPrefix8, err) + } + } else if update { + if err := txn.Put(b.indexId, idPrefix8, val, 0); err != nil { + return fmt.Errorf("failed to put updated index+refs: %w", err) + } + } + } + + return nil +} + +func (b *MultiMmapManager) loadEvent(pos position, eventReceiver *nostr.Event) error { + return betterbinary.Unmarshal(b.mmapf[pos.start:pos.start+uint64(pos.size)], eventReceiver) +} + +// getNextAvailableLayerId iterates through all existing layers to find a vacant id +func (b *MultiMmapManager) getNextAvailableLayerId(txn *lmdb.Txn) (uint16, error) { + cursor, err := txn.OpenCursor(b.knownLayers) + if err != nil { + return 0, fmt.Errorf("failed to open cursor: %w", err) + } + + used := [1 << 16]bool{} + _, val, err := cursor.Get(nil, nil, lmdb.First) + for err == nil { + // something was found + used[binary.BigEndian.Uint16(val)] = true + // next + _, val, err = cursor.Get(nil, nil, lmdb.Next) + } + if !lmdb.IsNotFound(err) { + // a real error + return 0, err + } + + // loop exited, get the first 
available + var id uint16 + for num, isUsed := range used { + if !isUsed { + id = uint16(num) + break + } + } + return id, nil +} + +func (b *MultiMmapManager) Close() { + b.lmdbEnv.Close() + for _, il := range b.layers { + il.Close() + } +} diff --git a/eventstore/mmm/mmmm_test.go b/eventstore/mmm/mmmm_test.go new file mode 100644 index 0000000..57e966d --- /dev/null +++ b/eventstore/mmm/mmmm_test.go @@ -0,0 +1,386 @@ +package mmm + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "os" + "testing" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/nbd-wtf/go-nostr" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +func TestMultiLayerIndexing(t *testing.T) { + // Create a temporary directory for the test + tmpDir := "/tmp/eventstore-mmm-test" + os.RemoveAll(tmpDir) + + logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}) + + // initialize MMM with three layers: + // 1. odd timestamps layer + // 2. even timestamps layer + // 3. all events layer + mmm := &MultiMmapManager{ + Dir: tmpDir, + Logger: &logger, + } + + err := mmm.Init() + require.NoError(t, err) + defer mmm.Close() + + // create layers + err = mmm.EnsureLayer("odd", &IndexingLayer{ + MaxLimit: 100, + ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { + return evt.CreatedAt%2 == 1 + }, + }) + require.NoError(t, err) + err = mmm.EnsureLayer("even", &IndexingLayer{ + MaxLimit: 100, + ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { + return evt.CreatedAt%2 == 0 + }, + }) + require.NoError(t, err) + err = mmm.EnsureLayer("all", &IndexingLayer{ + MaxLimit: 100, + ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { + return true + }, + }) + require.NoError(t, err) + + // create test events + ctx := context.Background() + baseTime := nostr.Timestamp(0) + sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb" + events := make([]*nostr.Event, 10) + for i := 0; i < 10; i++ { + evt := &nostr.Event{ + 
CreatedAt: baseTime + nostr.Timestamp(i), + Kind: 1, + Tags: nostr.Tags{}, + Content: "test content", + } + evt.Sign(sk) + events[i] = evt + stored, err := mmm.StoreGlobal(ctx, evt) + require.NoError(t, err) + require.True(t, stored) + } + + { + // query odd layer + oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + + oddCount := 0 + for evt := range oddResults { + require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(1)) + oddCount++ + } + require.Equal(t, 5, oddCount) + } + + { + // query even layer + evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + + evenCount := 0 + for evt := range evenResults { + require.Equal(t, evt.CreatedAt%2, nostr.Timestamp(0)) + evenCount++ + } + require.Equal(t, 5, evenCount) + } + + { + // query all layer + allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + + allCount := 0 + for range allResults { + allCount++ + } + require.Equal(t, 10, allCount) + } + + // delete some events + err = mmm.layers[0].DeleteEvent(ctx, events[1]) // odd timestamp + require.NoError(t, err) + err = mmm.layers[1].DeleteEvent(ctx, events[2]) // even timestamp + + // verify deletions + { + oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + oddCount := 0 + for range oddResults { + oddCount++ + } + require.Equal(t, 4, oddCount) + } + + { + evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + evenCount := 0 + for range evenResults { + evenCount++ + } + require.Equal(t, 4, evenCount) + } + + { + allResults, err := mmm.layers[2].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + allCount := 0 + for range allResults { + allCount++ + } + require.Equal(t, 10, allCount) + } + + // save events directly to layers regardless 
of timestamp + { + oddEvent := &nostr.Event{ + CreatedAt: baseTime + 100, // even timestamp + Kind: 1, + Content: "forced odd", + } + oddEvent.Sign(sk) + err = mmm.layers[0].SaveEvent(ctx, oddEvent) // save even timestamp to odd layer + require.NoError(t, err) + + // it is added to the odd il + oddResults, err := mmm.layers[0].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + oddCount := 0 + for range oddResults { + oddCount++ + } + require.Equal(t, 5, oddCount) + + // it doesn't affect the event il + evenResults, err := mmm.layers[1].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{1}, + }) + require.NoError(t, err) + evenCount := 0 + for range evenResults { + evenCount++ + } + require.Equal(t, 4, evenCount) + } + + // test replaceable events + for _, layer := range mmm.layers { + replaceable := &nostr.Event{ + CreatedAt: baseTime + 0, + Kind: 0, + Content: fmt.Sprintf("first"), + } + replaceable.Sign(sk) + err := layer.ReplaceEvent(ctx, replaceable) + require.NoError(t, err) + } + + // replace events alternating between layers + for i := range mmm.layers { + content := fmt.Sprintf("last %d", i) + + newEvt := &nostr.Event{ + CreatedAt: baseTime + 1000, + Kind: 0, + Content: content, + } + newEvt.Sign(sk) + + layer := mmm.layers[i] + err = layer.ReplaceEvent(ctx, newEvt) + require.NoError(t, err) + + // verify replacement in the layer that did it + results, err := layer.QueryEvents(ctx, nostr.Filter{ + Kinds: []int{0}, + }) + require.NoError(t, err) + + count := 0 + for evt := range results { + require.Equal(t, content, evt.Content) + count++ + } + require.Equal(t, 1, count) + + // verify other layers still have the old version + for j := 0; j < 3; j++ { + if mmm.layers[j] == layer { + continue + } + results, err := mmm.layers[j].QueryEvents(ctx, nostr.Filter{ + Kinds: []int{0}, + }) + require.NoError(t, err) + + count := 0 + for evt := range results { + if i < j { + require.Equal(t, "first", evt.Content) + } else { + require.Equal(t, 
evt.Content, fmt.Sprintf("last %d", j))
+				}
+				count++
+			}
+
+			require.Equal(t, 1, count, "%d/%d", i, j)
+		}
+	}
+}
+
+func TestLayerReferenceTracking(t *testing.T) {
+	// Create a temporary directory for the test
+	tmpDir, err := os.MkdirTemp("", "mmm-test-*")
+	require.NoError(t, err)
+	defer os.RemoveAll(tmpDir)
+
+	logger := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr})
+
+	// initialize MMM with three layers
+	mmm := &MultiMmapManager{
+		Dir:    tmpDir,
+		Logger: &logger,
+	}
+
+	err = mmm.Init()
+	require.NoError(t, err)
+	defer mmm.Close()
+
+	// create three layers
+	err = mmm.EnsureLayer("layer1", &IndexingLayer{
+		MaxLimit:    100,
+		ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
+	})
+	require.NoError(t, err)
+	err = mmm.EnsureLayer("layer2", &IndexingLayer{
+		MaxLimit:    100,
+		ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
+	})
+	require.NoError(t, err)
+	err = mmm.EnsureLayer("layer3", &IndexingLayer{
+		MaxLimit:    100,
+		ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
+	})
+	require.NoError(t, err)
+	err = mmm.EnsureLayer("layer4", &IndexingLayer{
+		MaxLimit:    100,
+		ShouldIndex: func(ctx context.Context, evt *nostr.Event) bool { return true },
+	})
+	require.NoError(t, err)
+
+	// create test events
+	ctx := context.Background()
+	sk := "945e01e37662430162121b804d3645a86d97df9d256917d86735d0eb219393eb"
+	evt1 := &nostr.Event{
+		CreatedAt: 1000,
+		Kind:      1,
+		Tags:      nostr.Tags{},
+		Content:   "event 1",
+	}
+	evt1.Sign(sk)
+
+	evt2 := &nostr.Event{
+		CreatedAt: 2000,
+		Kind:      1,
+		Tags:      nostr.Tags{},
+		Content:   "event 2",
+	}
+	evt2.Sign(sk)
+
+	// save evt1 to layer1
+	err = mmm.layers[0].SaveEvent(ctx, evt1)
+	require.NoError(t, err)
+
+	// save evt1 to layer2
+	err = mmm.layers[1].SaveEvent(ctx, evt1)
+	require.NoError(t, err)
+
+	// save evt1 to layer4
+	err = mmm.layers[3].SaveEvent(ctx, evt1)
+	require.NoError(t, err)
+
+	// delete evt1 from layer1
+	err = 
mmm.layers[0].DeleteEvent(ctx, evt1) + require.NoError(t, err) + + // save evt2 to layer3 + err = mmm.layers[2].SaveEvent(ctx, evt2) + require.NoError(t, err) + + // save evt2 to layer4 + err = mmm.layers[3].SaveEvent(ctx, evt2) + require.NoError(t, err) + + // save evt2 to layer3 again + err = mmm.layers[2].SaveEvent(ctx, evt2) + require.NoError(t, err) + + // delete evt1 from layer4 + err = mmm.layers[3].DeleteEvent(ctx, evt1) + require.NoError(t, err) + + // verify the state of the indexId database + err = mmm.lmdbEnv.View(func(txn *lmdb.Txn) error { + cursor, err := txn.OpenCursor(mmm.indexId) + if err != nil { + return err + } + defer cursor.Close() + + count := 0 + for k, v, err := cursor.Get(nil, nil, lmdb.First); err == nil; k, v, err = cursor.Get(nil, nil, lmdb.Next) { + count++ + if hex.EncodeToString(k) == evt1.ID[:16] { + // evt1 should only reference layer2 + require.Equal(t, 14, len(v), "evt1 should have one layer reference") + layerRef := binary.BigEndian.Uint16(v[12:14]) + require.Equal(t, mmm.layers[1].id, layerRef, "evt1 should reference layer2") + } else if hex.EncodeToString(k) == evt2.ID[:16] { + // evt2 should references to layer3 and layer4 + require.Equal(t, 16, len(v), "evt2 should have two layer references") + layer3Ref := binary.BigEndian.Uint16(v[12:14]) + require.Equal(t, mmm.layers[2].id, layer3Ref, "evt2 should reference layer3") + layer4Ref := binary.BigEndian.Uint16(v[14:16]) + require.Equal(t, mmm.layers[3].id, layer4Ref, "evt2 should reference layer4") + } else { + t.Errorf("unexpected event in indexId: %x", k) + } + } + require.Equal(t, 2, count, "should have exactly two events in indexId") + return nil + }) + require.NoError(t, err) +} diff --git a/eventstore/mmm/position.go b/eventstore/mmm/position.go new file mode 100644 index 0000000..bcb34b1 --- /dev/null +++ b/eventstore/mmm/position.go @@ -0,0 +1,27 @@ +package mmm + +import ( + "encoding/binary" + "fmt" +) + +type position struct { + start uint64 + size uint32 +} + +func 
(pos position) String() string { + return fmt.Sprintf("<%d|%d|%d>", pos.start, pos.size, pos.start+uint64(pos.size)) +} + +func positionFromBytes(posb []byte) position { + return position{ + size: binary.BigEndian.Uint32(posb[0:4]), + start: binary.BigEndian.Uint64(posb[4:12]), + } +} + +func bytesFromPosition(out []byte, pos position) { + binary.BigEndian.PutUint32(out[0:4], pos.size) + binary.BigEndian.PutUint64(out[4:12], pos.start) +} diff --git a/eventstore/mmm/purge.go b/eventstore/mmm/purge.go new file mode 100644 index 0000000..8ce73ff --- /dev/null +++ b/eventstore/mmm/purge.go @@ -0,0 +1,36 @@ +package mmm + +import ( + "bytes" + "fmt" + "os" + + "github.com/PowerDNS/lmdb-go/lmdb" +) + +func (b *MultiMmapManager) purge(txn *lmdb.Txn, idPrefix8 []byte, pos position) error { + b.Logger.Debug().Hex("event", idPrefix8).Stringer("pos", pos).Msg("purging") + + // delete from index + if err := txn.Del(b.indexId, idPrefix8, nil); err != nil { + return err + } + + // will add the current range to free ranges, which means it is "deleted" (or merge with existing) + isAtEnd := b.mergeNewFreeRange(pos) + + if isAtEnd { + // when at the end, truncate the mmap + // [new_pos_to_be_freed][end_of_file] -> shrink file! 
+ pos.size = 0 // so we don't try to add this some lines below + if err := os.Truncate(b.mmapfPath, int64(pos.start)); err != nil { + panic(fmt.Errorf("error decreasing %s: %w", b.mmapfPath, err)) + } + b.mmapfEnd = pos.start + } else { + // this is for debugging ------------- + copy(b.mmapf[pos.start:], bytes.Repeat([]byte{'!'}, int(pos.size))) + } + + return b.saveFreeRanges(txn) +} diff --git a/eventstore/mmm/query.go b/eventstore/mmm/query.go new file mode 100644 index 0000000..c280410 --- /dev/null +++ b/eventstore/mmm/query.go @@ -0,0 +1,460 @@ +package mmm + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "log" + "slices" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/internal" + "github.com/fiatjaf/eventstore/mmm/betterbinary" + "github.com/nbd-wtf/go-nostr" +) + +// GetByID returns the event -- if found in this mmm -- and all the IndexingLayers it belongs to. +func (b *MultiMmapManager) GetByID(id string) (*nostr.Event, IndexingLayers) { + events := make(chan *nostr.Event) + presence := make(chan []uint16) + b.queryByIDs(events, []string{id}, presence) + for evt := range events { + p := <-presence + present := make([]*IndexingLayer, len(p)) + for i, id := range p { + present[i] = b.layers.ByID(id) + } + return evt, present + } + return nil, nil +} + +// queryByIDs emits the events of the given id to the given channel if they exist anywhere in this mmm. +// if presence is given it will also be used to emit slices of the ids of the IndexingLayers this event is stored in. +// it closes the channels when it ends. 
+func (b *MultiMmapManager) queryByIDs(ch chan *nostr.Event, ids []string, presence chan []uint16) { + go b.lmdbEnv.View(func(txn *lmdb.Txn) error { + txn.RawRead = true + defer close(ch) + if presence != nil { + defer close(presence) + } + + for _, id := range ids { + if len(id) != 64 { + continue + } + + idPrefix8, _ := hex.DecodeString(id[0 : 8*2]) + val, err := txn.Get(b.indexId, idPrefix8) + if err == nil { + pos := positionFromBytes(val[0:12]) + evt := &nostr.Event{} + if err := b.loadEvent(pos, evt); err != nil { + panic(fmt.Errorf("failed to decode event from %v: %w", pos, err)) + } + ch <- evt + + if presence != nil { + layers := make([]uint16, 0, (len(val)-12)/2) + for s := 12; s < len(val); s += 2 { + layers = append(layers, binary.BigEndian.Uint16(val[s:s+2])) + } + presence <- layers + } + } + } + + return nil + }) +} + +func (il *IndexingLayer) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + + if len(filter.IDs) > 0 { + il.mmmm.queryByIDs(ch, filter.IDs, nil) + return ch, nil + } + + if filter.Search != "" { + close(ch) + return ch, nil + } + + // max number of events we'll return + limit := il.MaxLimit / 4 + if filter.Limit > 0 && filter.Limit < il.MaxLimit { + limit = filter.Limit + } + if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 { + close(ch) + return ch, nil + } else if tlimit > 0 { + limit = tlimit + } + + go il.lmdbEnv.View(func(txn *lmdb.Txn) error { + txn.RawRead = true + defer close(ch) + + results, err := il.query(txn, filter, limit) + + for _, ie := range results { + ch <- ie.Event + } + + return err + }) + + return ch, nil +} + +func (il *IndexingLayer) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) { + queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter) + if err != nil { + return nil, err + } + + iterators := make([]*iterator, len(queries)) + exhausted := make([]bool, 
len(queries)) // indicates that a query won't be used anymore + results := make([][]internal.IterEvent, len(queries)) + pulledPerQuery := make([]int, len(queries)) + + // these are kept updated so we never pull from the iterator that is at further distance + // (i.e. the one that has the oldest event among all) + // we will continue to pull from it as soon as some other iterator takes the position + oldest := internal.IterEvent{Q: -1} + + secondPhase := false // after we have gathered enough events we will change the way we iterate + secondBatch := make([][]internal.IterEvent, 0, len(queries)+1) + secondPhaseParticipants := make([]int, 0, len(queries)+1) + + // while merging results in the second phase we will alternate between these two lists + // to avoid having to create new lists all the time + var secondPhaseResultsA []internal.IterEvent + var secondPhaseResultsB []internal.IterEvent + var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating + var secondPhaseHasResultsPending bool + + remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing + batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + firstPhaseTotalPulled := 0 + + exhaust := func(q int) { + exhausted[q] = true + remainingUnexhausted-- + if q == oldest.Q { + oldest = internal.IterEvent{Q: -1} + } + } + + var firstPhaseResults []internal.IterEvent + + for q := range queries { + cursor, err := txn.OpenCursor(queries[q].dbi) + if err != nil { + return nil, err + } + iterators[q] = &iterator{cursor: cursor} + defer cursor.Close() + iterators[q].seek(queries[q].startingPoint) + results[q] = make([]internal.IterEvent, 0, batchSizePerQuery*2) + } + + // fmt.Println("queries", len(queries)) + + for c := 0; ; c++ { + batchSizePerQuery = internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted) + + // fmt.Println(" iteration", c, "remaining", remainingUnexhausted, "batchsize", 
batchSizePerQuery) + // we will go through all the iterators in batches until we have pulled all the required results + for q, query := range queries { + if exhausted[q] { + continue + } + if oldest.Q == q && remainingUnexhausted > 1 { + continue + } + // fmt.Println(" query", q, unsafe.Pointer(&results[q]), hex.EncodeToString(query.prefix), len(results[q])) + + it := iterators[q] + pulledThisIteration := 0 + + for { + // we already have a k and a v and an err from the cursor setup, so check and use these + if it.err != nil || + len(it.key) != query.keySize || + !bytes.HasPrefix(it.key, query.prefix) { + // either iteration has errored or we reached the end of this prefix + // fmt.Println(" reached end", it.key, query.keySize, query.prefix) + exhaust(q) + break + } + + // "id" indexes don't contain a timestamp + if query.timestampSize == 4 { + createdAt := binary.BigEndian.Uint32(it.key[len(it.key)-4:]) + if createdAt < since { + // fmt.Println(" reached since", createdAt, "<", since) + exhaust(q) + break + } + } + + // fetch actual event + pos := positionFromBytes(it.posb) + bin := il.mmmm.mmapf[pos.start : pos.start+uint64(pos.size)] + + // check it against pubkeys without decoding the entire thing + if extraAuthors != nil && !slices.Contains(extraAuthors, [32]byte(bin[39:71])) { + it.next() + continue + } + + // check it against kinds without decoding the entire thing + if extraKinds != nil && !slices.Contains(extraKinds, [2]byte(bin[1:3])) { + it.next() + continue + } + + // decode the entire thing (TODO: do a conditional decode while also checking the extra tag) + event := &nostr.Event{} + if err := betterbinary.Unmarshal(bin, event); err != nil { + log.Printf("mmm: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", + bin[0:32], query.prefix, query.startingPoint, query.dbi, err) + return nil, fmt.Errorf("event read error: %w", err) + } + + // fmt.Println(" event", hex.EncodeToString(val[0:4]), "kind", binary.BigEndian.Uint16(val[132:134]), 
"author", hex.EncodeToString(val[32:36]), "ts", nostr.Timestamp(binary.BigEndian.Uint32(val[128:132])), hex.EncodeToString(it.key), it.valIdx) + + // if there is still a tag to be checked, do it now + if extraTagValues != nil && !event.Tags.ContainsAny(extraTagKey, extraTagValues) { + it.next() + continue + } + + // this event is good to be used + evt := internal.IterEvent{Event: event, Q: q} + // + // + if secondPhase { + // do the process described below at HIWAWVRTP. + // if we've reached here this means we've already passed the `since` check. + // now we have to eliminate the event currently at the `since` threshold. + nextThreshold := firstPhaseResults[len(firstPhaseResults)-2] + if oldest.Event == nil { + // fmt.Println(" b1", evt.ID[0:8]) + // BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE) + // when we don't have the oldest set, we will keep the results + // and not change the cutting point -- it's bad, but hopefully not that bad. + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt > oldest.CreatedAt { + // fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt, evt.ID[0:8]) + // one of the events we have stored is the actual next threshold + // eliminate last, update since with oldest + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since, evt.ID[0:8]) + // we null the oldest Event as we can't rely on it anymore + // (we'll fall under BWWDHTOE above) until we have a new oldest set. + oldest = internal.IterEvent{Q: -1} + // anything we got that would be above this won't trigger an update to + // the oldest anyway, because it will be discarded as being after the limit. 
+ // + // finally + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + } else if nextThreshold.CreatedAt < evt.CreatedAt { + // the next last event in the firstPhaseResults is the next threshold + // fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt, evt.ID[0:8]) + // eliminate last, update since with the antelast + firstPhaseResults = firstPhaseResults[0 : len(firstPhaseResults)-1] + since = uint32(nextThreshold.CreatedAt) + // fmt.Println(" new since", since) + // add this to the results to be merged later + results[q] = append(results[q], evt) + secondPhaseHasResultsPending = true + // update the oldest event + if evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } else { + // fmt.Println(" b4", evt.ID[0:8]) + // oops, _we_ are the next `since` threshold + firstPhaseResults[len(firstPhaseResults)-1] = evt + since = uint32(evt.CreatedAt) + // fmt.Println(" new since", since) + // do not add us to the results to be merged later + // as we're already inhabiting the firstPhaseResults slice + } + } else { + results[q] = append(results[q], evt) + firstPhaseTotalPulled++ + + // update the oldest event + if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt { + oldest = evt + } + } + + pulledPerQuery[q]++ + pulledThisIteration++ + if pulledThisIteration > batchSizePerQuery { + // batch filled + it.next() + // fmt.Println(" filled", hex.EncodeToString(it.key), it.valIdx) + break + } + if pulledPerQuery[q] >= limit { + // batch filled + reached limit for this query (which is the global limit) + exhaust(q) + it.next() + break + } + + it.next() + } + } + + // we will do this check if we don't accumulated the requested number of events yet + // fmt.Println("oldest", oldest.Event, "from iter", oldest.Q) + if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) { + // fmt.Println("second phase aggregation!") + // when we are in the 
second phase we will aggressively aggregate results on every iteration + // + secondBatch = secondBatch[:0] + for s := 0; s < len(secondPhaseParticipants); s++ { + q := secondPhaseParticipants[s] + + if len(results[q]) > 0 { + secondBatch = append(secondBatch, results[q]) + } + + if exhausted[q] { + secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s) + s-- + } + } + + // every time we get here we will alternate between these A and B lists + // combining everything we have into a new partial results list. + // after we've done that we can again set the oldest. + // fmt.Println(" xxx", secondPhaseResultsToggle) + if secondPhaseResultsToggle { + secondBatch = append(secondBatch, secondPhaseResultsB) + secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA) + oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1] + // fmt.Println(" new aggregated a", len(secondPhaseResultsB)) + } else { + secondBatch = append(secondBatch, secondPhaseResultsA) + secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB) + oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1] + // fmt.Println(" new aggregated b", len(secondPhaseResultsB)) + } + secondPhaseResultsToggle = !secondPhaseResultsToggle + + since = uint32(oldest.CreatedAt) + // fmt.Println(" new since", since) + + // reset the `results` list so we can keep using it + results = results[:len(queries)] + for _, q := range secondPhaseParticipants { + results[q] = results[q][:0] + } + } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 { + // fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted) + + // we will exclude this oldest number as it is not relevant anymore + // (we now want to keep track only of the oldest among the remaining iterators) + oldest = internal.IterEvent{Q: -1} + + // HOW IT WORKS AFTER WE'VE REACHED THIS POINT (HIWAWVRTP) + // now we can 
combine the results we have and check what is our current oldest event. + // we also discard anything that is after the current cutting point (`limit`). + // so if we have [1,2,3], [10, 15, 20] and [7, 21, 49] but we only want 6 total + // we can just keep [1,2,3,7,10,15] and discard [20, 21, 49], + // and also adjust our `since` parameter to `15`, discarding anything we get after it + // and immediately declaring that iterator exhausted. + // also every time we get result that is more recent than this updated `since` we can + // keep it but also discard the previous since, moving the needle one back -- for example, + // if we get an `8` we can keep it and move the `since` parameter to `10`, discarding `15` + // in the process. + all := make([][]internal.IterEvent, len(results)) + copy(all, results) // we have to use this otherwise internal.MergeSortMultiple will scramble our results slice + firstPhaseResults = internal.MergeSortMultiple(all, limit, nil) + oldest = firstPhaseResults[limit-1] + since = uint32(oldest.CreatedAt) + // fmt.Println("new since", since) + + for q := range queries { + if exhausted[q] { + continue + } + + // we also automatically exhaust any of the iterators that have already passed the + // cutting point (`since`) + if results[q][len(results[q])-1].CreatedAt < oldest.CreatedAt { + exhausted[q] = true + remainingUnexhausted-- + continue + } + + // for all the remaining iterators, + // since we have merged all the events in this `firstPhaseResults` slice, we can empty the + // current `results` slices and reuse them. 
+ results[q] = results[q][:0] + + // build this index of indexes with everybody who remains + secondPhaseParticipants = append(secondPhaseParticipants, q) + } + + // we create these two lists and alternate between them so we don't have to create a + // a new one every time + secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2) + secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2) + + // from now on we won't run this block anymore + secondPhase = true + } + + // fmt.Println("remaining", remainingUnexhausted) + if remainingUnexhausted == 0 { + break + } + } + + // fmt.Println("is secondPhase?", secondPhase) + + var combinedResults []internal.IterEvent + + if secondPhase { + // fmt.Println("ending second phase") + // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff, + // the other will be empty + var secondPhaseResults []internal.IterEvent + // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB)) + if secondPhaseResultsToggle { + secondPhaseResults = secondPhaseResultsB + combinedResults = secondPhaseResultsA[0:limit] // reuse this + // fmt.Println(" using b", len(secondPhaseResultsA)) + } else { + secondPhaseResults = secondPhaseResultsA + combinedResults = secondPhaseResultsB[0:limit] // reuse this + // fmt.Println(" using a", len(secondPhaseResultsA)) + } + + all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults} + combinedResults = internal.MergeSortMultiple(all, limit, combinedResults) + // fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit) + } else { + combinedResults = make([]internal.IterEvent, limit) + combinedResults = internal.MergeSortMultiple(results, limit, combinedResults) + } + + return combinedResults, nil +} diff --git a/eventstore/mmm/query_planner.go b/eventstore/mmm/query_planner.go new file mode 100644 index 0000000..3e30469 --- /dev/null +++ b/eventstore/mmm/query_planner.go @@ -0,0 
+1,202 @@ +package mmm + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +type query struct { + i int + dbi lmdb.DBI + prefix []byte + results chan *nostr.Event + keySize int + timestampSize int + startingPoint []byte +} + +func (il *IndexingLayer) prepareQueries(filter nostr.Filter) ( + queries []query, + extraAuthors [][32]byte, + extraKinds [][2]byte, + extraTagKey string, + extraTagValues []string, + since uint32, + err error, +) { + // we will apply this to every query we return + defer func() { + if queries == nil { + return + } + + var until uint32 = 4294967295 + if filter.Until != nil { + if fu := uint32(*filter.Until); fu < until { + until = fu + 1 + } + } + for i, q := range queries { + sp := make([]byte, len(q.prefix)) + sp = sp[0:len(q.prefix)] + copy(sp, q.prefix) + queries[i].startingPoint = binary.BigEndian.AppendUint32(sp, uint32(until)) + queries[i].results = make(chan *nostr.Event, 12) + } + }() + + // this is where we'll end the iteration + if filter.Since != nil { + if fs := uint32(*filter.Since); fs > since { + since = fs + } + } + + if len(filter.Tags) > 0 { + // we will select ONE tag to query for and ONE extra tag to do further narrowing, if available + tagKey, tagValues, goodness := internal.ChooseNarrowestTag(filter) + + // we won't use a tag index for this as long as we have something else to match with + if goodness < 2 && (len(filter.Authors) > 0 || len(filter.Kinds) > 0) { + goto pubkeyMatching + } + + // only "p" tag has a goodness of 2, so + if goodness == 2 { + // this means we got a "p" tag, so we will use the ptag-kind index + i := 0 + if filter.Kinds != nil { + queries = make([]query, len(tagValues)*len(filter.Kinds)) + for _, value := range tagValues { + if len(value) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + + for _, kind := range filter.Kinds { + k := 
make([]byte, 8+2) + if _, err := hex.Decode(k[0:8], []byte(value[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + binary.BigEndian.PutUint16(k[8:8+2], uint16(kind)) + queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0 : 8+2], keySize: 8 + 2 + 4, timestampSize: 4} + i++ + } + } + } else { + // even if there are no kinds, in that case we will just return any kind and not care + queries = make([]query, len(tagValues)) + for i, value := range tagValues { + if len(value) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + + k := make([]byte, 8) + if _, err := hex.Decode(k[0:8], []byte(value[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid 'p' tag '%s'", value) + } + queries[i] = query{i: i, dbi: il.indexPTagKind, prefix: k[0:8], keySize: 8 + 2 + 4, timestampSize: 4} + } + } + } else { + // otherwise we will use a plain tag index + queries = make([]query, len(tagValues)) + for i, value := range tagValues { + // get key prefix (with full length) and offset where to write the created_at + dbi, k, offset := il.getTagIndexPrefix(value) + // remove the last parts part to get just the prefix we want here + prefix := k[0:offset] + queries[i] = query{i: i, dbi: dbi, prefix: prefix, keySize: len(prefix) + 4, timestampSize: 4} + i++ + } + + // add an extra kind filter if available (only do this on plain tag index, not on ptag-kind index) + if filter.Kinds != nil { + extraKinds = make([][2]byte, len(filter.Kinds)) + for i, kind := range filter.Kinds { + binary.BigEndian.PutUint16(extraKinds[i][0:2], uint16(kind)) + } + } + } + + // add an extra author search if possible + if filter.Authors != nil { + extraAuthors = make([][32]byte, len(filter.Authors)) + for i, pk := range filter.Authors { + hex.Decode(extraAuthors[i][:], []byte(pk)) + } + } + + // add an extra useless tag if available + filter.Tags = internal.CopyMapWithoutKey(filter.Tags, 
tagKey) + if len(filter.Tags) > 0 { + extraTagKey, extraTagValues, _ = internal.ChooseNarrowestTag(filter) + } + + return queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, nil + } + +pubkeyMatching: + if len(filter.Authors) > 0 { + if len(filter.Kinds) == 0 { + // will use pubkey index + queries = make([]query, len(filter.Authors)) + for i, pubkeyHex := range filter.Authors { + if len(pubkeyHex) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + prefix := make([]byte, 8) + if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + queries[i] = query{i: i, dbi: il.indexPubkey, prefix: prefix[0:8], keySize: 8 + 4, timestampSize: 4} + } + } else { + // will use pubkeyKind index + queries = make([]query, len(filter.Authors)*len(filter.Kinds)) + i := 0 + for _, pubkeyHex := range filter.Authors { + for _, kind := range filter.Kinds { + if len(pubkeyHex) != 64 { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + prefix := make([]byte, 8+2) + if _, err := hex.Decode(prefix[0:8], []byte(pubkeyHex[0:8*2])); err != nil { + return nil, nil, nil, "", nil, 0, fmt.Errorf("invalid author '%s'", pubkeyHex) + } + binary.BigEndian.PutUint16(prefix[8:8+2], uint16(kind)) + queries[i] = query{i: i, dbi: il.indexPubkeyKind, prefix: prefix[0 : 8+2], keySize: 10 + 4, timestampSize: 4} + i++ + } + } + } + + // potentially with an extra useless tag filtering + extraTagKey, extraTagValues, _ = internal.ChooseNarrowestTag(filter) + return queries, nil, nil, extraTagKey, extraTagValues, since, nil + } + + if len(filter.Kinds) > 0 { + // will use a kind index + queries = make([]query, len(filter.Kinds)) + for i, kind := range filter.Kinds { + prefix := make([]byte, 2) + binary.BigEndian.PutUint16(prefix[0:2], uint16(kind)) + queries[i] = query{i: i, dbi: il.indexKind, prefix: prefix[0:2], 
keySize: 2 + 4, timestampSize: 4} + } + + // potentially with an extra useless tag filtering + tagKey, tagValues, _ := internal.ChooseNarrowestTag(filter) + return queries, nil, nil, tagKey, tagValues, since, nil + } + + // if we got here our query will have nothing to filter with + queries = make([]query, 1) + prefix := make([]byte, 0) + queries[0] = query{i: 0, dbi: il.indexCreatedAt, prefix: prefix, keySize: 0 + 4, timestampSize: 4} + return queries, nil, nil, "", nil, since, nil +} diff --git a/eventstore/mmm/replace.go b/eventstore/mmm/replace.go new file mode 100644 index 0000000..720d041 --- /dev/null +++ b/eventstore/mmm/replace.go @@ -0,0 +1,54 @@ +package mmm + +import ( + "context" + "fmt" + "math" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" +) + +func (il *IndexingLayer) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + // sanity checking + if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 { + return fmt.Errorf("event with values out of expected boundaries") + } + + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + // when addressable, add the "d" tag to the filter + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + return il.mmmm.lmdbEnv.Update(func(mmmtxn *lmdb.Txn) error { + mmmtxn.RawRead = true + + return il.lmdbEnv.Update(func(iltxn *lmdb.Txn) error { + // now we fetch the past events, whatever they are, delete them and then save the new + prevResults, err := il.query(iltxn, filter, 10) // in theory limit could be just 1 and this should work + if err != nil { + return fmt.Errorf("failed to query past events with %s: %w", filter, err) + } + + shouldStore := true + for _, previous := range prevResults { + if internal.IsOlder(previous.Event, evt) { + if err := il.delete(mmmtxn, iltxn, previous.Event); err != nil { + return fmt.Errorf("failed to delete event %s 
for replacing: %w", previous.Event.ID, err) + } + } else { + // there is a newer event already stored, so we won't store this + shouldStore = false + } + } + if shouldStore { + _, err := il.mmmm.storeOn(mmmtxn, []*IndexingLayer{il}, []*lmdb.Txn{iltxn}, evt) + return err + } + + return nil + }) + }) +} diff --git a/eventstore/mmm/save.go b/eventstore/mmm/save.go new file mode 100644 index 0000000..33b9125 --- /dev/null +++ b/eventstore/mmm/save.go @@ -0,0 +1,234 @@ +package mmm + +import ( + "context" + "encoding/binary" + "encoding/hex" + "fmt" + "os" + "runtime" + "slices" + "syscall" + "unsafe" + + "github.com/PowerDNS/lmdb-go/lmdb" + "github.com/fiatjaf/eventstore/mmm/betterbinary" + "github.com/nbd-wtf/go-nostr" +) + +func (b *MultiMmapManager) StoreGlobal(ctx context.Context, evt *nostr.Event) (stored bool, err error) { + someoneWantsIt := false + + b.mutex.Lock() + defer b.mutex.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // do this just so it's cleaner, we're already locking the thread and the mutex anyway + mmmtxn, err := b.lmdbEnv.BeginTxn(nil, 0) + if err != nil { + return false, fmt.Errorf("failed to begin global transaction: %w", err) + } + mmmtxn.RawRead = true + + iltxns := make([]*lmdb.Txn, 0, len(b.layers)) + ils := make([]*IndexingLayer, 0, len(b.layers)) + + // ask if any of the indexing layers want this + for _, il := range b.layers { + if il.ShouldIndex != nil && il.ShouldIndex(ctx, evt) { + someoneWantsIt = true + + iltxn, err := il.lmdbEnv.BeginTxn(nil, 0) + if err != nil { + mmmtxn.Abort() + for _, txn := range iltxns { + txn.Abort() + } + return false, fmt.Errorf("failed to start txn on %s: %w", il.name, err) + } + + ils = append(ils, il) + iltxns = append(iltxns, iltxn) + } + } + + if !someoneWantsIt { + // no one wants it + mmmtxn.Abort() + return false, fmt.Errorf("not wanted") + } + + stored, err = b.storeOn(mmmtxn, ils, iltxns, evt) + if stored { + mmmtxn.Commit() + for _, txn := range iltxns { + txn.Commit() 
+ } + } else { + mmmtxn.Abort() + for _, txn := range iltxns { + txn.Abort() + } + } + + return stored, err +} + +func (il *IndexingLayer) SaveEvent(ctx context.Context, evt *nostr.Event) error { + il.mmmm.mutex.Lock() + defer il.mmmm.mutex.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // do this just so it's cleaner, we're already locking the thread and the mutex anyway + mmmtxn, err := il.mmmm.lmdbEnv.BeginTxn(nil, 0) + if err != nil { + return fmt.Errorf("failed to begin global transaction: %w", err) + } + mmmtxn.RawRead = true + + iltxn, err := il.lmdbEnv.BeginTxn(nil, 0) + if err != nil { + mmmtxn.Abort() + return fmt.Errorf("failed to start txn on %s: %w", il.name, err) + } + + if _, err := il.mmmm.storeOn(mmmtxn, []*IndexingLayer{il}, []*lmdb.Txn{iltxn}, evt); err != nil { + mmmtxn.Abort() + if iltxn != nil { + iltxn.Abort() + } + return err + } + + mmmtxn.Commit() + iltxn.Commit() + return nil +} + +func (b *MultiMmapManager) storeOn( + mmmtxn *lmdb.Txn, + ils []*IndexingLayer, + iltxns []*lmdb.Txn, + evt *nostr.Event, +) (stored bool, err error) { + // sanity checking + if evt.CreatedAt > maxuint32 || evt.Kind > maxuint16 { + return false, fmt.Errorf("event with values out of expected boundaries") + } + + // check if we already have this id + idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2]) + val, err := mmmtxn.Get(b.indexId, idPrefix8) + if err == nil { + // we found the event, now check if it is already indexed by the layers that want to store it + for i := len(ils) - 1; i >= 0; i-- { + for s := 12; s < len(val); s += 2 { + ilid := binary.BigEndian.Uint16(val[s : s+2]) + if ils[i].id == ilid { + // swap delete this il, but keep the deleted ones at the end + // (so the caller can successfully finalize the transactions) + ils[i], ils[len(ils)-1] = ils[len(ils)-1], ils[i] + ils = ils[0 : len(ils)-1] + iltxns[i], iltxns[len(iltxns)-1] = iltxns[len(iltxns)-1], iltxns[i] + iltxns = iltxns[0 : len(iltxns)-1] + break + } + } + } + } 
else if !lmdb.IsNotFound(err) { + // now if we got an error from lmdb we will only proceed if we get a NotFound -- for anything else we will error + return false, fmt.Errorf("error checking existence: %w", err) + } + + // if all ils already have this event indexed (or no il was given) we can end here + if len(ils) == 0 { + return false, nil + } + + // get event binary size + pos := position{ + size: uint32(betterbinary.Measure(*evt)), + } + if pos.size >= 1<<16 { + return false, fmt.Errorf("event too large to store, max %d, got %d", 1<<16, pos.size) + } + + // find a suitable place for this to be stored in + appendToMmap := true + for f, fr := range b.freeRanges { + if fr.size >= pos.size { + // found the smallest possible place that can fit this event + appendToMmap = false + pos.start = fr.start + + // modify the free ranges we're keeping track of + // (i.e. delete the current and add a new freerange with the remaining space) + b.freeRanges = slices.Delete(b.freeRanges, f, f+1) + + if pos.size != fr.size { + b.addNewFreeRange(position{ + start: fr.start + uint64(pos.size), + size: fr.size - pos.size, + }) + } + + if err := b.saveFreeRanges(mmmtxn); err != nil { + return false, fmt.Errorf("failed to save modified free ranges: %w", err) + } + + break + } + } + + if appendToMmap { + // no free ranges found, so write to the end of the mmap file + pos.start = b.mmapfEnd + mmapfNewSize := int64(b.mmapfEnd) + int64(pos.size) + if err := os.Truncate(b.mmapfPath, mmapfNewSize); err != nil { + return false, fmt.Errorf("error increasing %s: %w", b.mmapfPath, err) + } + b.mmapfEnd = uint64(mmapfNewSize) + } + + // write to the mmap + if err := betterbinary.Marshal(*evt, b.mmapf[pos.start:]); err != nil { + return false, fmt.Errorf("error marshaling to %d: %w", pos.start, err) + } + + // prepare value to be saved in the id index (if we didn't have it already) + // val: [posb][layerIdRefs...] 
+ if val == nil { + val = make([]byte, 12, 12+2*len(b.layers)) + binary.BigEndian.PutUint32(val[0:4], pos.size) + binary.BigEndian.PutUint64(val[4:12], pos.start) + } + + // each index that was reserved above for the different layers + for i, il := range ils { + iltxn := iltxns[i] + + for k := range il.getIndexKeysForEvent(evt) { + if err := iltxn.Put(k.dbi, k.key, val[0:12] /* pos */, 0); err != nil { + b.Logger.Warn().Str("name", il.name).Msg("failed to index event on layer") + } + } + + val = binary.BigEndian.AppendUint16(val, il.id) + } + + // store the id index with the refcounts + if err := mmmtxn.Put(b.indexId, idPrefix8, val, 0); err != nil { + panic(fmt.Errorf("failed to store %x by id: %w", idPrefix8, err)) + } + + // msync + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, + uintptr(unsafe.Pointer(&b.mmapf[0])), uintptr(len(b.mmapf)), syscall.MS_SYNC) + if errno != 0 { + panic(fmt.Errorf("msync failed: %w", syscall.Errno(errno))) + } + + return true, nil +} diff --git a/eventstore/negentropy.go b/eventstore/negentropy.go new file mode 100644 index 0000000..bbcb8d3 --- /dev/null +++ b/eventstore/negentropy.go @@ -0,0 +1,13 @@ +package eventstore + +import "context" + +var negentropySessionKey = struct{}{} + +func IsNegentropySession(ctx context.Context) bool { + return ctx.Value(negentropySessionKey) != nil +} + +func SetNegentropy(ctx context.Context) context.Context { + return context.WithValue(ctx, negentropySessionKey, struct{}{}) +} diff --git a/eventstore/nullstore/README.md b/eventstore/nullstore/README.md new file mode 100644 index 0000000..8f9673d --- /dev/null +++ b/eventstore/nullstore/README.md @@ -0,0 +1,2 @@ +`nullstore` is an eventstore that doesn't actually do anything. +It doesn't store anything, it doesn't return anything. 
diff --git a/eventstore/nullstore/lib.go b/eventstore/nullstore/lib.go new file mode 100644 index 0000000..66715f1 --- /dev/null +++ b/eventstore/nullstore/lib.go @@ -0,0 +1,36 @@ +package nullstore + +import ( + "context" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +var _ eventstore.Store = NullStore{} + +type NullStore struct{} + +func (b NullStore) Init() error { + return nil +} + +func (b NullStore) Close() {} + +func (b NullStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + return nil +} + +func (b NullStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + close(ch) + return ch, nil +} + +func (b NullStore) SaveEvent(ctx context.Context, evt *nostr.Event) error { + return nil +} + +func (b NullStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + return nil +} diff --git a/eventstore/relay_interface.go b/eventstore/relay_interface.go new file mode 100644 index 0000000..77259e4 --- /dev/null +++ b/eventstore/relay_interface.go @@ -0,0 +1,56 @@ +package eventstore + +import ( + "context" + "fmt" + + "github.com/nbd-wtf/go-nostr" +) + +type RelayWrapper struct { + Store +} + +var _ nostr.RelayStore = (*RelayWrapper)(nil) + +func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error { + if nostr.IsEphemeralKind(evt.Kind) { + // do not store ephemeral events + return nil + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if nostr.IsRegularKind(evt.Kind) { + // regular events are just saved directly + if err := w.SaveEvent(ctx, &evt); err != nil && err != ErrDupEvent { + return fmt.Errorf("failed to save: %w", err) + } + return nil + } + + // others are replaced + w.Store.ReplaceEvent(ctx, &evt) + + return nil +} + +func (w RelayWrapper) QuerySync(ctx context.Context, filter nostr.Filter) ([]*nostr.Event, error) { + ch, err := w.Store.QueryEvents(ctx, filter) + if err != nil { + return nil, 
fmt.Errorf("failed to query: %w", err) + } + + n := filter.Limit + if n == 0 { + n = 500 + } + + results := make([]*nostr.Event, 0, n) + for evt := range ch { + results = append(results, evt) + } + + return results, nil +} diff --git a/eventstore/slicestore/lib.go b/eventstore/slicestore/lib.go new file mode 100644 index 0000000..a3fcaad --- /dev/null +++ b/eventstore/slicestore/lib.go @@ -0,0 +1,157 @@ +package slicestore + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/internal" + "github.com/nbd-wtf/go-nostr" + "golang.org/x/exp/slices" +) + +var _ eventstore.Store = (*SliceStore)(nil) + +type SliceStore struct { + sync.Mutex + internal []*nostr.Event + + MaxLimit int +} + +func (b *SliceStore) Init() error { + b.internal = make([]*nostr.Event, 0, 5000) + if b.MaxLimit == 0 { + b.MaxLimit = 500 + } + return nil +} + +func (b *SliceStore) Close() {} + +func (b *SliceStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) { + filter.Limit = b.MaxLimit + } + + // efficiently determine where to start and end + start := 0 + end := len(b.internal) + if filter.Until != nil { + start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator) + } + if filter.Since != nil { + end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator) + } + + // ham + if end < start { + close(ch) + return ch, nil + } + + count := 0 + go func() { + for _, event := range b.internal[start:end] { + if count == filter.Limit { + break + } + + if filter.Matches(event) { + select { + case ch <- event: + case <-ctx.Done(): + return + } + count++ + } + } + close(ch) + }() + return ch, nil +} + +func (b *SliceStore) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + var val int64 + for _, event := range b.internal { + 
if filter.Matches(event) { + val++ + } + } + return val, nil +} + +func (b *SliceStore) SaveEvent(ctx context.Context, evt *nostr.Event) error { + idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator) + if found { + return eventstore.ErrDupEvent + } + // let's insert at the correct place in the array + b.internal = append(b.internal, evt) // bogus + copy(b.internal[idx+1:], b.internal[idx:]) + b.internal[idx] = evt + + return nil +} + +func (b *SliceStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator) + if !found { + // we don't have this event + return nil + } + + // we have it + copy(b.internal[idx:], b.internal[idx+1:]) + b.internal = b.internal[0 : len(b.internal)-1] + return nil +} + +func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + b.Lock() + defer b.Unlock() + + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + ch, err := b.QueryEvents(ctx, filter) + if err != nil { + return fmt.Errorf("failed to query before replacing: %w", err) + } + + shouldStore := true + for previous := range ch { + if internal.IsOlder(previous, evt) { + if err := b.DeleteEvent(ctx, previous); err != nil { + return fmt.Errorf("failed to delete event for replacing: %w", err) + } + } else { + shouldStore = false + } + } + + if shouldStore { + if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent { + return fmt.Errorf("failed to save: %w", err) + } + } + + return nil +} + +func eventTimestampComparator(e *nostr.Event, t nostr.Timestamp) int { + return int(t) - int(e.CreatedAt) +} + +func eventComparator(a *nostr.Event, b *nostr.Event) int { + c := int(b.CreatedAt) - int(a.CreatedAt) + if c != 0 { + return c + } + return strings.Compare(b.ID, a.ID) +} diff --git 
a/eventstore/slicestore/slicestore_test.go b/eventstore/slicestore/slicestore_test.go new file mode 100644 index 0000000..cc5c4bb --- /dev/null +++ b/eventstore/slicestore/slicestore_test.go @@ -0,0 +1,60 @@ +package slicestore + +import ( + "context" + "testing" + + "github.com/nbd-wtf/go-nostr" +) + +func TestBasicStuff(t *testing.T) { + ctx := context.Background() + ss := &SliceStore{} + ss.Init() + defer ss.Close() + + for i := 0; i < 20; i++ { + v := i + kind := 11 + if i%2 == 0 { + v = i + 10000 + } + if i%3 == 0 { + kind = 12 + } + ss.SaveEvent(ctx, &nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: kind}) + } + + ch, _ := ss.QueryEvents(ctx, nostr.Filter{}) + list := make([]*nostr.Event, 0, 20) + for event := range ch { + list = append(list, event) + } + + if len(list) != 20 { + t.Fatalf("failed to load 20 events") + } + if list[0].CreatedAt != 10018 || list[1].CreatedAt != 10016 || list[18].CreatedAt != 3 || list[19].CreatedAt != 1 { + t.Fatalf("order is incorrect") + } + + until := nostr.Timestamp(9999) + ch, _ = ss.QueryEvents(ctx, nostr.Filter{Limit: 15, Until: &until, Kinds: []int{11}}) + list = make([]*nostr.Event, 0, 7) + for event := range ch { + list = append(list, event) + } + if len(list) != 7 { + t.Fatalf("should have gotten 7, not %d", len(list)) + } + + since := nostr.Timestamp(10009) + ch, _ = ss.QueryEvents(ctx, nostr.Filter{Since: &since}) + list = make([]*nostr.Event, 0, 5) + for event := range ch { + list = append(list, event) + } + if len(list) != 5 { + t.Fatalf("should have gotten 5, not %d", len(list)) + } +} diff --git a/eventstore/store.go b/eventstore/store.go new file mode 100644 index 0000000..58d16e6 --- /dev/null +++ b/eventstore/store.go @@ -0,0 +1,32 @@ +package eventstore + +import ( + "context" + + "github.com/nbd-wtf/go-nostr" +) + +// Store is a persistence layer for nostr events handled by a relay. 
+type Store interface { + // Init is called at the very beginning by [Server.Start], after [Relay.Init], + // allowing a storage to initialize its internal resources. + Init() error + + // Close must be called after you're done using the store, to free up resources and so on. + Close() + + // QueryEvents should return a channel with the events as they're recovered from a database. + // the channel should be closed after the events are all delivered. + QueryEvents(context.Context, nostr.Filter) (chan *nostr.Event, error) + // DeleteEvent just deletes an event, no side-effects. + DeleteEvent(context.Context, *nostr.Event) error + // SaveEvent just saves an event, no side-effects. + SaveEvent(context.Context, *nostr.Event) error + // ReplaceEvent atomically replaces a replaceable or addressable event. + // Conceptually it is like a Query->Delete->Save, but streamlined. + ReplaceEvent(context.Context, *nostr.Event) error +} + +type Counter interface { + CountEvents(context.Context, nostr.Filter) (int64, error) +} diff --git a/eventstore/strfry/lib.go b/eventstore/strfry/lib.go new file mode 100644 index 0000000..fc1d081 --- /dev/null +++ b/eventstore/strfry/lib.go @@ -0,0 +1,164 @@ +package strfry + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/fiatjaf/eventstore" + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" +) + +var _ eventstore.Store = (*StrfryBackend)(nil) + +type StrfryBackend struct { + ConfigPath string + ExecutablePath string +} + +func (s *StrfryBackend) Init() error { + if s.ExecutablePath == "" { + configPath := filepath.Dir(s.ConfigPath) + os.Setenv("PATH", configPath+":"+os.Getenv("PATH")) + exe, err := exec.LookPath("strfry") + if err != nil { + return fmt.Errorf("failed to find strfry executable: %w (better provide it manually)", err) + } + s.ExecutablePath = exe + } + + return nil +} + +func (_ StrfryBackend) Close() {} + +func (s StrfryBackend) QueryEvents(ctx context.Context, 
filter nostr.Filter) (chan *nostr.Event, error) { + stdout, err := s.baseStrfryScan(ctx, filter) + if err != nil { + return nil, err + } + + ch := make(chan *nostr.Event) + go func() { + defer close(ch) + for { + line, err := stdout.ReadBytes('\n') + if err != nil { + break + } + + evt := &nostr.Event{} + easyjson.Unmarshal(line, evt) + if evt.ID == "" { + continue + } + + ch <- evt + } + }() + + return ch, nil +} + +func (s *StrfryBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error { + return s.SaveEvent(ctx, evt) +} + +func (s StrfryBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error { + args := make([]string, 0, 4) + if s.ConfigPath != "" { + args = append(args, "--config="+s.ConfigPath) + } + args = append(args, "import") + args = append(args, "--show-rejected") + args = append(args, "--no-verify") + + cmd := exec.CommandContext(ctx, s.ExecutablePath, args...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + + // event is sent on stdin + j, _ := easyjson.Marshal(evt) + cmd.Stdin = bytes.NewBuffer(j) + + err := cmd.Run() + if err != nil { + return fmt.Errorf( + "%s %s failed: %w, (%s)", + s.ExecutablePath, strings.Join(args, " "), err, stderr.String(), + ) + } + + return nil +} + +func (s StrfryBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error { + args := make([]string, 0, 3) + if s.ConfigPath != "" { + args = append(args, "--config="+s.ConfigPath) + } + args = append(args, "delete") + args = append(args, "--filter={\"ids\":[\""+evt.ID+"\"]}") + + cmd := exec.CommandContext(ctx, s.ExecutablePath, args...) 
+ var stderr bytes.Buffer + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return fmt.Errorf( + "%s %s failed: %w, (%s)", + s.ExecutablePath, strings.Join(args, " "), err, stderr.String(), + ) + } + + return nil +} + +func (s StrfryBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + stdout, err := s.baseStrfryScan(ctx, filter) + if err != nil { + return 0, err + } + + var count int64 + for { + _, err := stdout.ReadBytes('\n') + if err != nil { + break + } + count++ + } + + return count, nil +} + +func (s StrfryBackend) baseStrfryScan(ctx context.Context, filter nostr.Filter) (*bytes.Buffer, error) { + args := make([]string, 0, 3) + if s.ConfigPath != "" { + args = append(args, "--config="+s.ConfigPath) + } + args = append(args, "scan") + args = append(args, filter.String()) + + cmd := exec.CommandContext(ctx, s.ExecutablePath, args...) + var stdout bytes.Buffer + cmd.Stdout = &stdout + var stderr bytes.Buffer + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return nil, fmt.Errorf( + "%s %s failed: %w, (%s)", + s.ExecutablePath, strings.Join(args, " "), err, stderr.String(), + ) + } + + return &stdout, nil +} diff --git a/eventstore/test/benchmark_test.go b/eventstore/test/benchmark_test.go new file mode 100644 index 0000000..8bd9370 --- /dev/null +++ b/eventstore/test/benchmark_test.go @@ -0,0 +1,113 @@ +package test + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "os" + "testing" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/eventstore/slicestore" + "github.com/fiatjaf/eventstore/sqlite3" + "github.com/nbd-wtf/go-nostr" +) + +func BenchmarkSliceStore(b *testing.B) { + s := &slicestore.SliceStore{} + s.Init() + runBenchmarkOn(b, s) +} + +func BenchmarkLMDB(b *testing.B) { + os.RemoveAll(dbpath + "lmdb") + l := &lmdb.LMDBBackend{Path: dbpath + "lmdb"} + l.Init() + + runBenchmarkOn(b, l) +} + +func 
BenchmarkBadger(b *testing.B) { + d := &badger.BadgerBackend{Path: dbpath + "badger"} + d.Init() + runBenchmarkOn(b, d) +} + +func BenchmarkSQLite(b *testing.B) { + os.RemoveAll(dbpath + "sqlite") + q := &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryTagsLimit: 50} + q.Init() + + runBenchmarkOn(b, q) +} + +func runBenchmarkOn(b *testing.B, db eventstore.Store) { + for i := 0; i < 10000; i++ { + eTag := make([]byte, 32) + binary.BigEndian.PutUint16(eTag, uint16(i)) + + ref, _ := nostr.GetPublicKey(sk3) + if i%3 == 0 { + ref, _ = nostr.GetPublicKey(sk4) + } + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(i*10 + 2), + Content: fmt.Sprintf("hello %d", i), + Tags: nostr.Tags{ + {"t", fmt.Sprintf("t%d", i)}, + {"e", hex.EncodeToString(eTag)}, + {"p", ref}, + }, + Kind: i % 10, + } + sk := sk3 + if i%3 == 0 { + sk = sk4 + } + evt.Sign(sk) + db.SaveEvent(ctx, evt) + } + + filters := make([]nostr.Filter, 0, 10) + filters = append(filters, nostr.Filter{Kinds: []int{1, 4, 8, 16}}) + pk3, _ := nostr.GetPublicKey(sk3) + filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}}) + filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}, Kinds: []int{3, 4}}) + filters = append(filters, nostr.Filter{}) + filters = append(filters, nostr.Filter{Limit: 20}) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3}}}) + pk4, _ := nostr.GetPublicKey(sk4) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}}) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}}) + eTags := make([]string, 20) + for i := 0; i < 20; i++ { + eTag := make([]byte, 32) + binary.BigEndian.PutUint16(eTag, uint16(i)) + eTags[i] = hex.EncodeToString(eTag) + } + filters = append(filters, nostr.Filter{Kinds: []int{9}, Tags: nostr.TagMap{"e": eTags}}) + filters = append(filters, 
nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}}) + filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}}) + filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50}) + + b.Run("filter", func(b *testing.B) { + for q, filter := range filters { + b.Run(fmt.Sprintf("q-%d", q), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = db.QueryEvents(ctx, filter) + } + }) + } + }) + + b.Run("insert", func(b *testing.B) { + evt := &nostr.Event{Kind: 788, CreatedAt: nostr.Now(), Content: "blergh", Tags: nostr.Tags{{"t", "spam"}}} + evt.Sign(sk4) + for i := 0; i < b.N; i++ { + db.SaveEvent(ctx, evt) + } + }) +} diff --git a/eventstore/test/db_test.go b/eventstore/test/db_test.go new file mode 100644 index 0000000..efbc311 --- /dev/null +++ b/eventstore/test/db_test.go @@ -0,0 +1,77 @@ +package test + +import ( + "context" + "os" + "testing" + + embeddedpostgres "github.com/fergusstrange/embedded-postgres" + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/eventstore/postgresql" + "github.com/fiatjaf/eventstore/slicestore" + "github.com/fiatjaf/eventstore/sqlite3" +) + +const ( + dbpath = "/tmp/eventstore-test" + sk3 = "0000000000000000000000000000000000000000000000000000000000000003" + sk4 = "0000000000000000000000000000000000000000000000000000000000000004" +) + +var ctx = context.Background() + +var tests = []struct { + name string + run func(*testing.T, eventstore.Store) +}{ + {"first", runFirstTestOn}, + {"second", runSecondTestOn}, + {"manyauthors", manyAuthorsTest}, + {"unbalanced", unbalancedTest}, +} + +func TestSliceStore(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { test.run(t, &slicestore.SliceStore{}) }) + } +} + +func TestLMDB(t *testing.T) { + for _, test := range tests { + os.RemoveAll(dbpath + "lmdb") + t.Run(test.name, func(t *testing.T) { test.run(t, 
&lmdb.LMDBBackend{Path: dbpath + "lmdb"}) }) + } +} + +func TestBadger(t *testing.T) { + for _, test := range tests { + os.RemoveAll(dbpath + "badger") + t.Run(test.name, func(t *testing.T) { test.run(t, &badger.BadgerBackend{Path: dbpath + "badger"}) }) + } +} + +func TestSQLite(t *testing.T) { + for _, test := range tests { + os.RemoveAll(dbpath + "sqlite") + t.Run(test.name, func(t *testing.T) { + test.run(t, &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000}) + }) + } +} + +func TestPostgres(t *testing.T) { + for _, test := range tests { + postgres := embeddedpostgres.NewDatabase() + err := postgres.Start() + if err != nil { + t.Fatalf("failed to start embedded postgres: %s", err) + return + } + t.Run(test.name, func(t *testing.T) { + test.run(t, &postgresql.PostgresBackend{DatabaseURL: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000}) + }) + postgres.Stop() + } +} diff --git a/eventstore/test/first_test.go b/eventstore/test/first_test.go new file mode 100644 index 0000000..e34f13f --- /dev/null +++ b/eventstore/test/first_test.go @@ -0,0 +1,248 @@ +package test + +import ( + "fmt" + "slices" + "strconv" + "strings" + "testing" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +func runFirstTestOn(t *testing.T, db eventstore.Store) { + err := db.Init() + require.NoError(t, err) + + allEvents := make([]*nostr.Event, 0, 10) + + // insert + for i := 0; i < 10; i++ { + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(i*10 + 2), + Content: fmt.Sprintf("hello %d", i), + Tags: nostr.Tags{ + {"t", fmt.Sprintf("%d", i)}, + {"e", "0" + strconv.Itoa(i) + strings.Repeat("0", 62)}, + }, + Kind: 1, + } + sk := sk3 + if i%3 == 0 { + sk = sk4 + } + if i%2 == 0 { + evt.Kind = 9 + } + evt.Sign(sk) + allEvents = append(allEvents, evt) + err = db.SaveEvent(ctx, evt) 
+ require.NoError(t, err) + } + + // query + w := eventstore.RelayWrapper{Store: db} + { + results, err := w.QuerySync(ctx, nostr.Filter{}) + require.NoError(t, err) + require.Len(t, results, len(allEvents)) + require.ElementsMatch(t, + allEvents, + results, + "open-ended query results error") + } + + { + for i := 0; i < 10; i++ { + since := nostr.Timestamp(i*10 + 1) + results, err := w.QuerySync(ctx, nostr.Filter{Since: &since}) + require.NoError(t, err) + require.ElementsMatch(t, + allEvents[i:], + results, + "since query results error %d", i) + } + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{IDs: []string{allEvents[7].ID, allEvents[9].ID}}) + require.NoError(t, err) + require.Len(t, results, 2) + require.ElementsMatch(t, + []*nostr.Event{allEvents[7], allEvents[9]}, + results, + "id query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{1}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[1], allEvents[3], allEvents[5], allEvents[7], allEvents[9]}, + results, + "kind query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{9}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]}, + results, + "second kind query error") + } + + { + pk4, _ := nostr.GetPublicKey(sk4) + results, err := w.QuerySync(ctx, nostr.Filter{Authors: []string{pk4}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[0], allEvents[3], allEvents[6], allEvents[9]}, + results, + "pubkey query error") + } + + { + pk3, _ := nostr.GetPublicKey(sk3) + results, err := w.QuerySync(ctx, nostr.Filter{Kinds: []int{9}, Authors: []string{pk3}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[2], allEvents[4], allEvents[8]}, + results, + "pubkey kind query error") + } + + { + pk3, _ := nostr.GetPublicKey(sk3) + pk4, _ := nostr.GetPublicKey(sk4) + results, err := 
w.QuerySync(ctx, nostr.Filter{Kinds: []int{9, 5, 7}, Authors: []string{pk3, pk4, pk4[1:] + "a"}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[0], allEvents[2], allEvents[4], allEvents[6], allEvents[8]}, + results, + "2 pubkeys and kind query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "4", "6"}}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[2], allEvents[4], allEvents[6]}, + results, + "tag query error") + } + + // delete + require.NoError(t, db.DeleteEvent(ctx, allEvents[4]), "delete 1 error") + require.NoError(t, db.DeleteEvent(ctx, allEvents[5]), "delete 2 error") + + // query again + { + results, err := w.QuerySync(ctx, nostr.Filter{}) + require.NoError(t, err) + require.ElementsMatch(t, + slices.Concat(allEvents[0:4], allEvents[6:]), + results, + "second open-ended query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"t": []string{"2", "6"}}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[2], allEvents[6]}, + results, + "second tag query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{Tags: nostr.TagMap{"e": []string{allEvents[3].Tags[1][1]}}}) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{allEvents[3]}, + results, + "'e' tag query error") + } + + { + for i := 0; i < 4; i++ { + until := nostr.Timestamp(i*10 + 1) + results, err := w.QuerySync(ctx, nostr.Filter{Until: &until}) + require.NoError(t, err) + + require.ElementsMatch(t, + allEvents[:i], + results, + "until query results error %d", i) + } + } + + // test p-tag querying + { + p := "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + p2 := "2eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" + + newEvents := []*nostr.Event{ + {Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1984, CreatedAt: nostr.Timestamp(100), Content: "first"}, + 
{Tags: nostr.Tags{nostr.Tag{"p", p}, nostr.Tag{"t", "x"}}, Kind: 1984, CreatedAt: nostr.Timestamp(101), Content: "middle"}, + {Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1984, CreatedAt: nostr.Timestamp(102), Content: "last"}, + {Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1111, CreatedAt: nostr.Timestamp(101), Content: "bulufas"}, + {Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1111, CreatedAt: nostr.Timestamp(102), Content: "safulub"}, + {Tags: nostr.Tags{nostr.Tag{"p", p}}, Kind: 1, CreatedAt: nostr.Timestamp(103), Content: "bololo"}, + {Tags: nostr.Tags{nostr.Tag{"p", p2}}, Kind: 1, CreatedAt: nostr.Timestamp(104), Content: "wololo"}, + {Tags: nostr.Tags{nostr.Tag{"p", p}, nostr.Tag{"p", p2}}, Kind: 1, CreatedAt: nostr.Timestamp(104), Content: "trololo"}, + } + + sk := nostr.GeneratePrivateKey() + for _, newEvent := range newEvents { + newEvent.Sign(sk) + require.NoError(t, db.SaveEvent(ctx, newEvent)) + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{ + Tags: nostr.TagMap{"p": []string{p}}, + Kinds: []int{1984}, + Limit: 2, + }) + require.NoError(t, err) + require.ElementsMatch(t, + []*nostr.Event{newEvents[2], newEvents[1]}, + results, + "'p' tag 1 query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{ + Tags: nostr.TagMap{"p": []string{p}, "t": []string{"x"}}, + Limit: 4, + }) + require.NoError(t, err) + require.ElementsMatch(t, + // the results won't be in canonical time order because this query is too awful, needs a kind + []*nostr.Event{newEvents[1]}, + results, + "'p' tag 2 query error") + } + + { + results, err := w.QuerySync(ctx, nostr.Filter{ + Tags: nostr.TagMap{"p": []string{p, p2}}, + Kinds: []int{1}, + Limit: 4, + }) + require.NoError(t, err) + + for _, idx := range []int{5, 6, 7} { + require.True(t, + slices.ContainsFunc( + results, + func(evt *nostr.Event) bool { return evt.ID == newEvents[idx].ID }, + ), + "'p' tag 3 query error") + } + } + } +} diff --git a/eventstore/test/manyauthors_test.go 
b/eventstore/test/manyauthors_test.go new file mode 100644 index 0000000..86b93e6 --- /dev/null +++ b/eventstore/test/manyauthors_test.go @@ -0,0 +1,68 @@ +package test + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "slices" + "testing" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +func manyAuthorsTest(t *testing.T, db eventstore.Store) { + db.Init() + + const total = 10000 + const limit = 500 + const authors = 1700 + kinds := []int{6, 7, 8} + + bigfilter := nostr.Filter{ + Authors: make([]string, authors), + Kinds: kinds, + Limit: limit, + } + for i := 0; i < authors; i++ { + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, uint32(i%(total/5))+1) + pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + bigfilter.Authors[i] = pk + } + + ordered := make([]*nostr.Event, 0, total) + for i := 0; i < total; i++ { + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, uint32(i%(total/5))+1) + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(i*i) / 4, + Content: fmt.Sprintf("lots of stuff %d", i), + Tags: nostr.Tags{}, + Kind: i % 10, + } + err := evt.Sign(hex.EncodeToString(sk)) + require.NoError(t, err) + + err = db.SaveEvent(ctx, evt) + require.NoError(t, err) + + if bigfilter.Matches(evt) { + ordered = append(ordered, evt) + } + } + + w := eventstore.RelayWrapper{Store: db} + + res, err := w.QuerySync(ctx, bigfilter) + + require.NoError(t, err) + require.Len(t, res, limit) + require.True(t, slices.IsSortedFunc(res, nostr.CompareEventPtrReverse)) + slices.SortFunc(ordered, nostr.CompareEventPtrReverse) + require.Equal(t, ordered[0], res[0]) + require.Equal(t, ordered[limit-1], res[limit-1]) + require.Equal(t, ordered[0:limit], res) +} diff --git a/eventstore/test/relaywrapper_test.go b/eventstore/test/relaywrapper_test.go new file mode 100644 index 0000000..b9ec8fa --- /dev/null +++ b/eventstore/test/relaywrapper_test.go @@ -0,0 +1,49 @@ +package test + +import ( + "context" + 
"testing" + "time" + + "github.com/fiatjaf/eventstore" + "github.com/fiatjaf/eventstore/slicestore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +var sk = "486d5f6d4891f4ce3cd5f4d6b62d184ec8ea10db455830ab7918ca43d4d7ad24" + +func TestRelayWrapper(t *testing.T) { + ctx := context.Background() + + s := &slicestore.SliceStore{} + s.Init() + defer s.Close() + + w := eventstore.RelayWrapper{Store: s} + + evt1 := nostr.Event{ + Kind: 3, + CreatedAt: 0, + Tags: nostr.Tags{}, + Content: "first", + } + evt1.Sign(sk) + + evt2 := nostr.Event{ + Kind: 3, + CreatedAt: 1, + Tags: nostr.Tags{}, + Content: "second", + } + evt2.Sign(sk) + + for range 200 { + go w.Publish(ctx, evt1) + go w.Publish(ctx, evt1) + } + time.Sleep(time.Millisecond * 200) + + evts, _ := w.QuerySync(ctx, nostr.Filter{Kinds: []int{3}}) + require.Len(t, evts, 1) +} diff --git a/eventstore/test/second_test.go b/eventstore/test/second_test.go new file mode 100644 index 0000000..59fcf51 --- /dev/null +++ b/eventstore/test/second_test.go @@ -0,0 +1,82 @@ +package test + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "testing" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +func runSecondTestOn(t *testing.T, db eventstore.Store) { + db.Init() + + for i := 0; i < 10000; i++ { + eTag := make([]byte, 32) + binary.BigEndian.PutUint16(eTag, uint16(i)) + + ref, _ := nostr.GetPublicKey(sk3) + if i%3 == 0 { + ref, _ = nostr.GetPublicKey(sk4) + } + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(i*10 + 2), + Content: fmt.Sprintf("hello %d", i), + Tags: nostr.Tags{ + {"t", fmt.Sprintf("t%d", i)}, + {"e", hex.EncodeToString(eTag)}, + {"p", ref}, + }, + Kind: i % 10, + } + sk := sk3 + if i%3 == 0 { + sk = sk4 + } + evt.Sign(sk) + err := db.SaveEvent(ctx, evt) + require.NoError(t, err) + } + + w := eventstore.RelayWrapper{Store: db} + pk3, _ := nostr.GetPublicKey(sk3) + pk4, _ := nostr.GetPublicKey(sk4) + eTags := 
make([]string, 20) + for i := 0; i < 20; i++ { + eTag := make([]byte, 32) + binary.BigEndian.PutUint16(eTag, uint16(i)) + eTags[i] = hex.EncodeToString(eTag) + } + + filters := make([]nostr.Filter, 0, 10) + filters = append(filters, nostr.Filter{Kinds: []int{1, 4, 8, 16}}) + filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}}) + filters = append(filters, nostr.Filter{Authors: []string{pk3, nostr.GeneratePrivateKey()}, Kinds: []int{3, 4}}) + filters = append(filters, nostr.Filter{}) + filters = append(filters, nostr.Filter{Limit: 20}) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3}}}) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}}) + filters = append(filters, nostr.Filter{Kinds: []int{8, 9}, Tags: nostr.TagMap{"p": []string{pk3, pk4}}}) + filters = append(filters, nostr.Filter{Kinds: []int{9}, Tags: nostr.TagMap{"e": eTags}}) + filters = append(filters, nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"e": eTags, "t": []string{"t5"}}}) + filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}}) + filters = append(filters, nostr.Filter{Tags: nostr.TagMap{"e": eTags}, Limit: 50}) + + t.Run("filter", func(t *testing.T) { + for q, filter := range filters { + q := q + filter := filter + label := fmt.Sprintf("filter %d: %s", q, filter) + + t.Run(fmt.Sprintf("q-%d", q), func(t *testing.T) { + results, err := w.QuerySync(ctx, filter) + require.NoError(t, err, filter) + require.NotEmpty(t, results, label) + }) + } + }) +} diff --git a/eventstore/test/test_helpers.go b/eventstore/test/test_helpers.go new file mode 100644 index 0000000..bfb2c92 --- /dev/null +++ b/eventstore/test/test_helpers.go @@ -0,0 +1,13 @@ +package test + +import ( + "github.com/nbd-wtf/go-nostr" +) + +func getTimestamps(events []*nostr.Event) []nostr.Timestamp { + res := make([]nostr.Timestamp, len(events)) + for i, evt := range events { + 
res[i] = evt.CreatedAt + } + return res +} diff --git a/eventstore/test/unbalanced_test.go b/eventstore/test/unbalanced_test.go new file mode 100644 index 0000000..164ef63 --- /dev/null +++ b/eventstore/test/unbalanced_test.go @@ -0,0 +1,82 @@ +package test + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "slices" + "testing" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +// this is testing what happens when most results come from the same abstract query -- but not all +func unbalancedTest(t *testing.T, db eventstore.Store) { + db.Init() + + const total = 10000 + const limit = 160 + const authors = 1400 + + bigfilter := nostr.Filter{ + Authors: make([]string, authors), + Limit: limit, + } + for i := 0; i < authors; i++ { + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, uint32(i%(authors*2))+1) + pk, _ := nostr.GetPublicKey(hex.EncodeToString(sk)) + bigfilter.Authors[i] = pk + } + // fmt.Println("filter", bigfilter) + + expected := make([]*nostr.Event, 0, total) + for i := 0; i < total; i++ { + skseed := uint32(i%(authors*2)) + 1 + sk := make([]byte, 32) + binary.BigEndian.PutUint32(sk, skseed) + + evt := &nostr.Event{ + CreatedAt: nostr.Timestamp(skseed)*1000 + nostr.Timestamp(i), + Content: fmt.Sprintf("unbalanced %d", i), + Tags: nostr.Tags{}, + Kind: 1, + } + err := evt.Sign(hex.EncodeToString(sk)) + require.NoError(t, err) + + err = db.SaveEvent(ctx, evt) + require.NoError(t, err) + + if bigfilter.Matches(evt) { + expected = append(expected, evt) + } + } + + slices.SortFunc(expected, nostr.CompareEventPtrReverse) + if len(expected) > limit { + expected = expected[0:limit] + } + require.Len(t, expected, limit) + + w := eventstore.RelayWrapper{Store: db} + + res, err := w.QuerySync(ctx, bigfilter) + + require.NoError(t, err) + require.Equal(t, limit, len(res)) + require.True(t, slices.IsSortedFunc(res, nostr.CompareEventPtrReverse)) + require.Equal(t, expected[0], res[0]) + + // 
fmt.Println(" expected result") + // ets := getTimestamps(expected) + // rts := getTimestamps(res) + // for i := range ets { + // fmt.Println(" ", ets[i], " ", rts[i], " ", i) + // } + + require.Equal(t, expected[limit-1], res[limit-1]) + require.Equal(t, expected[0:limit], res) +} diff --git a/eventstore/wrappers/count/count.go b/eventstore/wrappers/count/count.go new file mode 100644 index 0000000..d4ffac3 --- /dev/null +++ b/eventstore/wrappers/count/count.go @@ -0,0 +1,34 @@ +package count + +import ( + "context" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +type Wrapper struct { + eventstore.Store +} + +var _ eventstore.Store = (*Wrapper)(nil) + +func (w Wrapper) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) { + if counter, ok := w.Store.(eventstore.Counter); ok { + return counter.CountEvents(ctx, filter) + } + + ch, err := w.Store.QueryEvents(ctx, filter) + if err != nil { + return 0, err + } + if ch == nil { + return 0, nil + } + + var count int64 + for range ch { + count++ + } + return count, nil +} diff --git a/eventstore/wrappers/disablesearch/disablesearch.go b/eventstore/wrappers/disablesearch/disablesearch.go new file mode 100644 index 0000000..c93b9fc --- /dev/null +++ b/eventstore/wrappers/disablesearch/disablesearch.go @@ -0,0 +1,21 @@ +package disablesearch + +import ( + "context" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +type Wrapper struct { + eventstore.Store +} + +var _ eventstore.Store = (*Wrapper)(nil) + +func (w Wrapper) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + if filter.Search != "" { + return nil, nil + } + return w.Store.QueryEvents(ctx, filter) +} diff --git a/eventstore/wrappers/skipevent/skipevent.go b/eventstore/wrappers/skipevent/skipevent.go new file mode 100644 index 0000000..e2c8efe --- /dev/null +++ b/eventstore/wrappers/skipevent/skipevent.go @@ -0,0 +1,24 @@ +package skipevent + +import ( + "context" + 
+ "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +type Wrapper struct { + eventstore.Store + + Skip func(ctx context.Context, evt *nostr.Event) bool +} + +var _ eventstore.Store = (*Wrapper)(nil) + +func (w Wrapper) SaveEvent(ctx context.Context, evt *nostr.Event) error { + if w.Skip(ctx, evt) { + return nil + } + + return w.Store.SaveEvent(ctx, evt) +} diff --git a/khatru/.gitignore b/khatru/.gitignore new file mode 100644 index 0000000..46b14e8 --- /dev/null +++ b/khatru/.gitignore @@ -0,0 +1,3 @@ +*.env +.idea/ +knowledge.md diff --git a/khatru/README.md b/khatru/README.md new file mode 100644 index 0000000..8549e5b --- /dev/null +++ b/khatru/README.md @@ -0,0 +1,141 @@ +# khatru, a relay framework [![docs badge](https://img.shields.io/badge/docs-reference-blue)](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) + +[![Run Tests](https://github.com/fiatjaf/khatru/actions/workflows/test.yml/badge.svg)](https://github.com/fiatjaf/khatru/actions/workflows/test.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/fiatjaf/khatru.svg)](https://pkg.go.dev/github.com/fiatjaf/khatru) +[![Go Report Card](https://goreportcard.com/badge/github.com/fiatjaf/khatru)](https://goreportcard.com/report/github.com/fiatjaf/khatru) + +Khatru makes it easy to write very very custom relays: + + - custom event or filter acceptance policies + - custom `AUTH` handlers + - custom storage and pluggable databases + - custom webpages and other HTTP handlers + +Here's a sample: + +```go +package main + +import ( + "context" + "fmt" + "log" + "net/http" + + "github.com/fiatjaf/khatru" + "github.com/nbd-wtf/go-nostr" +) + +func main() { + // create the relay instance + relay := khatru.NewRelay() + + // set up some basic properties (will be returned on the NIP-11 endpoint) + relay.Info.Name = "my relay" + relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798" + relay.Info.Description = "this is my custom relay" + relay.Info.Icon = 
"https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images" + + // you must bring your own storage scheme -- if you want to have any + store := make(map[string]*nostr.Event, 120) + + // set up the basic relay functions + relay.StoreEvent = append(relay.StoreEvent, + func(ctx context.Context, event *nostr.Event) error { + store[event.ID] = event + return nil + }, + ) + relay.QueryEvents = append(relay.QueryEvents, + func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + go func() { + for _, evt := range store { + if filter.Matches(evt) { + ch <- evt + } + } + close(ch) + }() + return ch, nil + }, + ) + relay.DeleteEvent = append(relay.DeleteEvent, + func(ctx context.Context, event *nostr.Event) error { + delete(store, event.ID) + return nil + }, + ) + + // there are many other configurable things you can set + relay.RejectEvent = append(relay.RejectEvent, + // built-in policies + policies.ValidateKind, + + // define your own policies + policies.PreventLargeTags(100), + func(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if event.PubKey == "fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52" { + return true, "we don't allow this person to write here" + } + return false, "" // anyone else can + }, + ) + + // you can request auth by rejecting an event or a request with the prefix "auth-required: " + relay.RejectFilter = append(relay.RejectFilter, + // built-in policies + policies.NoComplexFilters, + + // define your own policies + func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) { + if pubkey := khatru.GetAuthed(ctx); pubkey != "" { + log.Printf("request from %s\n", pubkey) + return false, "" + } + return true, "auth-required: only authenticated users can read from this relay" + // (this will cause an AUTH 
message to be sent and then a CLOSED message such that clients can + // authenticate and then request again) + }, + ) + // check the docs for more goodies! + + mux := relay.Router() + // set up other http handlers + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "text/html") + fmt.Fprintf(w, `welcome to my relay!`) + }) + + // start the server + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} +``` + +### But I don't want to write my own database! + +Fear no more. Using the https://github.com/fiatjaf/eventstore module you get a bunch of compatible databases out of the box and you can just plug them into your relay. For example, [sqlite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3): + +```go + db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) +``` + +### But I don't want to write a bunch of custom policies! + +Fear no more. We have a bunch of common policies written in the `github.com/fiatjaf/khatru/policies` package and also a handpicked selection of base sane defaults, which you can apply with: + +```go + policies.ApplySaneDefaults(relay) +``` + +Contributions to this are very much welcomed. diff --git a/khatru/adding.go b/khatru/adding.go new file mode 100644 index 0000000..40ffb42 --- /dev/null +++ b/khatru/adding.go @@ -0,0 +1,113 @@ +package khatru + +import ( + "context" + "errors" + "fmt" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +// AddEvent sends an event through then normal add pipeline, as if it was received from a websocket. 
+func (rl *Relay) AddEvent(ctx context.Context, evt *nostr.Event) (skipBroadcast bool, writeError error) { + if evt == nil { + return false, errors.New("error: event is nil") + } + + if nostr.IsEphemeralKind(evt.Kind) { + return false, rl.handleEphemeral(ctx, evt) + } else { + return rl.handleNormal(ctx, evt) + } +} + +func (rl *Relay) handleNormal(ctx context.Context, evt *nostr.Event) (skipBroadcast bool, writeError error) { + for _, reject := range rl.RejectEvent { + if reject, msg := reject(ctx, evt); reject { + if msg == "" { + return true, errors.New("blocked: no reason") + } else { + return true, errors.New(nostr.NormalizeOKMessage(msg, "blocked")) + } + } + } + + // will store + // regular kinds are just saved directly + if nostr.IsRegularKind(evt.Kind) { + for _, store := range rl.StoreEvent { + if err := store(ctx, evt); err != nil { + switch err { + case eventstore.ErrDupEvent: + return true, nil + default: + return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(err.Error(), "error")) + } + } + } + } else { + // otherwise it's a replaceable -- so we'll use the replacer functions if we have any + if len(rl.ReplaceEvent) > 0 { + for _, repl := range rl.ReplaceEvent { + if err := repl(ctx, evt); err != nil { + switch err { + case eventstore.ErrDupEvent: + return true, nil + default: + return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(err.Error(), "error")) + } + } + } + } else { + // otherwise do it the manual way + filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}} + if nostr.IsAddressableKind(evt.Kind) { + // when addressable, add the "d" tag to the filter + filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}} + } + + // now we fetch old events and delete them + shouldStore := true + for _, query := range rl.QueryEvents { + ch, err := query(ctx, filter) + if err != nil { + continue + } + for previous := range ch { + if isOlder(previous, evt) { + for _, del := range rl.DeleteEvent { + del(ctx, previous) + 
} + } else { + // we found a more recent event, so we won't delete it and also will not store this new one + shouldStore = false + } + } + } + + // store + if shouldStore { + for _, store := range rl.StoreEvent { + if saveErr := store(ctx, evt); saveErr != nil { + switch saveErr { + case eventstore.ErrDupEvent: + return true, nil + default: + return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(saveErr.Error(), "error")) + } + } + } + } + } + } + + for _, ons := range rl.OnEventSaved { + ons(ctx, evt) + } + + // track event expiration if applicable + rl.expirationManager.trackEvent(evt) + + return false, nil +} diff --git a/khatru/blossom/authorization.go b/khatru/blossom/authorization.go new file mode 100644 index 0000000..0f259da --- /dev/null +++ b/khatru/blossom/authorization.go @@ -0,0 +1,45 @@ +package blossom + +import ( + "encoding/base64" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/mailru/easyjson" + "github.com/nbd-wtf/go-nostr" +) + +func readAuthorization(r *http.Request) (*nostr.Event, error) { + token := r.Header.Get("Authorization") + if !strings.HasPrefix(token, "Nostr ") { + return nil, nil + } + + eventj, err := base64.StdEncoding.DecodeString(token[6:]) + if err != nil { + return nil, fmt.Errorf("invalid base64 token") + } + var evt nostr.Event + if err := easyjson.Unmarshal(eventj, &evt); err != nil { + return nil, fmt.Errorf("broken event") + } + if evt.Kind != 24242 || !evt.CheckID() { + return nil, fmt.Errorf("invalid event") + } + if ok, _ := evt.CheckSignature(); !ok { + return nil, fmt.Errorf("invalid signature") + } + + expirationTag := evt.Tags.Find("expiration") + if expirationTag == nil { + return nil, fmt.Errorf("missing \"expiration\" tag") + } + expiration, _ := strconv.ParseInt(expirationTag[1], 10, 64) + if nostr.Timestamp(expiration) < nostr.Now() { + return nil, fmt.Errorf("event expired") + } + + return &evt, nil +} diff --git a/khatru/blossom/blob.go b/khatru/blossom/blob.go new file mode 100644 index 
0000000..d17dd7b --- /dev/null +++ b/khatru/blossom/blob.go @@ -0,0 +1,26 @@ +package blossom + +import ( + "context" + + "github.com/nbd-wtf/go-nostr" +) + +type BlobDescriptor struct { + URL string `json:"url"` + SHA256 string `json:"sha256"` + Size int `json:"size"` + Type string `json:"type"` + Uploaded nostr.Timestamp `json:"uploaded"` + + Owner string `json:"-"` +} + +type BlobIndex interface { + Keep(ctx context.Context, blob BlobDescriptor, pubkey string) error + List(ctx context.Context, pubkey string) (chan BlobDescriptor, error) + Get(ctx context.Context, sha256 string) (*BlobDescriptor, error) + Delete(ctx context.Context, sha256 string, pubkey string) error +} + +var _ BlobIndex = (*EventStoreBlobIndexWrapper)(nil) diff --git a/khatru/blossom/eventstorewrapper.go b/khatru/blossom/eventstorewrapper.go new file mode 100644 index 0000000..d00d761 --- /dev/null +++ b/khatru/blossom/eventstorewrapper.go @@ -0,0 +1,104 @@ +package blossom + +import ( + "context" + "strconv" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" +) + +// EventStoreBlobIndexWrapper uses fake events to keep track of what blobs we have stored and who owns them +type EventStoreBlobIndexWrapper struct { + eventstore.Store + + ServiceURL string +} + +func (es EventStoreBlobIndexWrapper) Keep(ctx context.Context, blob BlobDescriptor, pubkey string) error { + ch, err := es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Kinds: []int{24242}, Tags: nostr.TagMap{"x": []string{blob.SHA256}}}) + if err != nil { + return err + } + + if <-ch == nil { + // doesn't exist, save + evt := &nostr.Event{ + PubKey: pubkey, + Kind: 24242, + Tags: nostr.Tags{ + {"x", blob.SHA256}, + {"type", blob.Type}, + {"size", strconv.Itoa(blob.Size)}, + }, + CreatedAt: blob.Uploaded, + } + evt.ID = evt.GetID() + es.Store.SaveEvent(ctx, evt) + } + + return nil +} + +func (es EventStoreBlobIndexWrapper) List(ctx context.Context, pubkey string) (chan BlobDescriptor, error) { + ech, err 
:= es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Kinds: []int{24242}}) + if err != nil { + return nil, err + } + + ch := make(chan BlobDescriptor) + + go func() { + for evt := range ech { + ch <- es.parseEvent(evt) + } + close(ch) + }() + + return ch, nil +} + +func (es EventStoreBlobIndexWrapper) Get(ctx context.Context, sha256 string) (*BlobDescriptor, error) { + ech, err := es.Store.QueryEvents(ctx, nostr.Filter{Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []int{24242}, Limit: 1}) + if err != nil { + return nil, err + } + + evt := <-ech + if evt != nil { + bd := es.parseEvent(evt) + return &bd, nil + } + + return nil, nil +} + +func (es EventStoreBlobIndexWrapper) Delete(ctx context.Context, sha256 string, pubkey string) error { + ech, err := es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []int{24242}, Limit: 1}) + if err != nil { + return err + } + + evt := <-ech + if evt != nil { + return es.Store.DeleteEvent(ctx, evt) + } + + return nil +} + +func (es EventStoreBlobIndexWrapper) parseEvent(evt *nostr.Event) BlobDescriptor { + hhash := evt.Tags[0][1] + mimetype := evt.Tags[1][1] + ext := getExtension(mimetype) + size, _ := strconv.Atoi(evt.Tags[2][1]) + + return BlobDescriptor{ + Owner: evt.PubKey, + Uploaded: evt.CreatedAt, + URL: es.ServiceURL + "/" + hhash + ext, + SHA256: hhash, + Type: mimetype, + Size: size, + } +} diff --git a/khatru/blossom/handlers.go b/khatru/blossom/handlers.go new file mode 100644 index 0000000..5ebdc7c --- /dev/null +++ b/khatru/blossom/handlers.go @@ -0,0 +1,367 @@ +package blossom + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "io" + "mime" + "net/http" + "strconv" + "strings" + "time" + + "github.com/liamg/magic" + "github.com/nbd-wtf/go-nostr" +) + +func (bs BlossomServer) handleUploadCheck(w http.ResponseWriter, r *http.Request) { + auth, err := readAuthorization(r) + if err != nil { + blossomError(w, err.Error(), 
400) + return + } + if auth == nil { + blossomError(w, "missing \"Authorization\" header", 401) + return + } + if auth.Tags.FindWithValue("t", "upload") == nil { + blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403) + return + } + + mimetype := r.Header.Get("X-Content-Type") + exts, _ := mime.ExtensionsByType(mimetype) + var ext string + if len(exts) > 0 { + ext = exts[0] + } + + // get the file size from the incoming header + size, _ := strconv.Atoi(r.Header.Get("X-Content-Length")) + + for _, rb := range bs.RejectUpload { + reject, reason, code := rb(r.Context(), auth, size, ext) + if reject { + blossomError(w, reason, code) + return + } + } +} + +func (bs BlossomServer) handleUpload(w http.ResponseWriter, r *http.Request) { + auth, err := readAuthorization(r) + if err != nil { + blossomError(w, "invalid \"Authorization\": "+err.Error(), 404) + return + } + if auth == nil { + blossomError(w, "missing \"Authorization\" header", 401) + return + } + if auth.Tags.FindWithValue("t", "upload") == nil { + blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403) + return + } + + // get the file size from the incoming header + size, _ := strconv.Atoi(r.Header.Get("Content-Length")) + if size == 0 { + blossomError(w, "missing \"Content-Length\" header", 400) + return + } + + // read first bytes of upload so we can find out the filetype + b := make([]byte, min(50, size), size) + if n, err := r.Body.Read(b); err != nil && n != size { + blossomError(w, "failed to read initial bytes of upload body: "+err.Error(), 400) + return + } + var ext string + if ft, _ := magic.Lookup(b); ft != nil { + ext = "." 
+ ft.Extension + } else { + // if we can't find, use the filetype given by the upload header + mimetype := r.Header.Get("Content-Type") + ext = getExtension(mimetype) + } + + // run the reject hooks + for _, ru := range bs.RejectUpload { + reject, reason, code := ru(r.Context(), auth, size, ext) + if reject { + blossomError(w, reason, code) + return + } + } + + // if it passes then we have to read the entire thing into memory so we can compute the sha256 + for { + var n int + n, err = r.Body.Read(b[len(b):cap(b)]) + b = b[:len(b)+n] + if err != nil { + if err == io.EOF { + err = nil + } + break + } + if len(b) == cap(b) { + // add more capacity (let append pick how much) + // if Content-Length was correct we shouldn't reach this + b = append(b, 0)[:len(b)] + } + } + if err != nil { + blossomError(w, "failed to read upload body: "+err.Error(), 400) + return + } + + hash := sha256.Sum256(b) + hhash := hex.EncodeToString(hash[:]) + + // keep track of the blob descriptor + bd := BlobDescriptor{ + URL: bs.ServiceURL + "/" + hhash + ext, + SHA256: hhash, + Size: len(b), + Type: mime.TypeByExtension(ext), + Uploaded: nostr.Now(), + } + if err := bs.Store.Keep(r.Context(), bd, auth.PubKey); err != nil { + blossomError(w, "failed to save event: "+err.Error(), 400) + return + } + + // save actual blob + for _, sb := range bs.StoreBlob { + if err := sb(r.Context(), hhash, b); err != nil { + blossomError(w, "failed to save: "+err.Error(), 500) + return + } + } + + // return response + json.NewEncoder(w).Encode(bd) +} + +func (bs BlossomServer) handleGetBlob(w http.ResponseWriter, r *http.Request) { + spl := strings.SplitN(r.URL.Path, ".", 2) + hhash := spl[0] + if len(hhash) != 65 { + blossomError(w, "invalid /[.ext] path", 400) + return + } + hhash = hhash[1:] + + // check for an authorization tag, if any + auth, err := readAuthorization(r) + if err != nil { + blossomError(w, err.Error(), 400) + return + } + + // if there is one, we check if it has the extra requirements + if 
auth != nil { + if auth.Tags.FindWithValue("t", "get") == nil { + blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403) + return + } + + if auth.Tags.FindWithValue("x", hhash) == nil && + auth.Tags.FindWithValue("server", bs.ServiceURL) == nil { + blossomError(w, "invalid \"Authorization\" event \"x\" or \"server\" tag", 403) + return + } + } + + for _, rg := range bs.RejectGet { + reject, reason, code := rg(r.Context(), auth, hhash) + if reject { + blossomError(w, reason, code) + return + } + } + + var ext string + if len(spl) == 2 { + ext = "." + spl[1] + } + + for _, lb := range bs.LoadBlob { + reader, _ := lb(r.Context(), hhash) + if reader != nil { + // use unix epoch as the time if we can't find the descriptor + // as described in the http.ServeContent documentation + t := time.Unix(0, 0) + descriptor, err := bs.Store.Get(r.Context(), hhash) + if err == nil && descriptor != nil { + t = descriptor.Uploaded.Time() + } + w.Header().Set("ETag", hhash) + w.Header().Set("Cache-Control", "public, max-age=604800, immutable") + http.ServeContent(w, r, hhash+ext, t, reader) + return + } + } + + blossomError(w, "file not found", 404) +} + +func (bs BlossomServer) handleHasBlob(w http.ResponseWriter, r *http.Request) { + spl := strings.SplitN(r.URL.Path, ".", 2) + hhash := spl[0] + if len(hhash) != 65 { + blossomError(w, "invalid /[.ext] path", 400) + return + } + hhash = hhash[1:] + + bd, err := bs.Store.Get(r.Context(), hhash) + if err != nil { + blossomError(w, "failed to query: "+err.Error(), 500) + return + } + + if bd == nil { + blossomError(w, "file not found", 404) + return + } +} + +func (bs BlossomServer) handleList(w http.ResponseWriter, r *http.Request) { + // check for an authorization tag, if any + auth, err := readAuthorization(r) + if err != nil { + blossomError(w, err.Error(), 400) + return + } + + // if there is one, we check if it has the extra requirements + if auth != nil { + if auth.Tags.FindWithValue("t", "list") == nil { + 
blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403) + return + } + } + + pubkey := r.URL.Path[6:] + + for _, rl := range bs.RejectList { + reject, reason, code := rl(r.Context(), auth, pubkey) + if reject { + blossomError(w, reason, code) + return + } + } + + ch, err := bs.Store.List(r.Context(), pubkey) + if err != nil { + blossomError(w, "failed to query: "+err.Error(), 500) + return + } + + w.Write([]byte{'['}) + enc := json.NewEncoder(w) + first := true + for bd := range ch { + if !first { + w.Write([]byte{','}) + } else { + first = false + } + enc.Encode(bd) + } + w.Write([]byte{']'}) +} + +func (bs BlossomServer) handleDelete(w http.ResponseWriter, r *http.Request) { + auth, err := readAuthorization(r) + if err != nil { + blossomError(w, err.Error(), 400) + return + } + + if auth != nil { + if auth.Tags.FindWithValue("t", "delete") == nil { + blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403) + return + } + } + + spl := strings.SplitN(r.URL.Path, ".", 2) + hhash := spl[0] + if len(hhash) != 65 { + blossomError(w, "invalid /[.ext] path", 400) + return + } + hhash = hhash[1:] + if auth.Tags.FindWithValue("x", hhash) == nil && + auth.Tags.FindWithValue("server", bs.ServiceURL) == nil { + blossomError(w, "invalid \"Authorization\" event \"x\" or \"server\" tag", 403) + return + } + + // should we accept this delete? 
+ for _, rd := range bs.RejectDelete { + reject, reason, code := rd(r.Context(), auth, hhash) + if reject { + blossomError(w, reason, code) + return + } + } + + // delete the entry that links this blob to this author + if err := bs.Store.Delete(r.Context(), hhash, auth.PubKey); err != nil { + blossomError(w, "delete of blob entry failed: "+err.Error(), 500) + return + } + + // we will actually only delete the file if no one else owns it + if bd, err := bs.Store.Get(r.Context(), hhash); err == nil && bd == nil { + for _, del := range bs.DeleteBlob { + if err := del(r.Context(), hhash); err != nil { + blossomError(w, "failed to delete blob: "+err.Error(), 500) + return + } + } + } +} + +func (bs BlossomServer) handleReport(w http.ResponseWriter, r *http.Request) { + var body []byte + _, err := r.Body.Read(body) + if err != nil { + blossomError(w, "can't read request body", 400) + return + } + + var evt *nostr.Event + if err := json.Unmarshal(body, evt); err != nil { + blossomError(w, "can't parse event", 400) + return + } + + if isValid, _ := evt.CheckSignature(); !isValid { + blossomError(w, "invalid report event is provided", 400) + return + } + + if evt.Kind != nostr.KindReporting { + blossomError(w, "invalid report event is provided", 400) + return + } + + for _, rr := range bs.ReceiveReport { + if err := rr(r.Context(), evt); err != nil { + blossomError(w, "failed to receive report: "+err.Error(), 500) + return + } + } +} + +func (bs BlossomServer) handleMirror(w http.ResponseWriter, r *http.Request) { +} + +func (bs BlossomServer) handleNegentropy(w http.ResponseWriter, r *http.Request) { +} diff --git a/khatru/blossom/server.go b/khatru/blossom/server.go new file mode 100644 index 0000000..4e1c7b4 --- /dev/null +++ b/khatru/blossom/server.go @@ -0,0 +1,78 @@ +package blossom + +import ( + "context" + "io" + "net/http" + "strings" + + "github.com/fiatjaf/khatru" + "github.com/nbd-wtf/go-nostr" +) + +type BlossomServer struct { + ServiceURL string + Store 
BlobIndex + + StoreBlob []func(ctx context.Context, sha256 string, body []byte) error + LoadBlob []func(ctx context.Context, sha256 string) (io.ReadSeeker, error) + DeleteBlob []func(ctx context.Context, sha256 string) error + ReceiveReport []func(ctx context.Context, reportEvt *nostr.Event) error + + RejectUpload []func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int) + RejectGet []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int) + RejectList []func(ctx context.Context, auth *nostr.Event, pubkey string) (bool, string, int) + RejectDelete []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int) +} + +func New(rl *khatru.Relay, serviceURL string) *BlossomServer { + bs := &BlossomServer{ + ServiceURL: serviceURL, + } + + base := rl.Router() + mux := http.NewServeMux() + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/upload" { + if r.Method == "PUT" { + bs.handleUpload(w, r) + return + } else if r.Method == "HEAD" { + bs.handleUploadCheck(w, r) + return + } + } + + if strings.HasPrefix(r.URL.Path, "/list/") && r.Method == "GET" { + bs.handleList(w, r) + return + } + + if (len(r.URL.Path) == 65 || strings.Index(r.URL.Path, ".") == 65) && strings.Index(r.URL.Path[1:], "/") == -1 { + if r.Method == "HEAD" { + bs.handleHasBlob(w, r) + return + } else if r.Method == "GET" { + bs.handleGetBlob(w, r) + return + } else if r.Method == "DELETE" { + bs.handleDelete(w, r) + return + } + } + + if r.URL.Path == "/report" { + if r.Method == "PUT" { + bs.handleReport(w, r) + return + } + } + + base.ServeHTTP(w, r) + }) + + rl.SetRouter(mux) + + return bs +} diff --git a/khatru/blossom/utils.go b/khatru/blossom/utils.go new file mode 100644 index 0000000..47d8ee5 --- /dev/null +++ b/khatru/blossom/utils.go @@ -0,0 +1,37 @@ +package blossom + +import ( + "mime" + "net/http" +) + +func blossomError(w http.ResponseWriter, msg string, code int) { + 
w.Header().Add("X-Reason", msg) + w.WriteHeader(code) +} + +func getExtension(mimetype string) string { + if mimetype == "" { + return "" + } + + switch mimetype { + case "image/jpeg": + return ".jpg" + case "image/gif": + return ".gif" + case "image/png": + return ".png" + case "image/webp": + return ".webp" + case "video/mp4": + return ".mp4" + } + + exts, _ := mime.ExtensionsByType(mimetype) + if len(exts) > 0 { + return exts[0] + } + + return "" +} diff --git a/khatru/broadcasting.go b/khatru/broadcasting.go new file mode 100644 index 0000000..8fb1d0b --- /dev/null +++ b/khatru/broadcasting.go @@ -0,0 +1,11 @@ +package khatru + +import ( + "github.com/nbd-wtf/go-nostr" +) + +// BroadcastEvent emits an event to all listeners whose filters' match, skipping all filters and actions +// it also doesn't attempt to store the event or trigger any reactions or callbacks +func (rl *Relay) BroadcastEvent(evt *nostr.Event) int { + return rl.notifyListeners(evt) +} diff --git a/khatru/deleting.go b/khatru/deleting.go new file mode 100644 index 0000000..1e066c4 --- /dev/null +++ b/khatru/deleting.go @@ -0,0 +1,85 @@ +package khatru + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/nbd-wtf/go-nostr" +) + +func (rl *Relay) handleDeleteRequest(ctx context.Context, evt *nostr.Event) error { + // event deletion -- nip09 + for _, tag := range evt.Tags { + if len(tag) >= 2 { + var f nostr.Filter + + switch tag[0] { + case "e": + f = nostr.Filter{IDs: []string{tag[1]}} + case "a": + spl := strings.Split(tag[1], ":") + if len(spl) != 3 { + continue + } + kind, err := strconv.Atoi(spl[0]) + if err != nil { + continue + } + author := spl[1] + identifier := spl[2] + f = nostr.Filter{ + Kinds: []int{kind}, + Authors: []string{author}, + Tags: nostr.TagMap{"d": []string{identifier}}, + Until: &evt.CreatedAt, + } + default: + continue + } + + ctx := context.WithValue(ctx, internalCallKey, struct{}{}) + for _, query := range rl.QueryEvents { + ch, err := query(ctx, f) 
+ if err != nil { + continue + } + target := <-ch + if target == nil { + continue + } + // got the event, now check if the user can delete it + acceptDeletion := target.PubKey == evt.PubKey + var msg string + if !acceptDeletion { + msg = "you are not the author of this event" + } + // but if we have a function to overwrite this outcome, use that instead + for _, odo := range rl.OverwriteDeletionOutcome { + acceptDeletion, msg = odo(ctx, target, evt) + } + + if acceptDeletion { + // delete it + for _, del := range rl.DeleteEvent { + if err := del(ctx, target); err != nil { + return err + } + } + + // if it was tracked to be expired that is not needed anymore + rl.expirationManager.removeEvent(target.ID) + } else { + // fail and stop here + return fmt.Errorf("blocked: %s", msg) + } + + // don't try to query this same event again + break + } + } + } + + return nil +} diff --git a/khatru/docs/.gitignore b/khatru/docs/.gitignore new file mode 100644 index 0000000..3c3629e --- /dev/null +++ b/khatru/docs/.gitignore @@ -0,0 +1 @@ +node_modules diff --git a/khatru/docs/.prettierrc.yaml b/khatru/docs/.prettierrc.yaml new file mode 100644 index 0000000..a881699 --- /dev/null +++ b/khatru/docs/.prettierrc.yaml @@ -0,0 +1,9 @@ +semi: false +arrowParens: avoid +insertPragma: false +printWidth: 80 +proseWrap: preserve +singleQuote: true +trailingComma: none +useTabs: false +bracketSpacing: false diff --git a/khatru/docs/.vitepress/.gitignore b/khatru/docs/.vitepress/.gitignore new file mode 100644 index 0000000..a8d3ed2 --- /dev/null +++ b/khatru/docs/.vitepress/.gitignore @@ -0,0 +1,2 @@ +cache +dist diff --git a/khatru/docs/.vitepress/config.js b/khatru/docs/.vitepress/config.js new file mode 100644 index 0000000..7a469b9 --- /dev/null +++ b/khatru/docs/.vitepress/config.js @@ -0,0 +1,42 @@ +export default { + lang: 'en-US', + title: 'khatru', + description: 'a framework for making Nostr relays', + themeConfig: { + logo: '/logo.png', + nav: [ + {text: 'Home', link: '/'}, + 
{text: 'Why', link: '/why'}, + {text: 'Docs', link: '/getting-started'}, + {text: 'Source', link: 'https://github.com/fiatjaf/khatru'} + ], + sidebar: [ + { + text: 'Core Concepts', + items: [ + { text: 'Event Storage', link: '/core/eventstore' }, + { text: 'Authentication', link: '/core/auth' }, + { text: 'HTTP Integration', link: '/core/embed' }, + { text: 'Request Routing', link: '/core/routing' }, + { text: 'Management API', link: '/core/management' }, + { text: 'Media Storage (Blossom)', link: '/core/blossom' }, + ] + }, + { + text: 'Cookbook', + items: [ + { text: 'Search', link: '/cookbook/search' }, + { text: 'Dynamic Relays', link: '/cookbook/dynamic' }, + { text: 'Generating Events Live', link: '/cookbook/custom-live-events' }, + { text: 'Custom Stores', link: '/cookbook/custom-stores' }, + { text: 'Using something like Google Drive', link: '/cookbook/google-drive' }, + ] + } + ], + editLink: { + pattern: 'https://github.com/fiatjaf/khatru/edit/master/docs/:path' + } + }, + head: [['link', {rel: 'icon', href: '/logo.png'}]], + cleanUrls: true +} diff --git a/khatru/docs/.vitepress/theme/Layout.vue b/khatru/docs/.vitepress/theme/Layout.vue new file mode 100644 index 0000000..4315dd8 --- /dev/null +++ b/khatru/docs/.vitepress/theme/Layout.vue @@ -0,0 +1,11 @@ + + diff --git a/khatru/docs/.vitepress/theme/custom.css b/khatru/docs/.vitepress/theme/custom.css new file mode 100644 index 0000000..b4606ab --- /dev/null +++ b/khatru/docs/.vitepress/theme/custom.css @@ -0,0 +1,24 @@ +:root { + --vp-c-brand-1: #2eafab; + --vp-c-brand-2: #30373b; + --vp-c-brand-3: #3b6a3e; + --vp-button-brand-bg: #2eafab; + --vp-button-brand-hover-bg: #3b6a3e; + --vp-button-brand-active-bg: #30373b; + + --vp-c-bg: #f2e6e2; + --vp-c-bg-soft: #f3f2f0; +} + +.dark { + --vp-c-bg: #0a0a08; + --vp-c-bg-soft: #161a0e; +} + +.khatru-layout-bottom { + margin: 2rem auto; + width: 200px; + text-align: center; + font-family: monospace; + font-size: 2rem; +} diff --git 
a/khatru/docs/.vitepress/theme/index.mjs b/khatru/docs/.vitepress/theme/index.mjs new file mode 100644 index 0000000..933b3b1 --- /dev/null +++ b/khatru/docs/.vitepress/theme/index.mjs @@ -0,0 +1,8 @@ +import DefaultTheme from 'vitepress/theme' +import NostrifyLayout from './Layout.vue' +import './custom.css' + +export default { + extends: DefaultTheme, + Layout: NostrifyLayout +} diff --git a/khatru/docs/config.js b/khatru/docs/config.js new file mode 120000 index 0000000..5f77cdd --- /dev/null +++ b/khatru/docs/config.js @@ -0,0 +1 @@ +.vitepress/config.js \ No newline at end of file diff --git a/khatru/docs/cookbook/custom-live-events.md b/khatru/docs/cookbook/custom-live-events.md new file mode 100644 index 0000000..adea1e0 --- /dev/null +++ b/khatru/docs/cookbook/custom-live-events.md @@ -0,0 +1,64 @@ +--- +outline: deep +--- + +# Generating custom live events + +Suppose you want to generate a new event every time a goal is scored on some soccer game and send that to all clients subscribed to a given game according to a tag `t`. 
+ +We'll assume you'll be polling some HTTP API that gives you the game's current score, and that in your `main` function you'll start the function that does the polling: + +```go +func main () { + // other stuff here + relay := khatru.NewRelay() + + go startPollingGame(relay) + // other stuff here +} + +type GameStatus struct { + TeamA int `json:"team_a"` + TeamB int `json:"team_b"` +} + +func startPollingGame(relay *khatru.Relay) { + current := GameStatus{0, 0} + + for { + newStatus, err := fetchGameStatus() + if err != nil { + continue + } + + if newStatus.TeamA > current.TeamA { + // team A has scored a goal, here we generate an event + evt := nostr.Event{ + CreatedAt: nostr.Now(), + Kind: 1, + Content: "team A has scored!", + Tags: nostr.Tags{{"t", "this-game"}} + } + evt.Sign(global.RelayPrivateKey) + // calling BroadcastEvent will send the event to everybody who has been listening for tag "t=[this-game]" + // there is no need to do any code to keep track of these clients or who is listening to what, khatru + // does that already in the background automatically + relay.BroadcastEvent(evt) + + // just calling BroadcastEvent won't cause this event to be be stored, + // if for any reason you want to store these events you must call the store functions manually + for _, store := range relay.StoreEvent { + store(context.TODO(), evt) + } + } + if newStatus.TeamB > current.TeamB { + // same here, if team B has scored a goal + // ... + } + } +} + +func fetchGameStatus() (GameStatus, error) { + // implementation of calling some external API goes here +} +``` diff --git a/khatru/docs/cookbook/custom-stores.md b/khatru/docs/cookbook/custom-stores.md new file mode 100644 index 0000000..efce73d --- /dev/null +++ b/khatru/docs/cookbook/custom-stores.md @@ -0,0 +1,88 @@ +--- +outline: deep +--- + +# Generating events on the fly from a non-Nostr data-source + +Suppose you want to serve events with the weather data for periods in the past. 
All you have is a big CSV file with the data. + +Then you get a query like `{"#g": ["d6nvp"], "since": 1664074800, "until": 1666666800, "kind": 10774}`, imagine for a while that kind `10774` means weather data. + +First you do some geohashing calculation to discover that `d6nvp` corresponds to Willemstad, Curaçao, then you query your XML file for the Curaçao weather data for the given period -- from `2022-09-25` to `2022-10-25`, then you return the events corresponding to such query, signed on the fly: + +```go +func main () { + // other stuff here + relay := khatru.NewRelay() + + relay.QueryEvents = append(relay.QueryEvents, + handleWeatherQuery, + ) + // other stuff here +} + +func handleWeatherQuery(ctx context.Context, filter nostr.Filter) (ch chan *nostr.Event, err error) { + if filter.Kind != 10774 { + // this function only handles kind 10774, if the query is for something else we return + // a nil channel, which corresponds to no results + return nil, nil + } + + file, err := os.Open("weatherdata.xml") + if err != nil { + return nil, fmt.Errorf("we have lost our file: %w", err) + } + + // QueryEvents functions are expected to return a channel + ch := make(chan *nostr.Event) + + // and they can do their query asynchronously, emitting events to the channel as they come + go func () { + defer file.Close() + + // we're going to do this for each tag in the filter + gTags, _ := filter.Tags["g"] + for _, gTag := range gTags { + // translate geohash into city name + citName, err := geohashToCityName(gTag) + if err != nil { + continue + } + + reader := csv.NewReader(file) + for { + record, err := reader.Read() + if err != nil { + return + } + + // ensure we're only getting records for Willemstad + if cityName != record[0] { + continue + } + + date, _ := time.Parse("2006-01-02", record[1]) + ts := nostr.Timestamp(date.Unix()) + if ts > filter.Since && ts < filter.Until { + // we found a record that matches the filter, so we make + // an event on the fly and return it 
+ evt := nostr.Event{ + CreatedAt: ts, + Kind: 10774, + Tags: nostr.Tags{ + {"temperature", record[2]}, + {"condition", record[3]}, + } + } + evt.Sign(global.RelayPrivateKey) + ch <- evt + } + } + } + }() + + return ch, nil +} +``` + +Beware, the code above is inefficient and the entire approach is not very smart, it's meant just as an example. diff --git a/khatru/docs/cookbook/dynamic.md b/khatru/docs/cookbook/dynamic.md new file mode 100644 index 0000000..d66d3cc --- /dev/null +++ b/khatru/docs/cookbook/dynamic.md @@ -0,0 +1,58 @@ +--- +outline: deep +--- + +# Generating `khatru` relays dynamically and serving them from the same path + +Suppose you want to expose a different relay interface depending on the subdomain that is accessed. I don't know, maybe you want to serve just events with pictures on `pictures.example.com` and just events with audio files on `audios.example.com`; maybe you want just events in English on `en.example.com` and just examples in Portuguese on `pt.example.com`, there are many possibilities. + +You could achieve that with a scheme like the following + +```go +var topLevelHost = "example.com" +var mainRelay = khatru.NewRelay() // we're omitting all the configuration steps for brevity +var subRelays = xsync.NewMapOf[string, *khatru.Relay]() + +func main () { + handler := http.HandlerFunc(dynamicRelayHandler) + + log.Printf("listening at http://0.0.0.0:8080") + http.ListenAndServe("0.0.0.0:8080", handler) +} + +func dynamicRelayHandler(w http.ResponseWriter, r *http.Request) { + var relay *khatru.Relay + subdomain := r.Host[0 : len(topLevelHost)-len(topLevelHost)] + if subdomain == "" { + // no subdomain, use the main top-level relay + relay = mainRelay + } else { + // call on subdomain, so get a dynamic relay + subdomain = subdomain[0 : len(subdomain)-1] // remove dangling "." 
+ // get a dynamic relay + relay, _ = subRelays.LoadOrCompute(subdomain, func () *khatru.Relay { + return makeNewRelay(subdomain) + }) + } + + relay.ServeHTTP(w, r) +} + +func makeNewRelay (subdomain string) *khatru.Relay { + // somehow use the subdomain to generate a relay with specific configurations + relay := khatru.NewRelay() + switch subdomain { + case "pictures": + // relay configuration shenanigans go here + case "audios": + // relay configuration shenanigans go here + case "en": + // relay configuration shenanigans go here + case "pt": + // relay configuration shenanigans go here + } + return relay +} +``` + +In practice you could come up with a way that allows all these dynamic relays to share a common underlying datastore, but this is out of the scope of this example. diff --git a/khatru/docs/cookbook/google-drive.md b/khatru/docs/cookbook/google-drive.md new file mode 100644 index 0000000..e931907 --- /dev/null +++ b/khatru/docs/cookbook/google-drive.md @@ -0,0 +1,67 @@ +--- +outline: deep +--- + +## Querying events from Google Drive + +Suppose you have a bunch of events stored in text files on Google Drive and you want to serve them as a relay. You could just store each event as a separate file and use the native Google Drive search to match the queries when serving requests. It would probably not be as fast as using local database, but it would work. 
+ +```go +func main () { + // other stuff here + relay := khatru.NewRelay() + + relay.StoreEvent = append(relay.StoreEvent, handleEvent) + relay.QueryEvents = append(relay.QueryEvents, handleQuery) + // other stuff here +} + +func handleEvent(ctx context.Context, event *nostr.Event) error { + // store each event as a file on google drive + _, err := gdriveService.Files.Create(googledrive.CreateOptions{ + Name: event.ID, // with the name set to their id + Body: event.String(), // the body as the full event JSON + }) + return err +} + +func handleQuery(ctx context.Context, filter nostr.Filter) (ch chan *nostr.Event, err error) { + // QueryEvents functions are expected to return a channel + ch := make(chan *nostr.Event) + + // and they can do their query asynchronously, emitting events to the channel as they come + go func () { + if len(filter.IDs) > 0 { + // if the query is for ids we can do a simpler name match + for _, id := range filter.IDS { + results, _ := gdriveService.Files.List(googledrive.ListOptions{ + Q: fmt.Sprintf("name = '%s'", id) + }) + if len(results) > 0 { + var evt nostr.Event + json.Unmarshal(results[0].Body, &evt) + ch <- evt + } + } + } else { + // otherwise we use the google-provided search and hope it will catch tags that are in the event body + for tagName, tagValues := range filter.Tags { + results, _ := gdriveService.Files.List(googledrive.ListOptions{ + Q: fmt.Sprintf("fullText contains '%s'", tagValues) + }) + for _, result := range results { + var evt nostr.Event + json.Unmarshal(results[0].Body, &evt) + if filter.Match(evt) { + ch <- evt + } + } + } + } + }() + + return ch, nil +} +``` + +(Disclaimer: since I have no idea of how to properly use the Google Drive API this interface is entirely made up.) 
diff --git a/khatru/docs/cookbook/search.md b/khatru/docs/cookbook/search.md new file mode 100644 index 0000000..93b8fd5 --- /dev/null +++ b/khatru/docs/cookbook/search.md @@ -0,0 +1,51 @@ +--- +outline: deep +--- + +# Implementing NIP-50 `search` support + +The [`nostr.Filter` type](https://pkg.go.dev/github.com/nbd-wtf/go-nostr#Filter) has a `Search` field, so you basically just has to handle that if it's present. + +It can be tricky to implement fulltext search properly though, so some [eventstores](../core/eventstore) implement it natively, such as [Bluge](https://pkg.go.dev/github.com/fiatjaf/eventstore/bluge), [OpenSearch](https://pkg.go.dev/github.com/fiatjaf/eventstore/opensearch) and [ElasticSearch](https://pkg.go.dev/github.com/fiatjaf/eventstore/elasticsearch) (although for the last two you'll need an instance of these database servers running, while with Bluge it's embedded). + +If you have any of these you can just use them just like any other eventstore: + +```go +func main () { + // other stuff here + + normal := &lmdb.LMDBBackend{Path: "data"} + os.MkdirAll(normal.Path, 0755) + if err := normal.Init(); err != nil { + panic(err) + } + + search := bluge.BlugeBackend{Path: "search", RawEventStore: normal} + if err := search.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, normal.SaveEvent, search.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, normal.QueryEvents, search.QueryEvents) + relay.DeleteEvent = append(relay.DeleteEvent, normal.DeleteEvent, search.DeleteEvent) + + // other stuff here +} +``` + +Note that in this case we're using the [LMDB](https://pkg.go.dev/github.com/fiatjaf/eventstore/lmdb) adapter for normal queries and it explicitly rejects any filter that contains a `Search` field, while [Bluge](https://pkg.go.dev/github.com/fiatjaf/eventstore/bluge) rejects any filter _without_ a `Search` value, which make them pair well together. 
+ +Other adapters, like [SQLite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3), implement search functionality on their own, so if you don't want to use that you would have to have a middleware between, like: + +```go + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent, search.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, func (ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + if len(filter.Search) > 0 { + return search.QueryEvents(ctx, filter) + } else { + filterNoSearch := filter + filterNoSearch.Search = "" + return normal.QueryEvents(ctx, filterNoSearch) + } + }) +``` diff --git a/khatru/docs/core/auth.md b/khatru/docs/core/auth.md new file mode 100644 index 0000000..8f53bd7 --- /dev/null +++ b/khatru/docs/core/auth.md @@ -0,0 +1,85 @@ +--- +outline: deep +--- + +# NIP-42 `AUTH` + +`khatru` supports [NIP-42](https://nips.nostr.com/42) out of the box. The functionality is exposed in the following ways. + +## Sending arbitrary `AUTH` challenges + +At any time you can send an `AUTH` message to a client that is making a request. + +It makes sense to give the user the option to authenticate right after they establish a connection, for example, when you have a relay that works differently depending on whether the user is authenticated or not. + +```go +relay := khatru.NewRelay() + +relay.OnConnect = append(relay.OnConnect, func(ctx context.Context) { + khatru.RequestAuth(ctx) +}) +``` + +This will send a NIP-42 `AUTH` challenge message to the client so it will have the option to authenticate itself whenever it wants to. 
+ +## Signaling to the client that a specific query requires an authenticated user + +If on `RejectFilter` or `RejectEvent` you prefix the message with `auth-required: `, that will automatically send an `AUTH` message before a `CLOSED` or `OK` with that prefix, such that the client will immediately be able to know it must authenticate to proceed and will already have the challenge required for that, so they can immediately replay the request. + +```go +relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) { + return true, "auth-required: this query requires you to be authenticated" +}) +relay.RejectEvent = append(relay.RejectEvent, func(ctx context.Context, event *nostr.Event) (bool, string) { + return true, "auth-required: publishing this event requires authentication" +}) +``` + +## Reading the auth status of a client + +After a client is authenticated and opens a new subscription with `REQ` or sends a new event with `EVENT`, you'll be able to read the public key they're authenticated with. + +```go +relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) { + authenticatedUser := khatru.GetAuthed(ctx) +}) +``` + +## Telling an authenticated user they're still not allowed to do something + +If the user is authenticated but still not allowed (because some specific filters or events are only accessible to some specific users) you can reply on `RejectFilter` or `RejectEvent` with a message prefixed with `"restricted: "` to make that clear to clients. 
+ +```go +relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) { + authenticatedUser := khatru.GetAuthed(ctx) + + if slices.Contain(authorizedUsers, authenticatedUser) { + return false + } else { + return true, "restricted: you're not a member of the privileged group that can read that stuff" + } +}) +``` + +## Reacting to a successful authentication + +Each `khatru.WebSocket` object has an `.Authed` channel that is closed whenever that connection performs a successful authentication. + +You can use that to emulate a listener for these events in case you want to keep track of who is authenticating in real time and not only check it when they request for something. + +```go + relay.OnConnect = append(relay.OnConnect, + khatru.RequestAuth, + func(ctx context.Context) { + go func(ctx context.Context) { + conn := khatru.GetConnection(ctx) + select { + case <-ctx.Done(): + fmt.Println("connection closed") + case <-conn.Authed: + fmt.Println("authenticated as", conn.AuthedPublicKey) + } + }(ctx) + }, + ) +``` diff --git a/khatru/docs/core/blossom.md b/khatru/docs/core/blossom.md new file mode 100644 index 0000000..2d24758 --- /dev/null +++ b/khatru/docs/core/blossom.md @@ -0,0 +1,93 @@ +--- +outline: deep +--- + +# Blossom: Media Storage + +Khatru comes with a built-in Blossom HTTP handler that allows you to store and serve media blobs using storage backend you want (filesystem, S3 etc). 
+ +## Basic Setup + +Here's a minimal example of what you should do to enable it: + +```go +func main() { + relay := khatru.NewRelay() + + // create blossom server with the relay and service URL + bl := blossom.New(relay, "http://localhost:3334") + + // create a database for keeping track of blob metadata + // (do not use the same database used for the relay events) + bl.Store = blossom.EventStoreBlobIndexWrapper{Store: blobdb, ServiceURL: bl.ServiceURL} + + // implement the required storage functions + bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error { + // store the blob data somewhere + return nil + }) + bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) { + // load and return the blob data + return nil, nil + }) + bl.DeleteBlob = append(bl.DeleteBlob, func(ctx context.Context, sha256 string) error { + // delete the blob data + return nil + }) + + http.ListenAndServe(":3334", relay) +} +``` + +## Storage Backend Integration + +You can integrate any storage backend by implementing the three core functions: + +- `StoreBlob`: Save the blob data +- `LoadBlob`: Retrieve the blob data +- `DeleteBlob`: Remove the blob data + +## Upload Restrictions + +You can implement upload restrictions using the `RejectUpload` hook. 
Here's an example that limits file size and restricts uploads to whitelisted users: + +```go +const maxFileSize = 10 * 1024 * 1024 // 10MB + +var allowedUsers = map[string]bool{ + "pubkey1": true, + "pubkey2": true, +} + +bl.RejectUpload = append(bl.RejectUpload, func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int) { + // check file size + if size > maxFileSize { + return true, "file too large", 413 + } + + // check if user is allowed + if auth == nil || !allowedUsers[auth.PubKey] { + return true, "unauthorized", 403 + } + + return false, "", 0 +}) +``` + +There are other `Reject*` hooks you can also implement, but this is the most important one. + +## Tracking blob metadata + +Blossom needs a database to keep track of blob metadata in order to know which user owns each blob, for example (and mind you that more than one user might own the same blob so when of them deletes the blob we don't actually delete it because the other user still has a claim to it). The simplest way to do it currently is by relying on a wrapper on top of fake Nostr events over eventstore, which is `EventStoreBlobIndexWrapper`, but other solutions can be used. + +```go +db := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-blobstore"} +db.Init() + +bl.Store = blossom.EventStoreBlobIndexWrapper{ + Store: db, + ServiceURL: bl.ServiceURL, +} +``` + +This will store blob metadata as special `kind:24242` events, but you shouldn't have to worry about it as the wrapper handles all the complexity of tracking ownership and managing blob lifecycle. Jut avoid reusing the same datastore that is used for the actual relay events unless you know what you're doing. 
diff --git a/khatru/docs/core/embed.md b/khatru/docs/core/embed.md new file mode 100644 index 0000000..5cea700 --- /dev/null +++ b/khatru/docs/core/embed.md @@ -0,0 +1,72 @@ +--- +outline: deep +--- + +# Mixing a `khatru` relay with other HTTP handlers + +If you already have a web server with all its HTML handlers or a JSON HTTP API or anything like that, something like: + +```go +func main() { + mux := http.NewServeMux() + + mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static")))) + mux.HandleFunc("/.well-known/nostr.json", handleNIP05) + mux.HandleFunc("/page/{page}", handlePage) + mux.HandleFunc("/", handleHomePage) + + log.Printf("listening at http://0.0.0.0:8080") + http.ListenAndServe("0.0.0.0:8080", mux) +} +``` + +Then you can easily inject a relay or two there in alternative paths if you want: + +```diff + mux := http.NewServeMux() + ++ relay1 := khatru.NewRelay() ++ relay2 := khatru.NewRelay() ++ // and so on + + mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static")))) + mux.HandleFunc("/.well-known/nostr.json", handleNIP05) + mux.HandleFunc("/page/{page}", handlePage) + mux.HandleFunc("/", handleHomePage) ++ mux.Handle("/relay1", relay1) ++ mux.Handle("/relay2", relay2) ++ // and so forth + + log.Printf("listening at http://0.0.0.0:8080") +``` + +Imagine each of these relay handlers is different, each can be using a different eventstore and have different policies for writing and reading. + +## Exposing a relay interface at the root + +If you want to expose your relay at the root path `/` that is also possible. You can just use it as the `mux` directly: + +```go +func main() { + relay := khatru.NewRelay() + // ... 
-- relay configuration steps (omitted for brevity) + + mux := relay.Router() // the relay comes with its own http.ServeMux inside + + mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static")))) + mux.HandleFunc("/.well-known/nostr.json", handleNIP05) + mux.HandleFunc("/page/{page}", handlePage) + mux.HandleFunc("/", handleHomePage) + + log.Printf("listening at http://0.0.0.0:8080") + http.ListenAndServe("0.0.0.0:8080", mux) +} +``` + +Every [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instance comes with its own ['http.ServeMux`](https://pkg.go.dev/net/http#ServeMux) inside. It ensures all requests are handled normally, but intercepts the requests that are pertinent to the relay operation, specifically the WebSocket requests, and the [NIP-11](https://nips.nostr.com/11) and the [NIP-86](https://nips.nostr.com/86) HTTP requests. + +## Exposing multiple relays at the same path or at the root + +That's also possible, as long as you have a way of differentiating each HTTP request that comes at the middleware level and associating it with a `khatru.Relay` instance in the background. + +See [dynamic](../cookbook/dynamic) for an example that does that using the subdomain. [`countries`](https://git.fiatjaf.com/countries) does it using the requester country implied from its IP address. diff --git a/khatru/docs/core/eventstore.md b/khatru/docs/core/eventstore.md new file mode 100644 index 0000000..1195bf4 --- /dev/null +++ b/khatru/docs/core/eventstore.md @@ -0,0 +1,99 @@ +--- +outline: deep +--- + +# Event Storage + +Khatru doesn't make any assumptions about how you'll want to store events. Any function can be plugged in to the `StoreEvent`, `DeleteEvent`, `ReplaceEvent` and `QueryEvents` hooks. + +However the [`eventstore`](https://github.com/fiatjaf/eventstore) library has adapters that you can easily plug into `khatru`'s hooks. 
+ +# Using the `eventstore` library + +The library includes many different adapters -- often called "backends" --, written by different people and with different levels of quality, reliability and speed. + +For all of them you start by instantiating a struct containing some basic options and a pointer (a file path for local databases, a connection string for remote databases) to the data. Then you call `.Init()` and if all is well you're ready to start storing, querying and deleting events, so you can pass the respective functions to their `khatru` counterparts. These eventstores also expose a `.Close()` function that must be called if you're going to stop using that store and keep your application open. + +Here's an example with the [Badger](https://pkg.go.dev/github.com/fiatjaf/eventstore/badger) adapter, made for the [Badger](https://github.com/dgraph-io/badger) embedded key-value database: + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} +``` + +[LMDB](https://pkg.go.dev/github.com/fiatjaf/eventstore/lmdb) works the same way. + +[SQLite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3) also stores things locally so it only needs a `Path`. 
+ +[PostgreSQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/postgresql) and [MySQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/mysql) use remote connections to database servers, so they take a `DatabaseURL` parameter, but after that it's the same. + +## Using two at a time + +If you want to use two different adapters at the same time that's easy. Just add both to the corresponding slices: + +```go + relay.StoreEvent = append(relay.StoreEvent, db1.SaveEvent, db2.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db1.QueryEvents, db2.SaveEvent) +``` + +But that will duplicate events on both and then return duplicated events on each query. + +## Sharding + +You can do a kind of sharding, for example, by storing some events in one store and others in another: + +For example, maybe you want kind 1 events in `db1` and kind 30023 events in `db30023`: + +```go + relay.StoreEvent = append(relay.StoreEvent, func (ctx context.Context, evt *nostr.Event) error { + switch evt.Kind { + case 1: + return db1.StoreEvent(ctx, evt) + case 30023: + return db30023.StoreEvent(ctx, evt) + default: + return nil + } + }) + relay.QueryEvents = append(relay.QueryEvents, func (ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) { + for _, kind := range filter.Kinds { + switch kind { + case 1: + filter1 := filter + filter1.Kinds = []int{1} + return db1.QueryEvents(ctx, filter1) + case 30023: + filter30023 := filter + filter30023.Kinds = []int{30023} + return db30023.QueryEvents(ctx, filter30023) + default: + return nil, nil + } + } + }) +``` diff --git a/khatru/docs/core/management.md b/khatru/docs/core/management.md new file mode 100644 index 0000000..3af9fc2 --- /dev/null +++ b/khatru/docs/core/management.md @@ -0,0 +1,85 @@ +--- +outline: deep +--- + +# Management API + +[NIP-86](https://nips.nostr.com/86) specifies a set of RPC methods for managing the boring aspects of relays, such as whitelisting or banning users, banning individual events, banning IPs 
and so on. + +All [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instances expose a field `ManagementAPI` with a [`RelayManagementAPI`](https://pkg.go.dev/github.com/fiatjaf/khatru#RelayManagementAPI) instance inside, which can be used for creating handlers for each of the RPC methods. + +There is also a generic `RejectAPICall` which is a slice of functions that will be called before any RPC method, if they exist and, if any of them returns true, the request will be rejected. + +The most basic implementation of a `RejectAPICall` handler would be one that checks the public key of the caller with a hardcoded public key of the relay owner: + +```go +var owner = "" +var allowedPubkeys = make([]string, 0, 10) + +func main () { + relay := khatru.NewRelay() + + relay.ManagementAPI.RejectAPICall = append(relay.ManagementAPI.RejectAPICall, + func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string) { + user := khatru.GetAuthed(ctx) + if user != owner { + return true, "go away, intruder" + } + return false, "" + } + ) + + relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error { + allowedPubkeys = append(allowedPubkeys, pubkey) + return nil + } + relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error { + idx := slices.Index(allowedPubkeys, pubkey) + if idx == -1 { + return fmt.Errorf("pubkey already not allowed") + } + allowedPubkeys = slices.Delete(allowedPubkeys, idx, idx+1) + } +} +``` + +You can also not provide any `RejectAPICall` handler and do the approval specifically on each RPC handler. + +In the following example any current member can include any other pubkey, and anyone who was added before is able to remove any pubkey that was added afterwards (not a very good idea, but serves as an example). 
+ +```go +var allowedPubkeys = []string{""} + +func main () { + relay := khatru.NewRelay() + + relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error { + caller := khatru.GetAuthed(ctx) + + if slices.Contains(allowedPubkeys, caller) { + allowedPubkeys = append(allowedPubkeys, pubkey) + return nil + } + + return fmt.Errorf("you're not authorized") + } + relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error { + caller := khatru.GetAuthed(ctx) + + callerIdx := slices.Index(allowedPubkeys, caller) + if callerIdx == -1 { + return fmt.Errorf("you're not even allowed here") + } + + targetIdx := slices.Index(allowedPubkeys, pubkey) + if targetIdx < callerIdx { + // target is a bigger OG than the caller, so it has bigger influence and can't be removed + return fmt.Errorf("you're less powerful than the pubkey you're trying to remove") + } + + // allow deletion since the target came after the caller + allowedPubkeys = slices.Delete(allowedPubkeys, targetIdx, targetIdx+1) + return nil + } +} +``` diff --git a/khatru/docs/core/routing.md b/khatru/docs/core/routing.md new file mode 100644 index 0000000..95b7bfa --- /dev/null +++ b/khatru/docs/core/routing.md @@ -0,0 +1,63 @@ +--- +outline: deep +--- + +# Request Routing + +If you have one (or more) set of policies that have to be executed in sequence (for example, first you check for the presence of a tag, then later in the next policies you use that tag without checking) and they only apply to some class of events, but you still want your relay to deal with other classes of events that can lead to cumbersome sets of rules, always having to check if an event meets the requirements and so on. There is where routing can help you. 
+ +It also can be handy if you get a [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) from somewhere else, like a library such as [`relay29`](https://github.com/fiatjaf/relay29), and you want to combine it with other policies without some interfering with the others. As in the example below: + +```go +sk := os.Getenv("RELAY_SECRET_KEY") + +// a relay for NIP-29 groups +groupsStore := badger.BadgerBackend{} +groupsStore.Init() +groupsRelay, _ := khatru29.Init(relay29.Options{Domain: "example.com", DB: groupsStore, SecretKey: sk}) +// ... + +// a relay for everything else +publicStore := slicestore.SliceStore{} +publicStore.Init() +publicRelay := khatru.NewRelay() +publicRelay.StoreEvent = append(publicRelay.StoreEvent, publicStore.SaveEvent) +publicRelay.QueryEvents = append(publicRelay.QueryEvents, publicStore.QueryEvents) +publicRelay.CountEvents = append(publicRelay.CountEvents, publicStore.CountEvents) +publicRelay.DeleteEvent = append(publicRelay.DeleteEvent, publicStore.DeleteEvent) +// ... + +// a higher-level relay that just routes between the two above +router := khatru.NewRouter() + +// route requests and events to the groups relay +router.Route(). + Req(func (filter nostr.Filter) bool { + _, hasHTag := filter.Tags["h"] + if hasHTag { + return true + } + return slices.Contains(filter.Kinds, func (k int) bool { return k == 39000 || k == 39001 || k == 39002 }) + }). + Event(func (event *nostr.Event) bool { + switch { + case event.Kind <= 9021 && event.Kind >= 9000: + return true + case event.Kind <= 39010 && event.Kind >= 39000: + return true + case event.Kind <= 12 && event.Kind >= 9: + return true + case event.Tags.Find("h") != nil: + return true + default: + return false + } + }). + Relay(groupsRelay) + +// route requests and events to the other +router.Route(). + Req(func (filter nostr.Filter) bool { return true }). + Event(func (event *nostr.Event) bool { return true }). 
+ Relay(publicRelay) +``` diff --git a/khatru/docs/getting-started/index.md b/khatru/docs/getting-started/index.md new file mode 100644 index 0000000..ca9ef65 --- /dev/null +++ b/khatru/docs/getting-started/index.md @@ -0,0 +1,79 @@ +--- +outline: deep +--- + +# Getting Started + +Download the library: + +```bash +go get github.com/fiatjaf/khatru +``` + +Include the library: + +```go +import "github.com/fiatjaf/khatru" +``` + +Then in your `main()` function, instantiate a new `Relay`: + +```go +relay := khatru.NewRelay() +``` + +Optionally, set up basic info about the relay that will be returned according to [NIP-11](https://nips.nostr.com/11): + +```go +relay.Info.Name = "my relay" +relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798" +relay.Info.Description = "this is my custom relay" +relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images" +``` + +Now we must set up the basic functions for accepting events and answering queries. We could make our own querying engine from scratch, but we can also use [eventstore](https://github.com/fiatjaf/eventstore). In this example we'll use the SQLite adapter: + +```go +db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"} +if err := db.Init(); err != nil { + panic(err) +} + +relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) +relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) +relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) +relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) +``` + +These are lists of functions that will be called in order every time an `EVENT` is received, or a `REQ` query is received. You can add more than one handler there, you can have a function that reads from some other server, but just in some cases, you can do anything. 
+ +The next step is adding some protection, because maybe we don't want to allow _anyone_ to write to our relay. Maybe we want to only allow people that have a pubkey starting with `"a"`, `"b"` or `"c"`: + +```go +relay.RejectEvent = append(relay.RejectEvent, func (ctx context.Context, event *nostr.Event) (reject bool, msg string) { + firstHexChar := event.PubKey[0:1] + if firstHexChar == "a" || firstHexChar == "b" || firstHexChar == "c" { + return false, "" // allow + } + return true, "you're not allowed in this shard" +}) +``` + +We can also make use of some default policies that come bundled with Khatru: + +```go +import "github.com/fiatjaf/khatru" // implied + +relay.RejectEvent = append(relay.RejectEvent, policies.PreventLargeTags(120), policies.PreventTimestampsInThePast(time.Hour * 2), policies.PreventTimestampsInTheFuture(time.Minute * 30)) +``` + +There are many other ways to customize the relay behavior. Take a look at the [`Relay` struct docs](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) for more, or read the pages on the sidebar. + +The last step is actually running the server. Our relay is actually an `http.Handler`, so it can just be ran directly with `http.ListenAndServe()` from the standard library: + +```go +fmt.Println("running on :3334") +http.ListenAndServe(":3334", relay) +``` + +And that's it. diff --git a/khatru/docs/index.md b/khatru/docs/index.md new file mode 100644 index 0000000..39754f0 --- /dev/null +++ b/khatru/docs/index.md @@ -0,0 +1,57 @@ +--- +layout: home + +hero: + name: khatru + text: a framework for making Nostr relays + tagline: write your custom relay with code over configuration + actions: + - theme: brand + text: Get Started + link: /getting-started + +features: + - title: It's a library + icon: 🐢 + link: /getting-started + details: This is not an executable that you have to tweak with config files, it's a library that you import and use, so you just write code and it does exactly what you want. 
+ - title: It's very very customizable + icon: 🎶 + link: /core/embed + details: Run arbitrary functions to reject events, reject filters, overwrite results of queries, perform actual queries, mix the relay stuff with other HTTP handlers or even run it inside an existing website. + - title: It plugs into event stores easily + icon: 📦 + link: /core/eventstore + details: khatru's companion, the `eventstore` library, provides all methods for storing and querying events efficiently from SQLite, LMDB, Postgres, Badger and others. + - title: It supports NIP-42 AUTH + icon: 🪪 + link: /core/auth + details: You can check if a client is authenticated or request AUTH anytime, or reject an event or a filter with an "auth-required:" and it will be handled automatically. + - title: It supports NIP-86 Management API + icon: 🛠️ + link: /core/management + details: You just define your custom handlers for each RPC call and they will be exposed appropriately to management clients. + - title: It's written in Go + icon: 🛵 + link: https://pkg.go.dev/github.com/fiatjaf/khatru + details: That means it is fast and lightweight, you can learn the language in 5 minutes and it builds your relay into a single binary that's easy to ship and deploy. +--- + +## A glimpse of `khatru`'s power + +It allows you to create a fully-functional relay in 7 lines of code: + +```go +func main() { + relay := khatru.NewRelay() + db := badger.BadgerBackend{Path: "/tmp/khatru-badgern-tmp"} + db.Init() + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + http.ListenAndServe(":3334", relay) +} +``` + +After that you can customize it in infinite ways. See the links above. 
diff --git a/khatru/docs/justfile b/khatru/docs/justfile new file mode 100644 index 0000000..f4d2a7c --- /dev/null +++ b/khatru/docs/justfile @@ -0,0 +1,7 @@ +export PATH := "./node_modules/.bin:" + env_var('PATH') + +dev: + vitepress dev + +build: + vitepress build diff --git a/khatru/docs/logo.png b/khatru/docs/logo.png new file mode 100644 index 0000000..614d34c Binary files /dev/null and b/khatru/docs/logo.png differ diff --git a/khatru/docs/package.json b/khatru/docs/package.json new file mode 100644 index 0000000..325686d --- /dev/null +++ b/khatru/docs/package.json @@ -0,0 +1,5 @@ +{ + "dependencies": { + "vitepress": "^1.3.0" + } +} diff --git a/khatru/docs/why.md b/khatru/docs/why.md new file mode 100644 index 0000000..4b8cfd7 --- /dev/null +++ b/khatru/docs/why.md @@ -0,0 +1,38 @@ +# Why `khatru`? + +If you want to craft a relay that isn't completely dumb, but it's supposed to + +* have custom own policies for accepting events; +* handle requests for stored events using data from multiple sources; +* require users to authenticate for some operations and not for others; +* and other stuff. + +`khatru` provides a simple framework for creating your custom relay without having to reimplement it all from scratch or hack into other relay codebases. 
+ +# Use cases + +`khatru` is being used today in the real world by + +* [pyramid](https://github.com/github-tijlxyz/khatru-pyramid), a relay with a invite-based whitelisting system similar to [lobste.rs](https://lobste.rs) +* [triflector](https://github.com/coracle-social/triflector), a relay which enforces authentication based on custom policy +* [countries](https://git.fiatjaf.com/countries), a relay that stores and serves content differently according to the country of the reader or writer +* [jingle](https://github.com/fiatjaf/jingle), a simple relay that exposes part of `khatru`'s configuration options to JavaScript code supplied by the user that is interpreted at runtime +* [njump](https://git.njump.me/njump), a Nostr gateway to the web that also serves its cached content in a relay interface +* [song](https://git.fiatjaf.com/song), a personal git server that comes with an embedded relay dedicated to dealing with [NIP-34](https://nips.nostr.com/34) git-related Nostr events +* [relay29](https://github.com/fiatjaf/relay29), a relay that powers most of the [NIP-29](https://nips.nostr.com/29) Nostr groups ecosystem +* [fiatjaf.com](https://fiatjaf.com), a personal website that serves the same content as HTML but also as Nostr events. +* [gm-relay](https://github.com/ptrio42/gm-relay), a relay that only accepts GM notes once a day. + +## Other possible use cases + +Other possible use cases, still not developed, include: + +* Bridges: `khatru` was initially developed to serve as an RSS-to-Nostr bridge server that would fetch RSS feeds on demand in order to serve them to Nostr clients. Other similar use cases could fit. +* Paid relays: Nostr has multiple relays that charge for write-access currently, but there are many other unexplored ways to make this scheme work: charge per each note, charge per month, charge per month per note, have different payment methods, and so on. 
+* Other whitelisting schemes: _pyramid_ implements a cool inviting scheme for granting access to the relay, same for _triflector_, but there are infinite other possibilities of other ways to grant access to people to an exclusive or community relay. +* Just-in-time content generation: instead of storing a bunch of signed JSON and serving that to clients, there could be relays that store data in a more compact format and turn it into Nostr events at the time they receive a request from a Nostr client -- or relays that do some kind of live data generation based on who is connected, not storing anything. +* Community relays: some internet communities may want relays that restrict writing or browsing of content only to its members, essentially making it a closed group -- or it could be closed for outsiders to write, but public for them to read and vice-versa. +* Automated moderation schemes: relays that are owned by a group (either a static or a dynamic group) can rely on signals from their members, like mutes or reports, to decide what content to allow in its domains and what to disallow, making crowdfunded moderation easy. +* Curation: in the same way as community relays can deal with unwanted content, they can also perform curation based on signals from their members (for example, if a member of the relay likes some note from someone that is outside the relay that note can be fetched and stored), creating a dynamic relay that can be browsed by anyone that share the same interests as that community. +* Local relays: a relay that can be only browsed by people using the WiFi connection of some event or some building, serving as a way to share temporary or restricted content that only interests people sharing that circumstance. 
+* Cool experiments: relays that only allow one note per user per day, relays that require proof-of-work on event ids], relays that require engagement otherwise you get kicked, relays that return events in different ordering, relays that impose arbitrary funny rules on notes in order for them to be accepted (i.e. they must contain the word "poo"), I don't know! diff --git a/khatru/ephemeral.go b/khatru/ephemeral.go new file mode 100644 index 0000000..ebd0f92 --- /dev/null +++ b/khatru/ephemeral.go @@ -0,0 +1,26 @@ +package khatru + +import ( + "context" + "errors" + + "github.com/nbd-wtf/go-nostr" +) + +func (rl *Relay) handleEphemeral(ctx context.Context, evt *nostr.Event) error { + for _, reject := range rl.RejectEvent { + if reject, msg := reject(ctx, evt); reject { + if msg == "" { + return errors.New("blocked: no reason") + } else { + return errors.New(nostr.NormalizeOKMessage(msg, "blocked")) + } + } + } + + for _, oee := range rl.OnEphemeralEvent { + oee(ctx, evt) + } + + return nil +} diff --git a/khatru/examples/basic-badger/main.go b/khatru/examples/basic-badger/main.go new file mode 100644 index 0000000..8e54596 --- /dev/null +++ b/khatru/examples/basic-badger/main.go @@ -0,0 +1,28 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := badger.BadgerBackend{Path: "/tmp/khatru-badgern-tmp"} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + relay.Negentropy = true + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/basic-elasticsearch/main.go 
b/khatru/examples/basic-elasticsearch/main.go new file mode 100644 index 0000000..3202b3c --- /dev/null +++ b/khatru/examples/basic-elasticsearch/main.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/fiatjaf/eventstore/elasticsearch" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := elasticsearch.ElasticsearchStorage{URL: ""} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/basic-lmdb/main.go b/khatru/examples/basic-lmdb/main.go new file mode 100644 index 0000000..203d318 --- /dev/null +++ b/khatru/examples/basic-lmdb/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "net/http" + "os" + + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := lmdb.LMDBBackend{Path: "/tmp/khatru-lmdb-tmp"} + os.MkdirAll(db.Path, 0o755) + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/basic-postgres/main.go b/khatru/examples/basic-postgres/main.go new file mode 100644 index 0000000..4754c18 --- /dev/null +++ b/khatru/examples/basic-postgres/main.go @@ -0,0 +1,27 @@ +package main + 
+import ( + "fmt" + "net/http" + + "github.com/fiatjaf/eventstore/postgresql" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := postgresql.PostgresBackend{DatabaseURL: "postgresql://localhost:5432/tmp-khatru-relay"} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/basic-sqlite3/main.go b/khatru/examples/basic-sqlite3/main.go new file mode 100644 index 0000000..d1670e2 --- /dev/null +++ b/khatru/examples/basic-sqlite3/main.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/fiatjaf/eventstore/sqlite3" + "github.com/fiatjaf/khatru" +) + +func main() { + relay := khatru.NewRelay() + + db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"} + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/blossom/main.go b/khatru/examples/blossom/main.go new file mode 100644 index 0000000..c5cf4ef --- /dev/null +++ b/khatru/examples/blossom/main.go @@ -0,0 +1,46 @@ +package main + +import ( + "context" + "fmt" + "io" + "net/http" + "strings" + + "github.com/fiatjaf/eventstore/badger" + "github.com/fiatjaf/khatru" + "github.com/fiatjaf/khatru/blossom" +) + 
+func main() { + relay := khatru.NewRelay() + + db := &badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"} + if err := db.Init(); err != nil { + panic(err) + } + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent) + relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent) + + bdb := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-tmp"} + if err := bdb.Init(); err != nil { + panic(err) + } + bl := blossom.New(relay, "http://localhost:3334") + bl.Store = blossom.EventStoreBlobIndexWrapper{Store: bdb, ServiceURL: bl.ServiceURL} + bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error { + fmt.Println("storing", sha256, len(body)) + return nil + }) + bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) { + fmt.Println("loading", sha256) + blob := strings.NewReader("aaaaa") + return blob, nil + }) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/exclusive/main.go b/khatru/examples/exclusive/main.go new file mode 100644 index 0000000..ab82da6 --- /dev/null +++ b/khatru/examples/exclusive/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/fiatjaf/eventstore/lmdb" + "github.com/fiatjaf/khatru" + "github.com/fiatjaf/khatru/policies" + "github.com/nbd-wtf/go-nostr" +) + +func main() { + relay := khatru.NewRelay() + + db := lmdb.LMDBBackend{Path: "/tmp/exclusive"} + os.MkdirAll(db.Path, 0o755) + if err := db.Init(); err != nil { + panic(err) + } + + relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents) + relay.CountEvents = append(relay.CountEvents, db.CountEvents) + relay.DeleteEvent = 
append(relay.DeleteEvent, db.DeleteEvent) + + relay.RejectEvent = append(relay.RejectEvent, policies.PreventTooManyIndexableTags(10, nil, nil)) + relay.RejectFilter = append(relay.RejectFilter, policies.NoComplexFilters) + + relay.OnEventSaved = append(relay.OnEventSaved, func(ctx context.Context, event *nostr.Event) { + }) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} + +func deleteStuffThatCanBeFoundElsewhere() { +} diff --git a/khatru/examples/readme-demo/demo-memory b/khatru/examples/readme-demo/demo-memory new file mode 100755 index 0000000..55ffe0b Binary files /dev/null and b/khatru/examples/readme-demo/demo-memory differ diff --git a/khatru/examples/readme-demo/main.go b/khatru/examples/readme-demo/main.go new file mode 100644 index 0000000..5fdc0ee --- /dev/null +++ b/khatru/examples/readme-demo/main.go @@ -0,0 +1,98 @@ +package main + +import ( + "context" + "fmt" + "log" + "net/http" + + "github.com/fiatjaf/khatru" + "github.com/fiatjaf/khatru/policies" + "github.com/nbd-wtf/go-nostr" +) + +func main() { + // create the relay instance + relay := khatru.NewRelay() + + // set up some basic properties (will be returned on the NIP-11 endpoint) + relay.Info.Name = "my relay" + relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798" + relay.Info.Description = "this is my custom relay" + relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images" + + // you must bring your own storage scheme -- if you want to have any + store := make(map[string]*nostr.Event, 120) + + // set up the basic relay functions + relay.StoreEvent = append(relay.StoreEvent, + func(ctx context.Context, event *nostr.Event) error { + store[event.ID] = event + return nil + }, + ) + relay.QueryEvents = append(relay.QueryEvents, + func(ctx context.Context, filter 
nostr.Filter) (chan *nostr.Event, error) { + ch := make(chan *nostr.Event) + go func() { + for _, evt := range store { + if filter.Matches(evt) { + ch <- evt + } + } + close(ch) + }() + return ch, nil + }, + ) + relay.DeleteEvent = append(relay.DeleteEvent, + func(ctx context.Context, event *nostr.Event) error { + delete(store, event.ID) + return nil + }, + ) + + // there are many other configurable things you can set + relay.RejectEvent = append(relay.RejectEvent, + // built-in policies + policies.ValidateKind, + + // define your own policies + policies.PreventLargeTags(100), + func(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if event.PubKey == "fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52" { + return true, "we don't allow this person to write here" + } + return false, "" // anyone else can + }, + ) + + // you can request auth by rejecting an event or a request with the prefix "auth-required: " + relay.RejectFilter = append(relay.RejectFilter, + // built-in policies + policies.NoComplexFilters, + + // define your own policies + func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) { + if pubkey := khatru.GetAuthed(ctx); pubkey != "" { + log.Printf("request from %s\n", pubkey) + return false, "" + } + return true, "auth-required: only authenticated users can read from this relay" + // (this will cause an AUTH message to be sent and then a CLOSED message such that clients can + // authenticate and then request again) + }, + ) + // check the docs for more goodies! 
+ + mux := relay.Router() + // set up other http handlers + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("content-type", "text/html") + fmt.Fprintf(w, `welcome to my relay!`) + }) + + // start the server + fmt.Println("running on :3334") + http.ListenAndServe(":3334", relay) +} diff --git a/khatru/examples/routing/main.go b/khatru/examples/routing/main.go new file mode 100644 index 0000000..6fcd3ae --- /dev/null +++ b/khatru/examples/routing/main.go @@ -0,0 +1,70 @@ +package main + +import ( + "fmt" + "net/http" + "slices" + + "github.com/fiatjaf/eventstore/slicestore" + "github.com/fiatjaf/eventstore/sqlite3" + "github.com/fiatjaf/khatru" + "github.com/nbd-wtf/go-nostr" +) + +func main() { + db1 := slicestore.SliceStore{} + db1.Init() + r1 := khatru.NewRelay() + r1.StoreEvent = append(r1.StoreEvent, db1.SaveEvent) + r1.QueryEvents = append(r1.QueryEvents, db1.QueryEvents) + r1.CountEvents = append(r1.CountEvents, db1.CountEvents) + r1.DeleteEvent = append(r1.DeleteEvent, db1.DeleteEvent) + + db2 := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/t"} + db2.Init() + r2 := khatru.NewRelay() + r2.StoreEvent = append(r2.StoreEvent, db2.SaveEvent) + r2.QueryEvents = append(r2.QueryEvents, db2.QueryEvents) + r2.CountEvents = append(r2.CountEvents, db2.CountEvents) + r2.DeleteEvent = append(r2.DeleteEvent, db2.DeleteEvent) + + db3 := slicestore.SliceStore{} + db3.Init() + r3 := khatru.NewRelay() + r3.StoreEvent = append(r3.StoreEvent, db3.SaveEvent) + r3.QueryEvents = append(r3.QueryEvents, db3.QueryEvents) + r3.CountEvents = append(r3.CountEvents, db3.CountEvents) + r3.DeleteEvent = append(r3.DeleteEvent, db3.DeleteEvent) + + router := khatru.NewRouter() + + router.Route(). + Req(func(filter nostr.Filter) bool { + return slices.Contains(filter.Kinds, 30023) + }). + Event(func(event *nostr.Event) bool { + return event.Kind == 30023 + }). + Relay(r1) + + router.Route(). 
+ Req(func(filter nostr.Filter) bool { + return slices.Contains(filter.Kinds, 1) && slices.Contains(filter.Tags["t"], "spam") + }). + Event(func(event *nostr.Event) bool { + return event.Kind == 1 && event.Tags.FindWithValue("t", "spam") != nil + }). + Relay(r2) + + router.Route(). + Req(func(filter nostr.Filter) bool { + return slices.Contains(filter.Kinds, 1) + }). + Event(func(event *nostr.Event) bool { + return event.Kind == 1 + }). + Relay(r3) + + fmt.Println("running on :3334") + http.ListenAndServe(":3334", router) +} diff --git a/khatru/expiration.go b/khatru/expiration.go new file mode 100644 index 0000000..e7850ce --- /dev/null +++ b/khatru/expiration.go @@ -0,0 +1,150 @@ +package khatru + +import ( + "container/heap" + "context" + "sync" + "time" + + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip40" +) + +type expiringEvent struct { + id string + expiresAt nostr.Timestamp +} + +type expiringEventHeap []expiringEvent + +func (h expiringEventHeap) Len() int { return len(h) } +func (h expiringEventHeap) Less(i, j int) bool { return h[i].expiresAt < h[j].expiresAt } +func (h expiringEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *expiringEventHeap) Push(x interface{}) { + *h = append(*h, x.(expiringEvent)) +} + +func (h *expiringEventHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +type expirationManager struct { + events expiringEventHeap + mu sync.Mutex + relay *Relay + interval time.Duration + initialScanDone bool +} + +func newExpirationManager(relay *Relay) *expirationManager { + return &expirationManager{ + events: make(expiringEventHeap, 0), + relay: relay, + interval: time.Hour, + } +} + +func (em *expirationManager) start(ctx context.Context) { + ticker := time.NewTicker(em.interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if !em.initialScanDone { + em.initialScan(ctx) + em.initialScanDone = true + } + + 
em.checkExpiredEvents(ctx) + } + } +} + +func (em *expirationManager) initialScan(ctx context.Context) { + em.mu.Lock() + defer em.mu.Unlock() + + // query all events + ctx = context.WithValue(ctx, internalCallKey, struct{}{}) + for _, query := range em.relay.QueryEvents { + ch, err := query(ctx, nostr.Filter{}) + if err != nil { + continue + } + + for evt := range ch { + if expiresAt := nip40.GetExpiration(evt.Tags); expiresAt != -1 { + heap.Push(&em.events, expiringEvent{ + id: evt.ID, + expiresAt: expiresAt, + }) + } + } + } + + heap.Init(&em.events) +} + +func (em *expirationManager) checkExpiredEvents(ctx context.Context) { + em.mu.Lock() + defer em.mu.Unlock() + + now := nostr.Now() + + // keep deleting events from the heap as long as they're expired + for em.events.Len() > 0 { + next := em.events[0] + if now < next.expiresAt { + break + } + + heap.Pop(&em.events) + + ctx := context.WithValue(ctx, internalCallKey, struct{}{}) + for _, query := range em.relay.QueryEvents { + ch, err := query(ctx, nostr.Filter{IDs: []string{next.id}}) + if err != nil { + continue + } + + if evt := <-ch; evt != nil { + for _, del := range em.relay.DeleteEvent { + del(ctx, evt) + } + } + break + } + } +} + +func (em *expirationManager) trackEvent(evt *nostr.Event) { + if expiresAt := nip40.GetExpiration(evt.Tags); expiresAt != -1 { + em.mu.Lock() + heap.Push(&em.events, expiringEvent{ + id: evt.ID, + expiresAt: expiresAt, + }) + em.mu.Unlock() + } +} + +func (em *expirationManager) removeEvent(id string) { + em.mu.Lock() + defer em.mu.Unlock() + + // Find and remove the event from the heap + for i := 0; i < len(em.events); i++ { + if em.events[i].id == id { + heap.Remove(&em.events, i) + break + } + } +} diff --git a/khatru/get-started.go b/khatru/get-started.go new file mode 100644 index 0000000..aca4018 --- /dev/null +++ b/khatru/get-started.go @@ -0,0 +1,65 @@ +package khatru + +import ( + "context" + "net" + "net/http" + "strconv" + "time" + + "github.com/fasthttp/websocket" 
+ "github.com/rs/cors" +) + +func (rl *Relay) Router() *http.ServeMux { + return rl.serveMux +} + +func (rl *Relay) SetRouter(mux *http.ServeMux) { + rl.serveMux = mux +} + +// Start creates an http server and starts listening on given host and port. +func (rl *Relay) Start(host string, port int, started ...chan bool) error { + addr := net.JoinHostPort(host, strconv.Itoa(port)) + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + + rl.Addr = ln.Addr().String() + rl.httpServer = &http.Server{ + Handler: cors.Default().Handler(rl), + Addr: addr, + WriteTimeout: 2 * time.Second, + ReadTimeout: 2 * time.Second, + IdleTimeout: 30 * time.Second, + } + + // notify caller that we're starting + for _, started := range started { + close(started) + } + + if err := rl.httpServer.Serve(ln); err == http.ErrServerClosed { + return nil + } else if err != nil { + return err + } else { + return nil + } +} + +// Shutdown sends a websocket close control message to all connected clients. 
+func (rl *Relay) Shutdown(ctx context.Context) { + rl.httpServer.Shutdown(ctx) + rl.clientsMutex.Lock() + defer rl.clientsMutex.Unlock() + for ws := range rl.clients { + ws.conn.WriteControl(websocket.CloseMessage, nil, time.Now().Add(time.Second)) + ws.cancel() + ws.conn.Close() + } + clear(rl.clients) + rl.listeners = rl.listeners[:0] +} diff --git a/khatru/go.mod b/khatru/go.mod new file mode 100644 index 0000000..1f5db35 --- /dev/null +++ b/khatru/go.mod @@ -0,0 +1,72 @@ +module github.com/fiatjaf/khatru + +go 1.24.1 + +require ( + github.com/bep/debounce v1.2.1 + github.com/fasthttp/websocket v1.5.12 + github.com/fiatjaf/eventstore v0.16.2 + github.com/liamg/magic v0.0.1 + github.com/mailru/easyjson v0.9.0 + github.com/nbd-wtf/go-nostr v0.51.8 + github.com/puzpuzpuz/xsync/v3 v3.5.1 + github.com/rs/cors v1.11.1 + github.com/stretchr/testify v1.10.0 +) + +require ( + fiatjaf.com/lib v0.2.0 // indirect + github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect + github.com/PowerDNS/lmdb-go v1.9.3 // indirect + github.com/andybalholm/brotli v1.1.1 // indirect + github.com/aquasecurity/esquery v0.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect + github.com/bytedance/sonic v1.13.2 // indirect + github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudwego/base64x v0.1.5 // indirect + github.com/coder/websocket v1.8.13 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect + github.com/dgraph-io/badger/v4 v4.5.0 // indirect + github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect + 
github.com/elastic/go-elasticsearch/v8 v8.16.0 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/flatbuffers v24.12.23+incompatible // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mattn/go-sqlite3 v1.14.24 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.59.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect + golang.org/x/arch v0.15.0 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/net v0.37.0 // indirect + golang.org/x/sys v0.31.0 // indirect + google.golang.org/protobuf v1.36.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/khatru/go.sum b/khatru/go.sum new file mode 100644 index 0000000..b0de1e8 --- /dev/null +++ b/khatru/go.sum @@ -0,0 +1,254 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +fiatjaf.com/lib v0.2.0 
h1:TgIJESbbND6GjOgGHxF5jsO6EMjuAxIzZHPo5DXYexs= +fiatjaf.com/lib v0.2.0/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg= +github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA= +github.com/PowerDNS/lmdb-go v1.9.3 h1:AUMY2pZT8WRpkEv39I9Id3MuoHd+NZbTVpNhruVkPTg= +github.com/PowerDNS/lmdb-go v1.9.3/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/aquasecurity/esquery v0.2.0 h1:9WWXve95TE8hbm3736WB7nS6Owl8UGDeu+0jiyE9ttA= +github.com/aquasecurity/esquery v0.2.0/go.mod h1:VU+CIFR6C+H142HHZf9RUkp4Eedpo9UrEKeCQHWf9ao= +github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY= +github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ= +github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= +github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/bytedance/sonic v1.13.1 h1:Jyd5CIvdFnkOWuKXr+wm4Nyk2h0yAFsr8ucJgEasO3g= +github.com/bytedance/sonic v1.13.1/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic 
v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= +github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= +github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= +github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= +github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= +github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= +github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= +github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= +github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= +github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod 
h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g= +github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A= +github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I= +github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA= +github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= +github.com/elastic/go-elasticsearch/v7 v7.6.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= +github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v8 v8.16.0 h1:f7bR+iBz8GTAVhwyFO3hm4ixsz2eMaEy0QroYnXV3jE= +github.com/elastic/go-elasticsearch/v8 v8.16.0/go.mod h1:lGMlgKIbYoRvay3xWBeKahAiJOgmFDsjZC39nmO3H64= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane 
v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fasthttp/websocket v1.5.12 h1:e4RGPpWW2HTbL3zV0Y/t7g0ub294LkiuXXUuTOUInlE= +github.com/fasthttp/websocket v1.5.12/go.mod h1:I+liyL7/4moHojiOgUOIKEWm9EIxHqxZChS+aMFltyg= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fiatjaf/eventstore v0.16.2 h1:h4rHwSwPcqAKqWUsAbYWUhDeSgm2Kp+PBkJc3FgBYu4= +github.com/fiatjaf/eventstore v0.16.2/go.mod h1:0gU8fzYO/bG+NQAVlHtJWOlt3JKKFefh5Xjj2d1dLIs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8= +github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/jgroeneveld/schema v1.0.0 h1:J0E10CrOkiSEsw6dfb1IfrDJD14pf6QLVJ3tRPl/syI= +github.com/jgroeneveld/schema v1.0.0/go.mod h1:M14lv7sNMtGvo3ops1MwslaSYgDYxrSmbzWIQ0Mr5rs= +github.com/jgroeneveld/trial v2.0.0+incompatible h1:d59ctdgor+VqdZCAiUfVN8K13s0ALDioG5DWwZNtRuQ= +github.com/jgroeneveld/trial v2.0.0+incompatible/go.mod h1:I6INLW96EN8WysNBXUFI3M4RIC8ePg9ntAc/Wy+U/+M= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/liamg/magic v0.0.1 h1:Ru22ElY+sCh6RvRTWjQzKKCxsEco8hE0co8n1qe7TBM= +github.com/liamg/magic v0.0.1/go.mod h1:yQkOmZZI52EA+SQ2xyHpVw8fNvTBruF873Y+Vt6S+fk= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod 
h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/nbd-wtf/go-nostr v0.51.7 h1:dGjtaaFQ1kA3H+vF8wt9a9WYl54K8C0JmVDf4cp+a4A= +github.com/nbd-wtf/go-nostr v0.51.7/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg= +github.com/nbd-wtf/go-nostr v0.51.8 h1:CIoS+YqChcm4e1L1rfMZ3/mIwTz4CwApM2qx7MHNzmE= +github.com/nbd-wtf/go-nostr v0.51.8/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 
h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc= +github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/valyala/bytebufferpool v1.0.0 
h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.59.0 h1:Qu0qYHfXvPk1mSLNqcFtEk6DpxgA26hy6bmydotDpRI= +github.com/valyala/fasthttp v1.59.0/go.mod h1:GTxNb9Bc6r2a9D0TWNSPwDz78UxnTGBViY3xZNEqyYU= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw= +golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp 
v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/khatru/handlers.go b/khatru/handlers.go new file mode 100644 index 0000000..dcf4523 --- /dev/null +++ b/khatru/handlers.go @@ -0,0 +1,430 @@ +package khatru + +import ( + "context" + "crypto/rand" + "encoding/hex" + "errors" + "net/http" + "strconv" + "strings" + "sync" + "time" + "unsafe" + + "github.com/bep/debounce" + "github.com/fasthttp/websocket" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip42" + "github.com/nbd-wtf/go-nostr/nip45" + "github.com/nbd-wtf/go-nostr/nip45/hyperloglog" + "github.com/nbd-wtf/go-nostr/nip70" + "github.com/nbd-wtf/go-nostr/nip77" + "github.com/nbd-wtf/go-nostr/nip77/negentropy" + "github.com/puzpuzpuz/xsync/v3" + "github.com/rs/cors" +) + +// ServeHTTP implements http.Handler interface. +func (rl *Relay) ServeHTTP(w http.ResponseWriter, r *http.Request) { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{ + http.MethodHead, + http.MethodGet, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + }, + AllowedHeaders: []string{"Authorization", "*"}, + MaxAge: 86400, + }) + + if r.Header.Get("Upgrade") == "websocket" { + rl.HandleWebsocket(w, r) + } else if r.Header.Get("Accept") == "application/nostr+json" { + corsMiddleware.Handler(http.HandlerFunc(rl.HandleNIP11)).ServeHTTP(w, r) + } else if r.Header.Get("Content-Type") == "application/nostr+json+rpc" { + corsMiddleware.Handler(http.HandlerFunc(rl.HandleNIP86)).ServeHTTP(w, r) + } else { + corsMiddleware.Handler(rl.serveMux).ServeHTTP(w, r) + } +} + +func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) { + for _, reject := range rl.RejectConnection { + if reject(r) { + w.WriteHeader(429) // Too many requests + return + } + } + + conn, err := rl.upgrader.Upgrade(w, r, nil) + if err != nil { + rl.Log.Printf("failed to 
upgrade websocket: %v\n", err) + return + } + + ticker := time.NewTicker(rl.PingPeriod) + + // NIP-42 challenge + challenge := make([]byte, 8) + rand.Read(challenge) + + ws := &WebSocket{ + conn: conn, + Request: r, + Challenge: hex.EncodeToString(challenge), + negentropySessions: xsync.NewMapOf[string, *NegentropySession](), + } + ws.Context, ws.cancel = context.WithCancel(context.Background()) + + rl.clientsMutex.Lock() + rl.clients[ws] = make([]listenerSpec, 0, 2) + rl.clientsMutex.Unlock() + + ctx, cancel := context.WithCancel( + context.WithValue( + context.Background(), + wsKey, ws, + ), + ) + + kill := func() { + for _, ondisconnect := range rl.OnDisconnect { + ondisconnect(ctx) + } + + ticker.Stop() + cancel() + ws.cancel() + ws.conn.Close() + + rl.removeClientAndListeners(ws) + } + + go func() { + defer kill() + + ws.conn.SetReadLimit(rl.MaxMessageSize) + ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait)) + ws.conn.SetPongHandler(func(string) error { + ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait)) + return nil + }) + + for _, onconnect := range rl.OnConnect { + onconnect(ctx) + } + + smp := nostr.NewMessageParser() + + for { + typ, msgb, err := ws.conn.ReadMessage() + if err != nil { + if websocket.IsUnexpectedCloseError( + err, + websocket.CloseNormalClosure, // 1000 + websocket.CloseGoingAway, // 1001 + websocket.CloseNoStatusReceived, // 1005 + websocket.CloseAbnormalClosure, // 1006 + 4537, // some client seems to send many of these + ) { + rl.Log.Printf("unexpected close error from %s: %v\n", GetIPFromRequest(r), err) + } + ws.cancel() + return + } + + if typ == websocket.PingMessage { + ws.WriteMessage(websocket.PongMessage, nil) + continue + } + + // this is safe because ReadMessage() will always create a new slice + message := unsafe.String(unsafe.SliceData(msgb), len(msgb)) + + // parse messages sequentially otherwise sonic breaks + envelope, err := smp.ParseMessage(message) + + // then delegate to the goroutine + go func(message string) 
{ + if err != nil { + if err == nostr.UnknownLabel && rl.Negentropy { + envelope = nip77.ParseNegMessage(message) + } + if envelope == nil { + ws.WriteJSON(nostr.NoticeEnvelope("failed to parse envelope: " + err.Error())) + return + } + } + + switch env := envelope.(type) { + case *nostr.EventEnvelope: + // check id + if !env.Event.CheckID() { + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "invalid: id is computed incorrectly"}) + return + } + + // check signature + if ok, err := env.Event.CheckSignature(); err != nil { + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to verify signature"}) + return + } else if !ok { + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "invalid: signature is invalid"}) + return + } + + // check NIP-70 protected + if nip70.IsProtected(env.Event) { + authed := GetAuthed(ctx) + if authed == "" { + RequestAuth(ctx) + ws.WriteJSON(nostr.OKEnvelope{ + EventID: env.Event.ID, + OK: false, + Reason: "auth-required: must be published by authenticated event author", + }) + return + } else if authed != env.Event.PubKey { + ws.WriteJSON(nostr.OKEnvelope{ + EventID: env.Event.ID, + OK: false, + Reason: "blocked: must be published by event author", + }) + return + } + } else if nip70.HasEmbeddedProtected(env.Event) { + ws.WriteJSON(nostr.OKEnvelope{ + EventID: env.Event.ID, + OK: false, + Reason: "blocked: can't repost nip70 protected", + }) + return + } + + srl := rl + if rl.getSubRelayFromEvent != nil { + srl = rl.getSubRelayFromEvent(&env.Event) + } + + var ok bool + var writeErr error + var skipBroadcast bool + + if env.Event.Kind == 5 { + // this always returns "blocked: " whenever it returns an error + writeErr = srl.handleDeleteRequest(ctx, &env.Event) + } else if nostr.IsEphemeralKind(env.Event.Kind) { + // this will also always return a prefixed reason + writeErr = srl.handleEphemeral(ctx, &env.Event) + } else { + // this will also always return a 
prefixed reason + skipBroadcast, writeErr = srl.handleNormal(ctx, &env.Event) + } + + var reason string + if writeErr == nil { + ok = true + for _, ovw := range srl.OverwriteResponseEvent { + ovw(ctx, &env.Event) + } + if !skipBroadcast { + n := srl.notifyListeners(&env.Event) + + // the number of notified listeners matters in ephemeral events + if nostr.IsEphemeralKind(env.Event.Kind) { + if n == 0 { + ok = false + reason = "mute: no one was listening for this" + } else { + reason = "broadcasted to " + strconv.Itoa(n) + " listeners" + } + } + } + } else { + ok = false + reason = writeErr.Error() + if strings.HasPrefix(reason, "auth-required:") { + RequestAuth(ctx) + } + } + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: ok, Reason: reason}) + case *nostr.CountEnvelope: + if rl.CountEvents == nil && rl.CountEventsHLL == nil { + ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: "unsupported: this relay does not support NIP-45"}) + return + } + + var total int64 + var hll *hyperloglog.HyperLogLog + + srl := rl + if rl.getSubRelayFromFilter != nil { + srl = rl.getSubRelayFromFilter(env.Filter) + } + + if offset := nip45.HyperLogLogEventPubkeyOffsetForFilter(env.Filter); offset != -1 { + total, hll = srl.handleCountRequestWithHLL(ctx, ws, env.Filter, offset) + } else { + total = srl.handleCountRequest(ctx, ws, env.Filter) + } + + resp := nostr.CountEnvelope{ + SubscriptionID: env.SubscriptionID, + Count: &total, + } + if hll != nil { + resp.HyperLogLog = hll.GetRegisters() + } + + ws.WriteJSON(resp) + + case *nostr.ReqEnvelope: + eose := sync.WaitGroup{} + eose.Add(len(env.Filters)) + + // a context just for the "stored events" request handler + reqCtx, cancelReqCtx := context.WithCancelCause(ctx) + + // expose subscription id in the context + reqCtx = context.WithValue(reqCtx, subscriptionIdKey, env.SubscriptionID) + + // handle each filter separately -- dispatching events as they're loaded from databases + for _, filter := range 
env.Filters { + srl := rl + if rl.getSubRelayFromFilter != nil { + srl = rl.getSubRelayFromFilter(filter) + } + err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter) + if err != nil { + // fail everything if any filter is rejected + reason := err.Error() + if strings.HasPrefix(reason, "auth-required:") { + RequestAuth(ctx) + } + ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason}) + cancelReqCtx(errors.New("filter rejected")) + return + } else { + rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx) + } + } + + go func() { + // when all events have been loaded from databases and dispatched we can fire the EOSE message + eose.Wait() + ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID)) + }() + case *nostr.CloseEnvelope: + id := string(*env) + rl.removeListenerId(ws, id) + case *nostr.AuthEnvelope: + wsBaseUrl := strings.Replace(rl.getBaseURL(r), "http", "ws", 1) + if pubkey, ok := nip42.ValidateAuthEvent(&env.Event, ws.Challenge, wsBaseUrl); ok { + ws.AuthedPublicKey = pubkey + ws.authLock.Lock() + if ws.Authed != nil { + close(ws.Authed) + ws.Authed = nil + } + ws.authLock.Unlock() + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: true}) + } else { + ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to authenticate"}) + } + case *nip77.OpenEnvelope: + srl := rl + if rl.getSubRelayFromFilter != nil { + srl = rl.getSubRelayFromFilter(env.Filter) + if !srl.Negentropy { + // ignore + return + } + } + vec, err := srl.startNegentropySession(ctx, env.Filter) + if err != nil { + // fail everything if any filter is rejected + reason := err.Error() + if strings.HasPrefix(reason, "auth-required:") { + RequestAuth(ctx) + } + ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason}) + return + } + + // reconcile to get the next message and return it + neg := negentropy.New(vec, 1024*1024) + out, err := neg.Reconcile(env.Message) + if err != 
nil { + ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()}) + return + } + ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out}) + + // if the message is not empty that means we'll probably have more reconciliation sessions, so store this + if out != "" { + deb := debounce.New(time.Second * 7) + negSession := &NegentropySession{ + neg: neg, + postponeClose: func() { + deb(func() { + ws.negentropySessions.Delete(env.SubscriptionID) + }) + }, + } + negSession.postponeClose() + + ws.negentropySessions.Store(env.SubscriptionID, negSession) + } + case *nip77.MessageEnvelope: + negSession, ok := ws.negentropySessions.Load(env.SubscriptionID) + if !ok { + // bad luck, your request was destroyed + ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: "CLOSED"}) + return + } + // reconcile to get the next message and return it + out, err := negSession.neg.Reconcile(env.Message) + if err != nil { + ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()}) + ws.negentropySessions.Delete(env.SubscriptionID) + return + } + ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out}) + + // if there is more reconciliation to do, postpone this + if out != "" { + negSession.postponeClose() + } else { + // otherwise we can just close it + ws.negentropySessions.Delete(env.SubscriptionID) + } + case *nip77.CloseEnvelope: + ws.negentropySessions.Delete(env.SubscriptionID) + } + }(message) + } + }() + + go func() { + defer kill() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + err := ws.WriteMessage(websocket.PingMessage, nil) + if err != nil { + if !strings.HasSuffix(err.Error(), "use of closed network connection") { + rl.Log.Printf("error writing ping: %v; closing websocket\n", err) + } + return + } + } + } + }() +} diff --git a/khatru/helpers.go b/khatru/helpers.go new file mode 100644 index 0000000..e42583a 
--- /dev/null +++ b/khatru/helpers.go @@ -0,0 +1,54 @@ +package khatru + +import ( + "net" + "net/http" + "strings" + + "github.com/nbd-wtf/go-nostr" +) + +func isOlder(previous, next *nostr.Event) bool { + return previous.CreatedAt < next.CreatedAt || + (previous.CreatedAt == next.CreatedAt && previous.ID > next.ID) +} + +var privateMasks = func() []net.IPNet { + privateCIDRs := []string{ + "127.0.0.0/8", + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "fc00::/7", + } + masks := make([]net.IPNet, len(privateCIDRs)) + for i, cidr := range privateCIDRs { + _, netw, err := net.ParseCIDR(cidr) + if err != nil { + return nil + } + masks[i] = *netw + } + return masks +}() + +func isPrivate(ip net.IP) bool { + for _, mask := range privateMasks { + if mask.Contains(ip) { + return true + } + } + return false +} + +func GetIPFromRequest(r *http.Request) string { + if xffh := r.Header.Get("X-Forwarded-For"); xffh != "" { + for _, v := range strings.Split(xffh, ",") { + if ip := net.ParseIP(strings.TrimSpace(v)); ip != nil && ip.IsGlobalUnicast() && !isPrivate(ip) { + return ip.String() + } + } + } + ip, _, _ := net.SplitHostPort(r.RemoteAddr) + return ip +} diff --git a/khatru/listener.go b/khatru/listener.go new file mode 100644 index 0000000..8a55793 --- /dev/null +++ b/khatru/listener.go @@ -0,0 +1,151 @@ +package khatru + +import ( + "context" + "errors" + "slices" + + "github.com/nbd-wtf/go-nostr" +) + +var ErrSubscriptionClosedByClient = errors.New("subscription closed by client") + +type listenerSpec struct { + id string // kept here so we can easily match against it removeListenerId + cancel context.CancelCauseFunc + index int + subrelay *Relay // this is important when we're dealing with routing, otherwise it will be always the same +} + +type listener struct { + id string // duplicated here so we can easily send it on notifyListeners + filter nostr.Filter + ws *WebSocket +} + +func (rl *Relay) GetListeningFilters() []nostr.Filter { + respfilters := 
make([]nostr.Filter, len(rl.listeners)) + for i, l := range rl.listeners { + respfilters[i] = l.filter + } + return respfilters +} + +// addListener may be called multiple times for each id and ws -- in which case each filter will +// be added as an independent listener +func (rl *Relay) addListener( + ws *WebSocket, + id string, + subrelay *Relay, + filter nostr.Filter, + cancel context.CancelCauseFunc, +) { + rl.clientsMutex.Lock() + defer rl.clientsMutex.Unlock() + + if specs, ok := rl.clients[ws]; ok /* this will always be true unless client has disconnected very rapidly */ { + idx := len(subrelay.listeners) + rl.clients[ws] = append(specs, listenerSpec{ + id: id, + cancel: cancel, + subrelay: subrelay, + index: idx, + }) + subrelay.listeners = append(subrelay.listeners, listener{ + ws: ws, + id: id, + filter: filter, + }) + } +} + +// remove a specific subscription id from listeners for a given ws client +// and cancel its specific context +func (rl *Relay) removeListenerId(ws *WebSocket, id string) { + rl.clientsMutex.Lock() + defer rl.clientsMutex.Unlock() + + if specs, ok := rl.clients[ws]; ok { + // swap delete specs that match this id + for s := len(specs) - 1; s >= 0; s-- { + spec := specs[s] + if spec.id == id { + spec.cancel(ErrSubscriptionClosedByClient) + specs[s] = specs[len(specs)-1] + specs = specs[0 : len(specs)-1] + rl.clients[ws] = specs + + // swap delete listeners one at a time, as they may be each in a different subrelay + srl := spec.subrelay // == rl in normal cases, but different when this came from a route + + if spec.index != len(srl.listeners)-1 { + movedFromIndex := len(srl.listeners) - 1 + moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved + srl.listeners[spec.index] = moved + + // now we must update the the listener we just moved + // so its .index reflects its new position on srl.listeners + movedSpecs := rl.clients[moved.ws] + idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool { + return 
ls.index == movedFromIndex && ls.subrelay == srl + }) + movedSpecs[idx].index = spec.index + rl.clients[moved.ws] = movedSpecs + } + srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length + } + } + } +} + +func (rl *Relay) removeClientAndListeners(ws *WebSocket) { + rl.clientsMutex.Lock() + defer rl.clientsMutex.Unlock() + if specs, ok := rl.clients[ws]; ok { + // swap delete listeners and delete client (all specs will be deleted) + for s, spec := range specs { + // no need to cancel contexts since they inherit from the main connection context + // just delete the listeners (swap-delete) + srl := spec.subrelay + + if spec.index != len(srl.listeners)-1 { + movedFromIndex := len(srl.listeners) - 1 + moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved + srl.listeners[spec.index] = moved + + // temporarily update the spec of the listener being removed to have index == -1 + // (since it was removed) so it doesn't match in the search below + rl.clients[ws][s].index = -1 + + // now we must update the the listener we just moved + // so its .index reflects its new position on srl.listeners + movedSpecs := rl.clients[moved.ws] + idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool { + return ls.index == movedFromIndex && ls.subrelay == srl + }) + movedSpecs[idx].index = spec.index + rl.clients[moved.ws] = movedSpecs + } + srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length + } + } + delete(rl.clients, ws) +} + +// returns how many listeners were notified +func (rl *Relay) notifyListeners(event *nostr.Event) int { + count := 0 +listenersloop: + for _, listener := range rl.listeners { + if listener.filter.Matches(event) { + for _, pb := range rl.PreventBroadcast { + if pb(listener.ws, event) { + continue listenersloop + } + } + listener.ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &listener.id, Event: *event}) + count++ + } + } + return count +} diff --git 
a/khatru/listener_fuzz_test.go b/khatru/listener_fuzz_test.go new file mode 100644 index 0000000..bbb87aa --- /dev/null +++ b/khatru/listener_fuzz_test.go @@ -0,0 +1,188 @@ +package khatru + +import ( + "math/rand" + "testing" + + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +func FuzzRandomListenerClientRemoving(f *testing.F) { + f.Add(uint(20), uint(20), uint(1)) + f.Fuzz(func(t *testing.T, utw uint, ubs uint, ualf uint) { + totalWebsockets := int(utw) + baseSubs := int(ubs) + addListenerFreq := int(ualf) + 1 + + rl := NewRelay() + + f := nostr.Filter{Kinds: []int{1}} + cancel := func(cause error) {} + + websockets := make([]*WebSocket, 0, totalWebsockets*baseSubs) + + l := 0 + + for i := 0; i < totalWebsockets; i++ { + ws := &WebSocket{} + websockets = append(websockets, ws) + rl.clients[ws] = nil + } + + s := 0 + for j := 0; j < baseSubs; j++ { + for i := 0; i < totalWebsockets; i++ { + ws := websockets[i] + w := idFromSeqUpper(i) + + if s%addListenerFreq == 0 { + l++ + rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel) + } + + s++ + } + } + + require.Len(t, rl.clients, totalWebsockets) + require.Len(t, rl.listeners, l) + + for ws := range rl.clients { + rl.removeClientAndListeners(ws) + } + + require.Len(t, rl.clients, 0) + require.Len(t, rl.listeners, 0) + }) +} + +func FuzzRandomListenerIdRemoving(f *testing.F) { + f.Add(uint(20), uint(20), uint(1), uint(4)) + f.Fuzz(func(t *testing.T, utw uint, ubs uint, ualf uint, ualef uint) { + totalWebsockets := int(utw) + baseSubs := int(ubs) + addListenerFreq := int(ualf) + 1 + addExtraListenerFreq := int(ualef) + 1 + + if totalWebsockets > 1024 || baseSubs > 1024 { + return + } + + rl := NewRelay() + + f := nostr.Filter{Kinds: []int{1}} + cancel := func(cause error) {} + websockets := make([]*WebSocket, 0, totalWebsockets) + + type wsid struct { + ws *WebSocket + id string + } + + subs := make([]wsid, 0, totalWebsockets*baseSubs) + extra := 0 + + for i := 0; i < totalWebsockets; 
i++ { + ws := &WebSocket{} + websockets = append(websockets, ws) + rl.clients[ws] = nil + } + + s := 0 + for j := 0; j < baseSubs; j++ { + for i := 0; i < totalWebsockets; i++ { + ws := websockets[i] + w := idFromSeqUpper(i) + + if s%addListenerFreq == 0 { + id := w + ":" + idFromSeqLower(j) + rl.addListener(ws, id, rl, f, cancel) + subs = append(subs, wsid{ws, id}) + + if s%addExtraListenerFreq == 0 { + rl.addListener(ws, id, rl, f, cancel) + extra++ + } + } + + s++ + } + } + + require.Len(t, rl.clients, totalWebsockets) + require.Len(t, rl.listeners, len(subs)+extra) + + rand.Shuffle(len(subs), func(i, j int) { + subs[i], subs[j] = subs[j], subs[i] + }) + for _, wsidToRemove := range subs { + rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id) + } + + require.Len(t, rl.listeners, 0) + require.Len(t, rl.clients, totalWebsockets) + for _, specs := range rl.clients { + require.Len(t, specs, 0) + } + }) +} + +func FuzzRouterListenersPabloCrash(f *testing.F) { + f.Add(uint(3), uint(6), uint(2), uint(20)) + f.Fuzz(func(t *testing.T, totalRelays uint, totalConns uint, subFreq uint, subIterations uint) { + totalRelays++ + totalConns++ + subFreq++ + subIterations++ + + rl := NewRelay() + + relays := make([]*Relay, int(totalRelays)) + for i := 0; i < int(totalRelays); i++ { + relays[i] = NewRelay() + } + + conns := make([]*WebSocket, int(totalConns)) + for i := 0; i < int(totalConns); i++ { + ws := &WebSocket{} + conns[i] = ws + rl.clients[ws] = make([]listenerSpec, 0, subIterations) + } + + f := nostr.Filter{Kinds: []int{1}} + cancel := func(cause error) {} + + type wsid struct { + ws *WebSocket + id string + } + + s := 0 + subs := make([]wsid, 0, subIterations*totalConns*totalRelays) + for i, conn := range conns { + w := idFromSeqUpper(i) + for j := 0; j < int(subIterations); j++ { + id := w + ":" + idFromSeqLower(j) + for _, rlt := range relays { + if s%int(subFreq) == 0 { + rl.addListener(conn, id, rlt, f, cancel) + subs = append(subs, wsid{conn, id}) + } + s++ + } + 
} + } + + for _, wsid := range subs { + rl.removeListenerId(wsid.ws, wsid.id) + } + + for _, wsid := range subs { + require.Len(t, rl.clients[wsid.ws], 0) + } + for _, rlt := range relays { + require.Len(t, rlt.listeners, 0) + } + }) +} diff --git a/khatru/listener_test.go b/khatru/listener_test.go new file mode 100644 index 0000000..3afbd2a --- /dev/null +++ b/khatru/listener_test.go @@ -0,0 +1,545 @@ +package khatru + +import ( + "math/rand" + "strings" + "testing" + + "github.com/nbd-wtf/go-nostr" + "github.com/stretchr/testify/require" +) + +func idFromSeqUpper(seq int) string { return idFromSeq(seq, 65, 90) } +func idFromSeqLower(seq int) string { return idFromSeq(seq, 97, 122) } +func idFromSeq(seq int, min, max int) string { + maxSeq := max - min + 1 + nLetters := seq/maxSeq + 1 + result := strings.Builder{} + result.Grow(nLetters) + for l := 0; l < nLetters; l++ { + letter := rune(seq%maxSeq + min) + result.WriteRune(letter) + } + return result.String() +} + +func TestListenerSetupAndRemoveOnce(t *testing.T) { + rl := NewRelay() + + ws1 := &WebSocket{} + ws2 := &WebSocket{} + + f1 := nostr.Filter{Kinds: []int{1}} + f2 := nostr.Filter{Kinds: []int{2}} + f3 := nostr.Filter{Kinds: []int{3}} + + rl.clients[ws1] = nil + rl.clients[ws2] = nil + + var cancel func(cause error) = nil + + t.Run("adding listeners", func(t *testing.T) { + rl.addListener(ws1, "1a", rl, f1, cancel) + rl.addListener(ws1, "1b", rl, f2, cancel) + rl.addListener(ws2, "2a", rl, f3, cancel) + rl.addListener(ws1, "1c", rl, f3, cancel) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"1a", cancel, 0, rl}, + {"1b", cancel, 1, rl}, + {"1c", cancel, 3, rl}, + }, + ws2: { + {"2a", cancel, 2, rl}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"1a", f1, ws1}, + {"1b", f2, ws1}, + {"2a", f3, ws2}, + {"1c", f3, ws1}, + }, rl.listeners) + }) + + t.Run("removing a client", func(t *testing.T) { + rl.removeClientAndListeners(ws1) + + require.Equal(t, map[*WebSocket][]listenerSpec{ 
+ ws2: { + {"2a", cancel, 0, rl}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"2a", f3, ws2}, + }, rl.listeners) + }) +} + +func TestListenerMoreConvolutedCase(t *testing.T) { + rl := NewRelay() + + ws1 := &WebSocket{} + ws2 := &WebSocket{} + ws3 := &WebSocket{} + ws4 := &WebSocket{} + + f1 := nostr.Filter{Kinds: []int{1}} + f2 := nostr.Filter{Kinds: []int{2}} + f3 := nostr.Filter{Kinds: []int{3}} + + rl.clients[ws1] = nil + rl.clients[ws2] = nil + rl.clients[ws3] = nil + rl.clients[ws4] = nil + + var cancel func(cause error) = nil + + t.Run("adding listeners", func(t *testing.T) { + rl.addListener(ws1, "c", rl, f1, cancel) + rl.addListener(ws2, "b", rl, f2, cancel) + rl.addListener(ws3, "a", rl, f3, cancel) + rl.addListener(ws4, "d", rl, f3, cancel) + rl.addListener(ws2, "b", rl, f1, cancel) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rl}, + }, + ws2: { + {"b", cancel, 1, rl}, + {"b", cancel, 4, rl}, + }, + ws3: { + {"a", cancel, 2, rl}, + }, + ws4: { + {"d", cancel, 3, rl}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"b", f2, ws2}, + {"a", f3, ws3}, + {"d", f3, ws4}, + {"b", f1, ws2}, + }, rl.listeners) + }) + + t.Run("removing a client", func(t *testing.T) { + rl.removeClientAndListeners(ws2) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rl}, + }, + ws3: { + {"a", cancel, 2, rl}, + }, + ws4: { + {"d", cancel, 1, rl}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"d", f3, ws4}, + {"a", f3, ws3}, + }, rl.listeners) + }) + + t.Run("reorganize the first case differently and then remove again", func(t *testing.T) { + rl.clients = map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 1, rl}, + }, + ws2: { + {"b", cancel, 2, rl}, + {"b", cancel, 4, rl}, + }, + ws3: { + {"a", cancel, 0, rl}, + }, + ws4: { + {"d", cancel, 3, rl}, + }, + } + rl.listeners = []listener{ + {"a", f3, ws3}, + {"c", f1, ws1}, + {"b", f2, ws2}, + 
{"d", f3, ws4}, + {"b", f1, ws2}, + } + + rl.removeClientAndListeners(ws2) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 1, rl}, + }, + ws3: { + {"a", cancel, 0, rl}, + }, + ws4: { + {"d", cancel, 2, rl}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"a", f3, ws3}, + {"c", f1, ws1}, + {"d", f3, ws4}, + }, rl.listeners) + }) +} + +func TestListenerMoreStuffWithMultipleRelays(t *testing.T) { + rl := NewRelay() + + ws1 := &WebSocket{} + ws2 := &WebSocket{} + ws3 := &WebSocket{} + ws4 := &WebSocket{} + + f1 := nostr.Filter{Kinds: []int{1}} + f2 := nostr.Filter{Kinds: []int{2}} + f3 := nostr.Filter{Kinds: []int{3}} + + rlx := NewRelay() + rly := NewRelay() + rlz := NewRelay() + + rl.clients[ws1] = nil + rl.clients[ws2] = nil + rl.clients[ws3] = nil + rl.clients[ws4] = nil + + var cancel func(cause error) = nil + + t.Run("adding listeners", func(t *testing.T) { + rl.addListener(ws1, "c", rlx, f1, cancel) + rl.addListener(ws2, "b", rly, f2, cancel) + rl.addListener(ws3, "a", rlz, f3, cancel) + rl.addListener(ws4, "d", rlx, f3, cancel) + rl.addListener(ws4, "e", rlx, f3, cancel) + rl.addListener(ws3, "a", rlx, f3, cancel) + rl.addListener(ws4, "e", rly, f3, cancel) + rl.addListener(ws3, "f", rly, f3, cancel) + rl.addListener(ws1, "g", rlz, f1, cancel) + rl.addListener(ws2, "g", rlz, f2, cancel) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rlx}, + {"g", cancel, 1, rlz}, + }, + ws2: { + {"b", cancel, 0, rly}, + {"g", cancel, 2, rlz}, + }, + ws3: { + {"a", cancel, 0, rlz}, + {"a", cancel, 3, rlx}, + {"f", cancel, 2, rly}, + }, + ws4: { + {"d", cancel, 1, rlx}, + {"e", cancel, 2, rlx}, + {"e", cancel, 1, rly}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"d", f3, ws4}, + {"e", f3, ws4}, + {"a", f3, ws3}, + }, rlx.listeners) + + require.Equal(t, []listener{ + {"b", f2, ws2}, + {"e", f3, ws4}, + {"f", f3, ws3}, + }, rly.listeners) + + require.Equal(t, []listener{ + {"a", 
f3, ws3}, + {"g", f1, ws1}, + {"g", f2, ws2}, + }, rlz.listeners) + }) + + t.Run("removing a subscription id", func(t *testing.T) { + // removing 'd' from ws4 + rl.clients[ws4][0].cancel = func(cause error) {} // set since removing will call it + rl.removeListenerId(ws4, "d") + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rlx}, + {"g", cancel, 1, rlz}, + }, + ws2: { + {"b", cancel, 0, rly}, + {"g", cancel, 2, rlz}, + }, + ws3: { + {"a", cancel, 0, rlz}, + {"a", cancel, 1, rlx}, + {"f", cancel, 2, rly}, + }, + ws4: { + {"e", cancel, 1, rly}, + {"e", cancel, 2, rlx}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"a", f3, ws3}, + {"e", f3, ws4}, + }, rlx.listeners) + + require.Equal(t, []listener{ + {"b", f2, ws2}, + {"e", f3, ws4}, + {"f", f3, ws3}, + }, rly.listeners) + + require.Equal(t, []listener{ + {"a", f3, ws3}, + {"g", f1, ws1}, + {"g", f2, ws2}, + }, rlz.listeners) + }) + + t.Run("removing another subscription id", func(t *testing.T) { + // removing 'a' from ws3 + rl.clients[ws3][0].cancel = func(cause error) {} // set since removing will call it + rl.clients[ws3][1].cancel = func(cause error) {} // set since removing will call it + rl.removeListenerId(ws3, "a") + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rlx}, + {"g", cancel, 1, rlz}, + }, + ws2: { + {"b", cancel, 0, rly}, + {"g", cancel, 0, rlz}, + }, + ws3: { + {"f", cancel, 2, rly}, + }, + ws4: { + {"e", cancel, 1, rly}, + {"e", cancel, 1, rlx}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"e", f3, ws4}, + }, rlx.listeners) + + require.Equal(t, []listener{ + {"b", f2, ws2}, + {"e", f3, ws4}, + {"f", f3, ws3}, + }, rly.listeners) + + require.Equal(t, []listener{ + {"g", f2, ws2}, + {"g", f1, ws1}, + }, rlz.listeners) + }) + + t.Run("removing a connection", func(t *testing.T) { + rl.removeClientAndListeners(ws2) + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + 
{"c", cancel, 0, rlx}, + {"g", cancel, 0, rlz}, + }, + ws3: { + {"f", cancel, 0, rly}, + }, + ws4: { + {"e", cancel, 1, rly}, + {"e", cancel, 1, rlx}, + }, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + {"e", f3, ws4}, + }, rlx.listeners) + + require.Equal(t, []listener{ + {"f", f3, ws3}, + {"e", f3, ws4}, + }, rly.listeners) + + require.Equal(t, []listener{ + {"g", f1, ws1}, + }, rlz.listeners) + }) + + t.Run("removing another subscription id", func(t *testing.T) { + // removing 'e' from ws4 + rl.clients[ws4][0].cancel = func(cause error) {} // set since removing will call it + rl.clients[ws4][1].cancel = func(cause error) {} // set since removing will call it + rl.removeListenerId(ws4, "e") + + require.Equal(t, map[*WebSocket][]listenerSpec{ + ws1: { + {"c", cancel, 0, rlx}, + {"g", cancel, 0, rlz}, + }, + ws3: { + {"f", cancel, 0, rly}, + }, + ws4: {}, + }, rl.clients) + + require.Equal(t, []listener{ + {"c", f1, ws1}, + }, rlx.listeners) + + require.Equal(t, []listener{ + {"f", f3, ws3}, + }, rly.listeners) + + require.Equal(t, []listener{ + {"g", f1, ws1}, + }, rlz.listeners) + }) +} + +func TestRandomListenerClientRemoving(t *testing.T) { + rl := NewRelay() + + f := nostr.Filter{Kinds: []int{1}} + cancel := func(cause error) {} + + websockets := make([]*WebSocket, 0, 20) + + l := 0 + + for i := 0; i < 20; i++ { + ws := &WebSocket{} + websockets = append(websockets, ws) + rl.clients[ws] = nil + } + + for j := 0; j < 20; j++ { + for i := 0; i < 20; i++ { + ws := websockets[i] + w := idFromSeqUpper(i) + + if rand.Intn(2) < 1 { + l++ + rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel) + } + } + } + + require.Len(t, rl.clients, 20) + require.Len(t, rl.listeners, l) + + for ws := range rl.clients { + rl.removeClientAndListeners(ws) + } + + require.Len(t, rl.clients, 0) + require.Len(t, rl.listeners, 0) +} + +func TestRandomListenerIdRemoving(t *testing.T) { + rl := NewRelay() + + f := nostr.Filter{Kinds: []int{1}} + cancel := 
func(cause error) {} + + websockets := make([]*WebSocket, 0, 20) + + type wsid struct { + ws *WebSocket + id string + } + + subs := make([]wsid, 0, 20*20) + extra := 0 + + for i := 0; i < 20; i++ { + ws := &WebSocket{} + websockets = append(websockets, ws) + rl.clients[ws] = nil + } + + for j := 0; j < 20; j++ { + for i := 0; i < 20; i++ { + ws := websockets[i] + w := idFromSeqUpper(i) + + if rand.Intn(2) < 1 { + id := w + ":" + idFromSeqLower(j) + rl.addListener(ws, id, rl, f, cancel) + subs = append(subs, wsid{ws, id}) + + if rand.Intn(5) < 1 { + rl.addListener(ws, id, rl, f, cancel) + extra++ + } + } + } + } + + require.Len(t, rl.clients, 20) + require.Len(t, rl.listeners, len(subs)+extra) + + rand.Shuffle(len(subs), func(i, j int) { + subs[i], subs[j] = subs[j], subs[i] + }) + for _, wsidToRemove := range subs { + rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id) + } + + require.Len(t, rl.listeners, 0) + require.Len(t, rl.clients, 20) + for _, specs := range rl.clients { + require.Len(t, specs, 0) + } +} + +func TestRouterListenersPabloCrash(t *testing.T) { + rl := NewRelay() + + rla := NewRelay() + rlb := NewRelay() + + ws1 := &WebSocket{} + ws2 := &WebSocket{} + ws3 := &WebSocket{} + + rl.clients[ws1] = nil + rl.clients[ws2] = nil + rl.clients[ws3] = nil + + f := nostr.Filter{Kinds: []int{1}} + cancel := func(cause error) {} + + rl.addListener(ws1, ":1", rla, f, cancel) + rl.addListener(ws2, ":1", rlb, f, cancel) + rl.addListener(ws3, "a", rlb, f, cancel) + rl.addListener(ws3, "b", rla, f, cancel) + rl.addListener(ws3, "c", rlb, f, cancel) + + rl.removeClientAndListeners(ws1) + rl.removeClientAndListeners(ws3) +} diff --git a/khatru/negentropy.go b/khatru/negentropy.go new file mode 100644 index 0000000..5b3e19c --- /dev/null +++ b/khatru/negentropy.go @@ -0,0 +1,53 @@ +package khatru + +import ( + "context" + "errors" + "fmt" + + "github.com/fiatjaf/eventstore" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip77/negentropy" + 
"github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector" +) + +type NegentropySession struct { + neg *negentropy.Negentropy + postponeClose func() +} + +func (rl *Relay) startNegentropySession(ctx context.Context, filter nostr.Filter) (*vector.Vector, error) { + ctx = eventstore.SetNegentropy(ctx) + + // do the same overwrite/reject flow we do in normal REQs + for _, ovw := range rl.OverwriteFilter { + ovw(ctx, &filter) + } + if filter.LimitZero { + return nil, fmt.Errorf("invalid limit 0") + } + for _, reject := range rl.RejectFilter { + if reject, msg := reject(ctx, filter); reject { + return nil, errors.New(nostr.NormalizeOKMessage(msg, "blocked")) + } + } + + // fetch events and add them to a negentropy Vector store + vec := vector.New() + for _, query := range rl.QueryEvents { + ch, err := query(ctx, filter) + if err != nil { + continue + } else if ch == nil { + continue + } + + for event := range ch { + // since the goal here is to sync databases we won't do fancy stuff like overwrite events + vec.Insert(event.CreatedAt, event.ID) + } + } + vec.Seal() + + return vec, nil +} diff --git a/khatru/nip11.go b/khatru/nip11.go new file mode 100644 index 0000000..2226a2f --- /dev/null +++ b/khatru/nip11.go @@ -0,0 +1,38 @@ +package khatru + +import ( + "encoding/json" + "net/http" + "strings" +) + +func (rl *Relay) HandleNIP11(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/nostr+json") + + info := *rl.Info + + if len(rl.DeleteEvent) > 0 { + info.AddSupportedNIP(9) + } + if len(rl.CountEvents) > 0 { + info.AddSupportedNIP(45) + } + if rl.Negentropy { + info.AddSupportedNIP(77) + } + + // resolve relative icon and banner URLs against base URL + baseURL := rl.getBaseURL(r) + if info.Icon != "" && !strings.HasPrefix(info.Icon, "http://") && !strings.HasPrefix(info.Icon, "https://") { + info.Icon = strings.TrimSuffix(baseURL, "/") + "/" + strings.TrimPrefix(info.Icon, "/") + } + if info.Banner != "" && 
!strings.HasPrefix(info.Banner, "http://") && !strings.HasPrefix(info.Banner, "https://") { + info.Banner = strings.TrimSuffix(baseURL, "/") + "/" + strings.TrimPrefix(info.Banner, "/") + } + + for _, ovw := range rl.OverwriteRelayInformation { + info = ovw(r.Context(), r, info) + } + + json.NewEncoder(w).Encode(info) +} diff --git a/khatru/nip86.go b/khatru/nip86.go new file mode 100644 index 0000000..e287054 --- /dev/null +++ b/khatru/nip86.go @@ -0,0 +1,328 @@ +package khatru + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "reflect" + "strings" + + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip86" +) + +type RelayManagementAPI struct { + RejectAPICall []func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string) + + BanPubKey func(ctx context.Context, pubkey string, reason string) error + ListBannedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error) + AllowPubKey func(ctx context.Context, pubkey string, reason string) error + ListAllowedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error) + ListEventsNeedingModeration func(ctx context.Context) ([]nip86.IDReason, error) + AllowEvent func(ctx context.Context, id string, reason string) error + BanEvent func(ctx context.Context, id string, reason string) error + ListBannedEvents func(ctx context.Context) ([]nip86.IDReason, error) + ListAllowedEvents func(ctx context.Context) ([]nip86.IDReason, error) + ChangeRelayName func(ctx context.Context, name string) error + ChangeRelayDescription func(ctx context.Context, desc string) error + ChangeRelayIcon func(ctx context.Context, icon string) error + AllowKind func(ctx context.Context, kind int) error + DisallowKind func(ctx context.Context, kind int) error + ListAllowedKinds func(ctx context.Context) ([]int, error) + ListDisAllowedKinds func(ctx context.Context) ([]int, error) + BlockIP func(ctx context.Context, ip net.IP, reason 
string) error + UnblockIP func(ctx context.Context, ip net.IP, reason string) error + ListBlockedIPs func(ctx context.Context) ([]nip86.IPReason, error) + Stats func(ctx context.Context) (nip86.Response, error) + GrantAdmin func(ctx context.Context, pubkey string, methods []string) error + RevokeAdmin func(ctx context.Context, pubkey string, methods []string) error + Generic func(ctx context.Context, request nip86.Request) (nip86.Response, error) +} + +func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/nostr+json+rpc") + + var ( + resp nip86.Response + ctx = r.Context() + req nip86.Request + mp nip86.MethodParams + evt nostr.Event + payloadHash [32]byte + ) + + payload, err := io.ReadAll(r.Body) + if err != nil { + resp.Error = "empty request" + goto respond + } + payloadHash = sha256.Sum256(payload) + + { + auth := r.Header.Get("Authorization") + spl := strings.Split(auth, "Nostr ") + if len(spl) != 2 { + resp.Error = "missing auth" + goto respond + } + + evtj, err := base64.StdEncoding.DecodeString(spl[1]) + if err != nil { + resp.Error = "invalid base64 auth" + goto respond + } + if err := json.Unmarshal(evtj, &evt); err != nil { + resp.Error = "invalid auth event json" + goto respond + } + if ok, _ := evt.CheckSignature(); !ok { + resp.Error = "invalid auth event" + goto respond + } + + if uTag := evt.Tags.Find("u"); uTag == nil || rl.getBaseURL(r) != uTag[1] { + resp.Error = "invalid 'u' tag" + goto respond + } else if pht := evt.Tags.FindWithValue("payload", hex.EncodeToString(payloadHash[:])); pht == nil { + resp.Error = "invalid auth event payload hash" + goto respond + } else if evt.CreatedAt < nostr.Now()-30 { + resp.Error = "auth event is too old" + goto respond + } + } + + if err := json.Unmarshal(payload, &req); err != nil { + resp.Error = "invalid json body" + goto respond + } + + mp, err = nip86.DecodeRequest(req) + if err != nil { + resp.Error = fmt.Sprintf("invalid params: %s", err) 
+ goto respond + } + + ctx = context.WithValue(ctx, nip86HeaderAuthKey, evt.PubKey) + for _, rac := range rl.ManagementAPI.RejectAPICall { + if reject, msg := rac(ctx, mp); reject { + resp.Error = msg + goto respond + } + } + + if _, ok := mp.(nip86.SupportedMethods); ok { + mat := reflect.TypeOf(rl.ManagementAPI) + mav := reflect.ValueOf(rl.ManagementAPI) + + methods := make([]string, 0, mat.NumField()) + for i := 0; i < mat.NumField(); i++ { + field := mat.Field(i) + + // danger: this assumes the struct fields are appropriately named + methodName := strings.ToLower(field.Name) + + // assign this only if the function was defined + if mav.Field(i).Interface() != nil { + methods[i] = methodName + } + } + resp.Result = methods + } else { + switch thing := mp.(type) { + case nip86.BanPubKey: + if rl.ManagementAPI.BanPubKey == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.BanPubKey(ctx, thing.PubKey, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListBannedPubKeys: + if rl.ManagementAPI.ListBannedPubKeys == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListBannedPubKeys(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.AllowPubKey: + if rl.ManagementAPI.AllowPubKey == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.AllowPubKey(ctx, thing.PubKey, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListAllowedPubKeys: + if rl.ManagementAPI.ListAllowedPubKeys == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListAllowedPubKeys(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.BanEvent: 
+ if rl.ManagementAPI.BanEvent == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.BanEvent(ctx, thing.ID, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.AllowEvent: + if rl.ManagementAPI.AllowEvent == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.AllowEvent(ctx, thing.ID, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListEventsNeedingModeration: + if rl.ManagementAPI.ListEventsNeedingModeration == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListEventsNeedingModeration(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.ListBannedEvents: + if rl.ManagementAPI.ListBannedEvents == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListEventsNeedingModeration(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.ChangeRelayName: + if rl.ManagementAPI.ChangeRelayName == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.ChangeRelayName(ctx, thing.Name); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ChangeRelayDescription: + if rl.ManagementAPI.ChangeRelayDescription == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.ChangeRelayDescription(ctx, thing.Description); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ChangeRelayIcon: + if rl.ManagementAPI.ChangeRelayIcon == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := 
rl.ManagementAPI.ChangeRelayIcon(ctx, thing.IconURL); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.AllowKind: + if rl.ManagementAPI.AllowKind == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.AllowKind(ctx, thing.Kind); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.DisallowKind: + if rl.ManagementAPI.DisallowKind == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.DisallowKind(ctx, thing.Kind); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListAllowedKinds: + if rl.ManagementAPI.ListAllowedKinds == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListAllowedKinds(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.BlockIP: + if rl.ManagementAPI.BlockIP == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.BlockIP(ctx, thing.IP, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.UnblockIP: + if rl.ManagementAPI.UnblockIP == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.UnblockIP(ctx, thing.IP, thing.Reason); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListBlockedIPs: + if rl.ManagementAPI.ListBlockedIPs == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListBlockedIPs(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.Stats: + if rl.ManagementAPI.Stats == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } 
else if result, err := rl.ManagementAPI.Stats(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.GrantAdmin: + if rl.ManagementAPI.GrantAdmin == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.GrantAdmin(ctx, thing.Pubkey, thing.AllowMethods); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.RevokeAdmin: + if rl.ManagementAPI.RevokeAdmin == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if err := rl.ManagementAPI.RevokeAdmin(ctx, thing.Pubkey, thing.DisallowMethods); err != nil { + resp.Error = err.Error() + } else { + resp.Result = true + } + case nip86.ListDisallowedKinds: + if rl.ManagementAPI.ListDisAllowedKinds == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListDisAllowedKinds(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + case nip86.ListAllowedEvents: + if rl.ManagementAPI.ListAllowedEvents == nil { + resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName()) + } else if result, err := rl.ManagementAPI.ListAllowedEvents(ctx); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + default: + if rl.ManagementAPI.Generic == nil { + resp.Error = fmt.Sprintf("method '%s' not known", mp.MethodName()) + } else if result, err := rl.ManagementAPI.Generic(ctx, req); err != nil { + resp.Error = err.Error() + } else { + resp.Result = result + } + } + } + +respond: + json.NewEncoder(w).Encode(resp) +} diff --git a/khatru/policies/events.go b/khatru/policies/events.go new file mode 100644 index 0000000..5ca069c --- /dev/null +++ b/khatru/policies/events.go @@ -0,0 +1,117 @@ +package policies + +import ( + "context" + "fmt" + "slices" + "strings" + "time" + + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip70" 
)

// PreventTooManyIndexableTags returns a function that can be used as a RejectFilter that will reject
// events with more indexable (single-character) tags than the specified number.
//
// If ignoreKinds is given this restriction will not apply to these kinds (useful for allowing a bigger
// number of indexable tags on kinds that legitimately carry many of them).
// If onlyKinds is given then all other kinds will be ignored; note that onlyKinds takes precedence
// over ignoreKinds when both are passed.
func PreventTooManyIndexableTags(max int, ignoreKinds []int, onlyKinds []int) func(context.Context, *nostr.Event) (bool, string) {
	// both lists are sorted once up front so the returned closure can use a
	// binary search on every incoming event
	slices.Sort(ignoreKinds)
	slices.Sort(onlyKinds)

	ignore := func(kind int) bool { return false }
	if len(ignoreKinds) > 0 {
		ignore = func(kind int) bool {
			_, isIgnored := slices.BinarySearch(ignoreKinds, kind)
			return isIgnored
		}
	}
	if len(onlyKinds) > 0 {
		// overwrites the ignoreKinds-based check: when onlyKinds is given,
		// every kind not present in it is exempt from the limit
		ignore = func(kind int) bool {
			_, isApplicable := slices.BinarySearch(onlyKinds, kind)
			return !isApplicable
		}
	}

	return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
		if ignore(event.Kind) {
			return false, ""
		}

		// count only indexable tags, i.e. tags whose name is a single character
		ntags := 0
		for _, tag := range event.Tags {
			if len(tag) > 0 && len(tag[0]) == 1 {
				ntags++
			}
		}
		if ntags > max {
			return true, "too many indexable tags"
		}
		return false, ""
	}
}

// PreventLargeTags rejects events that have indexable tag values greater than maxTagValueLen.
// Only indexable tags (single-character tag names) are checked; values of other tags may be
// arbitrarily large.
func PreventLargeTags(maxTagValueLen int) func(context.Context, *nostr.Event) (bool, string) {
	return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
		for _, tag := range event.Tags {
			// tag[0] is the tag name, tag[1] its value; only single-character
			// (indexable) tag names are subject to the size limit
			if len(tag) > 1 && len(tag[0]) == 1 {
				if len(tag[1]) > maxTagValueLen {
					return true, "event contains too large tags"
				}
			}
		}
		return false, ""
	}
}

// RestrictToSpecifiedKinds returns a function that can be used as a RejectFilter that will reject
// any events with kinds different than the specified ones.
+func RestrictToSpecifiedKinds(allowEphemeral bool, kinds ...uint16) func(context.Context, *nostr.Event) (bool, string) { + // sort the kinds in increasing order + slices.Sort(kinds) + + return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if allowEphemeral && nostr.IsEphemeralKind(event.Kind) { + return false, "" + } + + if _, allowed := slices.BinarySearch(kinds, uint16(event.Kind)); allowed { + return false, "" + } + + return true, fmt.Sprintf("received event kind %d not allowed", event.Kind) + } +} + +func PreventTimestampsInThePast(threshold time.Duration) func(context.Context, *nostr.Event) (bool, string) { + thresholdSeconds := nostr.Timestamp(threshold.Seconds()) + return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if nostr.Now()-event.CreatedAt > thresholdSeconds { + return true, "event too old" + } + return false, "" + } +} + +func PreventTimestampsInTheFuture(threshold time.Duration) func(context.Context, *nostr.Event) (bool, string) { + thresholdSeconds := nostr.Timestamp(threshold.Seconds()) + return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if event.CreatedAt-nostr.Now() > thresholdSeconds { + return true, "event too much in the future" + } + return false, "" + } +} + +func RejectEventsWithBase64Media(ctx context.Context, evt *nostr.Event) (bool, string) { + return strings.Contains(evt.Content, "data:image/") || strings.Contains(evt.Content, "data:video/"), "event with base64 media" +} + +func OnlyAllowNIP70ProtectedEvents(ctx context.Context, event *nostr.Event) (reject bool, msg string) { + if nip70.IsProtected(*event) { + return false, "" + } + return true, "blocked: we only accept events protected with the nip70 \"-\" tag" +} diff --git a/khatru/policies/filters.go b/khatru/policies/filters.go new file mode 100644 index 0000000..a3bc213 --- /dev/null +++ b/khatru/policies/filters.go @@ -0,0 +1,93 @@ +package policies + +import ( + "context" + "slices" + 
	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr"
)

// NoComplexFilters rejects filters that are overly broad: those that combine more
// than 4 kinds+tag-groups in total while also using more than 2 tag groups.
// (A filter with 3+ tag groups alone is still accepted as long as kinds+tags
// stay at 4 or fewer — both conditions must hold for the filter to be rejected.)
func NoComplexFilters(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
	items := len(filter.Tags) + len(filter.Kinds)

	if items > 4 && len(filter.Tags) > 2 {
		return true, "too many things to filter for"
	}

	return false, ""
}

// MustAuth requires all subscribers to be authenticated, rejecting any
// filter coming from a connection that hasn't completed NIP-42 AUTH.
func MustAuth(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
	if khatru.GetAuthed(ctx) == "" {
		return true, "auth-required: all requests must be authenticated"
	}
	return false, ""
}

// NoEmptyFilters disallows filters that don't have at least a tag, a kind, an author or an id.
func NoEmptyFilters(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
	// sum every constraining element; tag values are counted individually,
	// so a tag key with an empty value list does not count as a constraint
	c := len(filter.Kinds) + len(filter.IDs) + len(filter.Authors)
	for _, tagItems := range filter.Tags {
		c += len(tagItems)
	}
	if c == 0 {
		return true, "can't handle empty filters"
	}
	return false, ""
}

// AntiSyncBots tries to prevent people from syncing kind:1s from this relay to elsewhere by always
// requiring an author parameter at least.
+func AntiSyncBots(ctx context.Context, filter nostr.Filter) (reject bool, msg string) { + return (len(filter.Kinds) == 0 || slices.Contains(filter.Kinds, 1)) && + len(filter.Authors) == 0, "an author must be specified to get their kind:1 notes" +} + +func NoSearchQueries(ctx context.Context, filter nostr.Filter) (reject bool, msg string) { + if filter.Search != "" { + return true, "search is not supported" + } + return false, "" +} + +func RemoveSearchQueries(ctx context.Context, filter *nostr.Filter) { + if filter.Search != "" { + filter.Search = "" + filter.LimitZero = true // signals that this query should be just skipped + } +} + +func RemoveAllButKinds(kinds ...uint16) func(context.Context, *nostr.Filter) { + return func(ctx context.Context, filter *nostr.Filter) { + if n := len(filter.Kinds); n > 0 { + newKinds := make([]int, 0, n) + for i := 0; i < n; i++ { + if k := filter.Kinds[i]; slices.Contains(kinds, uint16(k)) { + newKinds = append(newKinds, k) + } + } + filter.Kinds = newKinds + if len(filter.Kinds) == 0 { + filter.LimitZero = true // signals that this query should be just skipped + } + } + } +} + +func RemoveAllButTags(tagNames ...string) func(context.Context, *nostr.Filter) { + return func(ctx context.Context, filter *nostr.Filter) { + if n := len(filter.Tags); n > 0 { + for tagName := range filter.Tags { + if !slices.Contains(tagNames, tagName) { + delete(filter.Tags, tagName) + } + } + if len(filter.Tags) == 0 { + filter.LimitZero = true // signals that this query should be just skipped + } + } + } +} diff --git a/khatru/policies/helpers.go b/khatru/policies/helpers.go new file mode 100644 index 0000000..bae06d3 --- /dev/null +++ b/khatru/policies/helpers.go @@ -0,0 +1,42 @@ +package policies + +import ( + "sync/atomic" + "time" + + "github.com/puzpuzpuz/xsync/v3" +) + +func startRateLimitSystem[K comparable]( + tokensPerInterval int, + interval time.Duration, + maxTokens int, +) func(key K) (ratelimited bool) { + negativeBuckets := 
xsync.NewMapOf[K, *atomic.Int32]() + maxTokensInt32 := int32(maxTokens) + + go func() { + for { + time.Sleep(interval) + for key, bucket := range negativeBuckets.Range { + newv := bucket.Add(int32(-tokensPerInterval)) + if newv <= 0 { + negativeBuckets.Delete(key) + } + } + } + }() + + return func(key K) bool { + nb, _ := negativeBuckets.LoadOrStore(key, &atomic.Int32{}) + + if nb.Load() < maxTokensInt32 { + nb.Add(1) + // rate limit not reached yet + return false + } + + // rate limit reached + return true + } +} diff --git a/khatru/policies/kind_validation.go b/khatru/policies/kind_validation.go new file mode 100644 index 0000000..e3506ce --- /dev/null +++ b/khatru/policies/kind_validation.go @@ -0,0 +1,29 @@ +package policies + +import ( + "context" + "encoding/json" + + "github.com/nbd-wtf/go-nostr" +) + +func ValidateKind(ctx context.Context, evt *nostr.Event) (bool, string) { + switch evt.Kind { + case 0: + var m struct { + Name string `json:"name"` + } + json.Unmarshal([]byte(evt.Content), &m) + if m.Name == "" { + return true, "missing json name in kind 0" + } + case 1: + return false, "" + case 2: + return true, "this kind has been deprecated" + } + + // TODO: all other kinds + + return false, "" +} diff --git a/khatru/policies/nip04.go b/khatru/policies/nip04.go new file mode 100644 index 0000000..d97f767 --- /dev/null +++ b/khatru/policies/nip04.go @@ -0,0 +1,38 @@ +package policies + +import ( + "context" + "slices" + + "github.com/fiatjaf/khatru" + "github.com/nbd-wtf/go-nostr" +) + +// RejectKind04Snoopers prevents reading NIP-04 messages from people not involved in the conversation. 
+func RejectKind04Snoopers(ctx context.Context, filter nostr.Filter) (bool, string) { + // prevent kind-4 events from being returned to unauthed users, + // only when authentication is a thing + if !slices.Contains(filter.Kinds, 4) { + return false, "" + } + + ws := khatru.GetConnection(ctx) + senders := filter.Authors + receivers, _ := filter.Tags["p"] + switch { + case ws.AuthedPublicKey == "": + // not authenticated + return true, "restricted: this relay does not serve kind-4 to unauthenticated users, does your client implement NIP-42?" + case len(senders) == 1 && len(receivers) < 2 && (senders[0] == ws.AuthedPublicKey): + // allowed filter: ws.authed is sole sender (filter specifies one or all receivers) + return false, "" + case len(receivers) == 1 && len(senders) < 2 && (receivers[0] == ws.AuthedPublicKey): + // allowed filter: ws.authed is sole receiver (filter specifies one or all senders) + return false, "" + default: + // restricted filter: do not return any events, + // even if other elements in filters array were not restricted). + // client should know better. + return true, "restricted: authenticated user does not have authorization for requested filters." 
+ } +} diff --git a/khatru/policies/ratelimits.go b/khatru/policies/ratelimits.go new file mode 100644 index 0000000..9f9c497 --- /dev/null +++ b/khatru/policies/ratelimits.go @@ -0,0 +1,46 @@ +package policies + +import ( + "context" + "net/http" + "time" + + "github.com/fiatjaf/khatru" + "github.com/nbd-wtf/go-nostr" +) + +func EventIPRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) { + rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens) + + return func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) { + ip := khatru.GetIP(ctx) + if ip == "" { + return false, "" + } + return rl(ip), "rate-limited: slow down, please" + } +} + +func EventPubKeyRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) { + rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens) + + return func(ctx context.Context, evt *nostr.Event) (reject bool, msg string) { + return rl(evt.PubKey), "rate-limited: slow down, please" + } +} + +func ConnectionRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(r *http.Request) bool { + rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens) + + return func(r *http.Request) bool { + return rl(khatru.GetIPFromRequest(r)) + } +} + +func FilterIPRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ nostr.Filter) (reject bool, msg string) { + rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens) + + return func(ctx context.Context, _ nostr.Filter) (reject bool, msg string) { + return rl(khatru.GetIP(ctx)), "rate-limited: there is a bug in the client, no one should be making so many requests" + } +} diff --git a/khatru/policies/sane_defaults.go b/khatru/policies/sane_defaults.go new file mode 100644 index 
0000000..d76e9d9 --- /dev/null +++ b/khatru/policies/sane_defaults.go @@ -0,0 +1,23 @@ +package policies + +import ( + "time" + + "github.com/fiatjaf/khatru" +) + +func ApplySaneDefaults(relay *khatru.Relay) { + relay.RejectEvent = append(relay.RejectEvent, + RejectEventsWithBase64Media, + EventIPRateLimiter(2, time.Minute*3, 10), + ) + + relay.RejectFilter = append(relay.RejectFilter, + NoComplexFilters, + FilterIPRateLimiter(20, time.Minute, 100), + ) + + relay.RejectConnection = append(relay.RejectConnection, + ConnectionRateLimiter(1, time.Minute*5, 100), + ) +} diff --git a/khatru/relay.go b/khatru/relay.go new file mode 100644 index 0000000..a94fcdc --- /dev/null +++ b/khatru/relay.go @@ -0,0 +1,145 @@ +package khatru + +import ( + "context" + "log" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/fasthttp/websocket" + "github.com/nbd-wtf/go-nostr" + "github.com/nbd-wtf/go-nostr/nip11" + "github.com/nbd-wtf/go-nostr/nip45/hyperloglog" +) + +func NewRelay() *Relay { + ctx := context.Background() + + rl := &Relay{ + Log: log.New(os.Stderr, "[khatru-relay] ", log.LstdFlags), + + Info: &nip11.RelayInformationDocument{ + Software: "https://github.com/fiatjaf/khatru", + Version: "n/a", + SupportedNIPs: []any{1, 11, 40, 42, 70, 86}, + }, + + upgrader: websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + CheckOrigin: func(r *http.Request) bool { return true }, + }, + + clients: make(map[*WebSocket][]listenerSpec, 100), + listeners: make([]listener, 0, 100), + + serveMux: &http.ServeMux{}, + + WriteWait: 10 * time.Second, + PongWait: 60 * time.Second, + PingPeriod: 30 * time.Second, + MaxMessageSize: 512000, + } + + rl.expirationManager = newExpirationManager(rl) + go rl.expirationManager.start(ctx) + + return rl +} + +type Relay struct { + // setting this variable overwrites the hackish workaround we do to try to figure out our own base URL + ServiceURL string + + // hooks that will be called at various times + 
RejectEvent []func(ctx context.Context, event *nostr.Event) (reject bool, msg string) + OverwriteDeletionOutcome []func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string) + StoreEvent []func(ctx context.Context, event *nostr.Event) error + ReplaceEvent []func(ctx context.Context, event *nostr.Event) error + DeleteEvent []func(ctx context.Context, event *nostr.Event) error + OnEventSaved []func(ctx context.Context, event *nostr.Event) + OnEphemeralEvent []func(ctx context.Context, event *nostr.Event) + RejectFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) + RejectCountFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) + OverwriteFilter []func(ctx context.Context, filter *nostr.Filter) + QueryEvents []func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) + CountEvents []func(ctx context.Context, filter nostr.Filter) (int64, error) + CountEventsHLL []func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) + RejectConnection []func(r *http.Request) bool + OnConnect []func(ctx context.Context) + OnDisconnect []func(ctx context.Context) + OverwriteRelayInformation []func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument + OverwriteResponseEvent []func(ctx context.Context, event *nostr.Event) + PreventBroadcast []func(ws *WebSocket, event *nostr.Event) bool + + // these are used when this relays acts as a router + routes []Route + getSubRelayFromEvent func(*nostr.Event) *Relay // used for handling EVENTs + getSubRelayFromFilter func(nostr.Filter) *Relay // used for handling REQs + + // setting up handlers here will enable these methods + ManagementAPI RelayManagementAPI + + // editing info will affect the NIP-11 responses + Info *nip11.RelayInformationDocument + + // Default logger, as set by NewServer, is a stdlib logger prefixed with 
"[khatru-relay] ", + // outputting to stderr. + Log *log.Logger + + // for establishing websockets + upgrader websocket.Upgrader + + // keep a connection reference to all connected clients for Server.Shutdown + // also used for keeping track of who is listening to what + clients map[*WebSocket][]listenerSpec + listeners []listener + clientsMutex sync.Mutex + + // set this to true to support negentropy + Negentropy bool + + // in case you call Server.Start + Addr string + serveMux *http.ServeMux + httpServer *http.Server + + // websocket options + WriteWait time.Duration // Time allowed to write a message to the peer. + PongWait time.Duration // Time allowed to read the next pong message from the peer. + PingPeriod time.Duration // Send pings to peer with this period. Must be less than pongWait. + MaxMessageSize int64 // Maximum message size allowed from peer. + + // NIP-40 expiration manager + expirationManager *expirationManager +} + +func (rl *Relay) getBaseURL(r *http.Request) string { + if rl.ServiceURL != "" { + return rl.ServiceURL + } + + host := r.Header.Get("X-Forwarded-Host") + if host == "" { + host = r.Host + } + proto := r.Header.Get("X-Forwarded-Proto") + if proto == "" { + if host == "localhost" { + proto = "http" + } else if strings.Contains(host, ":") { + // has a port number + proto = "http" + } else if _, err := strconv.Atoi(strings.ReplaceAll(host, ".", "")); err == nil { + // it's a naked IP + proto = "http" + } else { + proto = "https" + } + } + return proto + "://" + host +} diff --git a/khatru/relay_test.go b/khatru/relay_test.go new file mode 100644 index 0000000..09e7cbb --- /dev/null +++ b/khatru/relay_test.go @@ -0,0 +1,361 @@ +package khatru + +import ( + "context" + "net/http/httptest" + "strconv" + "testing" + "time" + + "github.com/fiatjaf/eventstore/slicestore" + "github.com/nbd-wtf/go-nostr" +) + +func TestBasicRelayFunctionality(t *testing.T) { + // setup relay with in-memory store + relay := NewRelay() + store := 
slicestore.SliceStore{} + store.Init() + relay.StoreEvent = append(relay.StoreEvent, store.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, store.QueryEvents) + relay.DeleteEvent = append(relay.DeleteEvent, store.DeleteEvent) + + // start test server + server := httptest.NewServer(relay) + defer server.Close() + + // create test keys + sk1 := nostr.GeneratePrivateKey() + pk1, err := nostr.GetPublicKey(sk1) + if err != nil { + t.Fatalf("Failed to get public key 1: %v", err) + } + sk2 := nostr.GeneratePrivateKey() + pk2, err := nostr.GetPublicKey(sk2) + if err != nil { + t.Fatalf("Failed to get public key 2: %v", err) + } + + // helper to create signed events + createEvent := func(sk string, kind int, content string, tags nostr.Tags) nostr.Event { + pk, err := nostr.GetPublicKey(sk) + if err != nil { + t.Fatalf("Failed to get public key: %v", err) + } + evt := nostr.Event{ + PubKey: pk, + CreatedAt: nostr.Now(), + Kind: kind, + Tags: tags, + Content: content, + } + evt.Sign(sk) + return evt + } + + // connect two test clients + url := "ws" + server.URL[4:] + client1, err := nostr.RelayConnect(context.Background(), url) + if err != nil { + t.Fatalf("failed to connect client1: %v", err) + } + defer client1.Close() + + client2, err := nostr.RelayConnect(context.Background(), url) + if err != nil { + t.Fatalf("failed to connect client2: %v", err) + } + defer client2.Close() + + // test 1: store and query events + t.Run("store and query events", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + evt1 := createEvent(sk1, 1, "hello world", nil) + err := client1.Publish(ctx, evt1) + if err != nil { + t.Fatalf("failed to publish event: %v", err) + } + + // Query the event back + sub, err := client2.Subscribe(ctx, []nostr.Filter{{ + Authors: []string{pk1}, + Kinds: []int{1}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // Wait for event + select { + case 
env := <-sub.Events: + if env.ID != evt1.ID { + t.Errorf("got wrong event: %v", env.ID) + } + case <-ctx.Done(): + t.Fatal("timeout waiting for event") + } + }) + + // test 2: live event subscription + t.Run("live event subscription", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Setup subscription first + sub, err := client1.Subscribe(ctx, []nostr.Filter{{ + Authors: []string{pk2}, + Kinds: []int{1}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // Publish event from client2 + evt2 := createEvent(sk2, 1, "testing live events", nil) + err = client2.Publish(ctx, evt2) + if err != nil { + t.Fatalf("failed to publish event: %v", err) + } + + // Wait for event on subscription + select { + case env := <-sub.Events: + if env.ID != evt2.ID { + t.Errorf("got wrong event: %v", env.ID) + } + case <-ctx.Done(): + t.Fatal("timeout waiting for live event") + } + }) + + // test 3: event deletion + t.Run("event deletion", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Create an event to be deleted + evt3 := createEvent(sk1, 1, "delete me", nil) + err = client1.Publish(ctx, evt3) + if err != nil { + t.Fatalf("failed to publish event: %v", err) + } + + // Create deletion event + delEvent := createEvent(sk1, 5, "deleting", nostr.Tags{{"e", evt3.ID}}) + err = client1.Publish(ctx, delEvent) + if err != nil { + t.Fatalf("failed to publish deletion event: %v", err) + } + + // Try to query the deleted event + sub, err := client2.Subscribe(ctx, []nostr.Filter{{ + IDs: []string{evt3.ID}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // Should get EOSE without receiving the deleted event + gotEvent := false + for { + select { + case <-sub.Events: + gotEvent = true + case <-sub.EndOfStoredEvents: + if gotEvent { + t.Error("should not have received 
deleted event") + } + return + case <-ctx.Done(): + t.Fatal("timeout waiting for EOSE") + } + } + }) + + // test 4: teplaceable events + t.Run("replaceable events", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // create initial kind:0 event + evt1 := createEvent(sk1, 0, `{"name":"initial"}`, nil) + evt1.CreatedAt = 1000 // Set specific timestamp for testing + evt1.Sign(sk1) + err = client1.Publish(ctx, evt1) + if err != nil { + t.Fatalf("failed to publish initial event: %v", err) + } + + // create newer event that should replace the first + evt2 := createEvent(sk1, 0, `{"name":"newer"}`, nil) + evt2.CreatedAt = 2000 // Newer timestamp + evt2.Sign(sk1) + err = client1.Publish(ctx, evt2) + if err != nil { + t.Fatalf("failed to publish newer event: %v", err) + } + + // create older event that should not replace the current one + evt3 := createEvent(sk1, 0, `{"name":"older"}`, nil) + evt3.CreatedAt = 1500 // Older than evt2 + evt3.Sign(sk1) + err = client1.Publish(ctx, evt3) + if err != nil { + t.Fatalf("failed to publish older event: %v", err) + } + + // query to verify only the newest event exists + sub, err := client2.Subscribe(ctx, []nostr.Filter{{ + Authors: []string{pk1}, + Kinds: []int{0}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // should only get one event back (the newest one) + var receivedEvents []*nostr.Event + for { + select { + case env := <-sub.Events: + receivedEvents = append(receivedEvents, env) + case <-sub.EndOfStoredEvents: + if len(receivedEvents) != 1 { + t.Errorf("expected exactly 1 event, got %d", len(receivedEvents)) + } + if len(receivedEvents) > 0 && receivedEvents[0].Content != `{"name":"newer"}` { + t.Errorf("expected newest event content, got %s", receivedEvents[0].Content) + } + return + case <-ctx.Done(): + t.Fatal("timeout waiting for events") + } + } + }) + + // test 5: event expiration + t.Run("event 
expiration", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // create a new relay with shorter expiration check interval + relay := NewRelay() + relay.expirationManager.interval = 3 * time.Second // check every 3 seconds + store := slicestore.SliceStore{} + store.Init() + relay.StoreEvent = append(relay.StoreEvent, store.SaveEvent) + relay.QueryEvents = append(relay.QueryEvents, store.QueryEvents) + relay.DeleteEvent = append(relay.DeleteEvent, store.DeleteEvent) + + // start test server + server := httptest.NewServer(relay) + defer server.Close() + + // connect test client + url := "ws" + server.URL[4:] + client, err := nostr.RelayConnect(context.Background(), url) + if err != nil { + t.Fatalf("failed to connect client: %v", err) + } + defer client.Close() + + // create event that expires in 2 seconds + expiration := strconv.FormatInt(int64(nostr.Now()+2), 10) + evt := createEvent(sk1, 1, "i will expire soon", nostr.Tags{{"expiration", expiration}}) + err = client.Publish(ctx, evt) + if err != nil { + t.Fatalf("failed to publish event: %v", err) + } + + // verify event exists initially + sub, err := client.Subscribe(ctx, []nostr.Filter{{ + IDs: []string{evt.ID}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + + // should get the event + select { + case env := <-sub.Events: + if env.ID != evt.ID { + t.Error("got wrong event") + } + case <-ctx.Done(): + t.Fatal("timeout waiting for event") + } + sub.Unsub() + + // wait for expiration check (>3 seconds) + time.Sleep(4 * time.Second) + + // verify event no longer exists + sub, err = client.Subscribe(ctx, []nostr.Filter{{ + IDs: []string{evt.ID}, + }}) + if err != nil { + t.Fatalf("failed to subscribe: %v", err) + } + defer sub.Unsub() + + // should get EOSE without receiving the expired event + gotEvent := false + for { + select { + case <-sub.Events: + gotEvent = true + case <-sub.EndOfStoredEvents: + if gotEvent { + 
t.Error("should not have received expired event")
+			}
+			return
+		case <-ctx.Done():
+			t.Fatal("timeout waiting for EOSE")
+		}
+		}
+	})
+
+	// test 6: unauthorized deletion
+	t.Run("unauthorized deletion", func(t *testing.T) {
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+
+		// create an event from client1
+		evt4 := createEvent(sk1, 1, "try to delete me", nil)
+		err = client1.Publish(ctx, evt4)
+		if err != nil {
+			t.Fatalf("failed to publish event: %v", err)
+		}
+
+		// Try to delete it with client2
+		delEvent := createEvent(sk2, 5, "trying to delete", nostr.Tags{{"e", evt4.ID}})
+		err = client2.Publish(ctx, delEvent)
+		if err == nil {
+			// NOTE(review): err is always nil on this branch, so %v prints
+			// "<nil>" — the format argument is vestigial.
+			t.Fatalf("should have failed to publish deletion event: %v", err)
+		}
+
+		// Verify event still exists
+		sub, err := client1.Subscribe(ctx, []nostr.Filter{{
+			IDs: []string{evt4.ID},
+		}})
+		if err != nil {
+			t.Fatalf("failed to subscribe: %v", err)
+		}
+		defer sub.Unsub()
+
+		select {
+		case env := <-sub.Events:
+			if env.ID != evt4.ID {
+				t.Error("got wrong event")
+			}
+		case <-ctx.Done():
+			t.Fatal("event should still exist")
+		}
+	})
+}
diff --git a/khatru/responding.go b/khatru/responding.go
new file mode 100644
index 0000000..0cae183
--- /dev/null
+++ b/khatru/responding.go
+package khatru
+
+import (
+	"context"
+	"errors"
+	"sync"
+
+	"github.com/nbd-wtf/go-nostr"
+	"github.com/nbd-wtf/go-nostr/nip45/hyperloglog"
+)
+
+// handleRequest serves a single subscription filter: it lets the
+// OverwriteFilter hooks rewrite the filter, applies the RejectFilter hooks,
+// then fans the query out to every QueryEvents function, streaming each
+// result back to the client as an EVENT envelope tagged with subscription id.
+//
+// A non-nil error means a RejectFilter hook blocked the request; the message
+// is normalized with a "blocked" prefix for the caller to send back.
+//
+// NOTE(review): the deferred eose.Done() balances an Add presumably performed
+// by the caller before invoking this method — confirm at the call site.
+func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGroup, ws *WebSocket, filter nostr.Filter) error {
+	defer eose.Done()
+
+	// overwrite the filter (for example, to eliminate some kinds or
+	// that we know we don't support)
+	for _, ovw := range rl.OverwriteFilter {
+		ovw(ctx, &filter)
+	}
+
+	if filter.LimitZero {
+		// don't do any queries, just subscribe to future events
+		return nil
+	}
+
+	// then check if we'll reject this filter (we apply this after overwriting
+	// because we may, for example, remove some things from the incoming filters
+	// that we know we don't support, and then if the end result is an empty
+	// filter we can just reject it)
+	for _, reject := range rl.RejectFilter {
+		if reject, msg := reject(ctx, filter); reject {
+			return errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
+		}
+	}
+
+	// run the functions to query events (generally just one,
+	// but we might be fetching stuff from multiple places)
+	// each query source gets its own eose slot, released either on error,
+	// on a nil channel, or when its goroutine drains the channel below.
+	eose.Add(len(rl.QueryEvents))
+	for _, query := range rl.QueryEvents {
+		ch, err := query(ctx, filter)
+		if err != nil {
+			// a failing source is reported as a NOTICE but does not abort
+			// the other sources
+			ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
+			eose.Done()
+			continue
+		} else if ch == nil {
+			eose.Done()
+			continue
+		}
+
+		go func(ch chan *nostr.Event) {
+			for event := range ch {
+				// give hooks a chance to mutate the outgoing event before
+				// it is written to the client
+				for _, ovw := range rl.OverwriteResponseEvent {
+					ovw(ctx, event)
+				}
+				ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &id, Event: *event})
+			}
+			eose.Done()
+		}(ch)
+	}
+
+	return nil
+}
+
+// handleCountRequest serves a NIP-45 COUNT request: it applies the
+// RejectCountFilter hooks (a rejection is reported as a NOTICE and counted
+// as zero) and sums the results of every CountEvents function. An error from
+// an individual counter is reported as a NOTICE but does not abort the rest.
+func (rl *Relay) handleCountRequest(ctx context.Context, ws *WebSocket, filter nostr.Filter) int64 {
+	// check if we'll reject this filter
+	for _, reject := range rl.RejectCountFilter {
+		if rejecting, msg := reject(ctx, filter); rejecting {
+			ws.WriteJSON(nostr.NoticeEnvelope(msg))
+			return 0
+		}
+	}
+
+	// run the functions to count (generally it will be just one)
+	var subtotal int64 = 0
+	for _, count := range rl.CountEvents {
+		res, err := count(ctx, filter)
+		if err != nil {
+			ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
+		}
+		subtotal += res
+	}
+
+	return subtotal
+}
+
+// handleCountRequestWithHLL is like handleCountRequest but additionally
+// collects the hyperloglog sketches returned by the CountEventsHLL functions,
+// merging sketches from multiple sources into one so distinct-count estimates
+// can be combined. Returns the summed count and the merged sketch (nil when
+// no source produced one, or when a RejectCountFilter hook rejected the
+// filter).
+func (rl *Relay) handleCountRequestWithHLL(
+	ctx context.Context,
+	ws *WebSocket,
+	filter nostr.Filter,
+	offset int,
+) (int64, *hyperloglog.HyperLogLog) {
+	// check if we'll reject this filter
+	for _, reject := range rl.RejectCountFilter {
+		if rejecting, msg := reject(ctx, filter); rejecting {
+			ws.WriteJSON(nostr.NoticeEnvelope(msg))
+			return 0, nil
+		}
+	}
+
+	// run the functions to count (generally it will be just one)
+	var subtotal int64 = 0
+	var hll *hyperloglog.HyperLogLog
+	for _, countHLL := range rl.CountEventsHLL {
+		res, fhll, err := countHLL(ctx, filter, offset)
+		if err != nil {
+			ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
+		}
+		subtotal += res
+		if fhll != nil {
+			// first sketch is adopted as-is; subsequent sketches are merged in
+			if hll == nil {
+				hll = fhll
+			} else {
+				hll.Merge(fhll)
+			}
+		}
+	}
+
+	return subtotal, hll
+}
diff --git a/khatru/router.go b/khatru/router.go
new file mode 100644
index 0000000..bab833b
--- /dev/null
+++ b/khatru/router.go
+package khatru
+
+import (
+	"github.com/nbd-wtf/go-nostr"
+)
+
+// Router is a Relay that dispatches each incoming event and each incoming
+// filter to one of several sub-relays, chosen by the registered Routes.
+type Router struct{ *Relay }
+
+// Route pairs an event matcher and a filter matcher with the sub-relay that
+// handles whatever they match.
+type Route struct {
+	eventMatcher  func(*nostr.Event) bool
+	filterMatcher func(nostr.Filter) bool
+	relay         *Relay
+}
+
+// routeBuilder accumulates matchers for a Route until Relay() registers it.
+type routeBuilder struct {
+	router        *Router
+	eventMatcher  func(*nostr.Event) bool
+	filterMatcher func(nostr.Filter) bool
+}
+
+// NewRouter creates a Router whose sub-relay lookups scan the registered
+// routes in order and fall back to the Router's own embedded Relay when no
+// route matches.
+func NewRouter() *Router {
+	rr := &Router{Relay: NewRelay()}
+	rr.routes = make([]Route, 0, 3)
+	rr.getSubRelayFromFilter = func(f nostr.Filter) *Relay {
+		for _, route := range rr.routes {
+			if route.filterMatcher(f) {
+				return route.relay
+			}
+		}
+		// no route claimed the filter: serve it from the base relay
+		return rr.Relay
+	}
+	rr.getSubRelayFromEvent = func(e *nostr.Event) *Relay {
+		for _, route := range rr.routes {
+			if route.eventMatcher(e) {
+				return route.relay
+			}
+		}
+		// no route claimed the event: serve it from the base relay
+		return rr.Relay
+	}
+	return rr
+}
+
+// Route starts building a new route. The default matchers match nothing, so
+// a route on which Req/Event were never called can never fire.
+func (rr *Router) Route() routeBuilder {
+	return routeBuilder{
+		router:        rr,
+		filterMatcher: func(f nostr.Filter) bool { return false },
+		eventMatcher:  func(e *nostr.Event) bool { return false },
+	}
+}
+
+// Req sets the filter matcher for the route being built.
+func (rb routeBuilder) Req(fn func(nostr.Filter) bool) routeBuilder {
+	rb.filterMatcher = fn
+	return rb
+}
+
+// Event sets the event matcher for the route being built.
+func (rb routeBuilder) Event(fn func(*nostr.Event) bool) routeBuilder {
+	rb.eventMatcher = fn
+	return rb
+}
+
+// Relay finalizes the route, binding its matchers to the given sub-relay.
+// Routes are evaluated in registration order.
+func (rb routeBuilder) Relay(relay *Relay) {
+	rb.router.routes = append(rb.router.routes, Route{
+		filterMatcher: rb.filterMatcher,
+		eventMatcher:  rb.eventMatcher,
+		relay:         relay,
+	})
+}
diff --git a/khatru/utils.go b/khatru/utils.go
new file mode 100644
index 0000000..131f360
--- /dev/null
+++ b/khatru/utils.go
+package khatru
+
+import (
+	"context"
+
+	"github.com/nbd-wtf/go-nostr"
+)
+
+// keys under which per-connection values are stashed in a request context.
+// NOTE(review): these are untyped ints; context keys of a built-in type can
+// collide with other packages' keys — an unexported struct type is the usual
+// safeguard. Confirm whether collision is a concern here.
+const (
+	wsKey = iota
+	subscriptionIdKey
+	nip86HeaderAuthKey
+	internalCallKey
+)
+
+// RequestAuth sends a NIP-42 AUTH challenge to the client behind ctx, lazily
+// creating the Authed signal channel (under authLock) so waiters can be
+// notified once authentication succeeds.
+// NOTE(review): GetConnection can return nil, which would make this panic —
+// presumably it is only ever called with a live websocket context; confirm.
+func RequestAuth(ctx context.Context) {
+	ws := GetConnection(ctx)
+	ws.authLock.Lock()
+	if ws.Authed == nil {
+		ws.Authed = make(chan struct{})
+	}
+	ws.authLock.Unlock()
+	ws.WriteJSON(nostr.AuthEnvelope{Challenge: &ws.Challenge})
+}
+
+// GetConnection returns the WebSocket stored in ctx, or nil when the context
+// carries no client connection.
+func GetConnection(ctx context.Context) *WebSocket {
+	wsi := ctx.Value(wsKey)
+	if wsi != nil {
+		return wsi.(*WebSocket)
+	}
+	return nil
+}
+
+// GetAuthed returns the authenticated public key for this context: the
+// websocket's NIP-42 authed key when a connection is present, otherwise the
+// NIP-86 header-auth value, otherwise the empty string.
+func GetAuthed(ctx context.Context) string {
+	if conn := GetConnection(ctx); conn != nil {
+		return conn.AuthedPublicKey
+	}
+	if nip86Auth := ctx.Value(nip86HeaderAuthKey); nip86Auth != nil {
+		return nip86Auth.(string)
+	}
+	return ""
+}
+
+// IsInternalCall returns true when a call to QueryEvents, for example, is being made because of a deletion
+// or expiration request.
+func IsInternalCall(ctx context.Context) bool {
+	return ctx.Value(internalCallKey) != nil
+}
+
+// GetIP returns the client IP derived from the connection's original HTTP
+// request, or "" when ctx carries no connection.
+func GetIP(ctx context.Context) string {
+	conn := GetConnection(ctx)
+	if conn == nil {
+		return ""
+	}
+
+	return GetIPFromRequest(conn.Request)
+}
+
+// GetSubscriptionID returns the subscription id stored in ctx.
+// NOTE(review): the bare type assertion panics when ctx does not carry
+// subscriptionIdKey — callers must only invoke this from inside a
+// subscription handler; confirm all call sites.
+func GetSubscriptionID(ctx context.Context) string {
+	return ctx.Value(subscriptionIdKey).(string)
+}
diff --git a/khatru/websocket.go b/khatru/websocket.go
new file mode 100644
index 0000000..8888f6a
--- /dev/null
+++ b/khatru/websocket.go
+package khatru
+
+import (
+	"context"
+	"net/http"
+	"sync"
+
+	"github.com/fasthttp/websocket"
+	"github.com/puzpuzpuz/xsync/v3"
+)
+
+// WebSocket wraps one client connection. mutex serializes all writes to conn
+// (see WriteJSON/WriteMessage), so frames from concurrent goroutines never
+// interleave.
+type WebSocket struct {
+	conn  *websocket.Conn
+	mutex sync.Mutex
+
+	// original request
+	Request *http.Request
+
+	// this Context will be canceled whenever the connection is closed from the client side or server-side.
+	Context context.Context
+	cancel  context.CancelFunc
+
+	// nip42
+	Challenge       string
+	AuthedPublicKey string
+	Authed          chan struct{} // closed/signaled once the client authenticates; guarded by authLock
+
+	// nip77
+	negentropySessions *xsync.MapOf[string, *NegentropySession]
+
+	authLock sync.Mutex
+}
+
+// WriteJSON marshals the value as JSON and writes it to the connection while
+// holding the write mutex. (The parameter is named "any", shadowing the
+// built-in alias inside this function.)
+func (ws *WebSocket) WriteJSON(any any) error {
+	ws.mutex.Lock()
+	err := ws.conn.WriteJSON(any)
+	ws.mutex.Unlock()
+	return err
+}
+
+// WriteMessage writes a raw websocket frame of type t while holding the
+// write mutex.
+func (ws *WebSocket) WriteMessage(t int, b []byte) error {
+	ws.mutex.Lock()
+	err := ws.conn.WriteMessage(t, b)
+	ws.mutex.Unlock()
+	return err
+}