diff --git a/README.md b/README.md
index df2b90d..7e342f4 100644
--- a/README.md
+++ b/README.md
@@ -1,164 +1,3 @@
-[](https://fiatjaf.com/nostrlib/actions/workflows/test.yml)
-[](https://pkg.go.dev/fiatjaf.com/nostrlib)
-[](https://goreportcard.com/report/fiatjaf.com/nostrlib)
+nostr library
-
-
-go-nostr
-========
-
-A set of useful things for [Nostr](https://github.com/nostr-protocol/nostr)-related software.
-
-```bash
-go get fiatjaf.com/nostrlib
-```
-
-### Generating a key
-
-``` go
-package main
-
-import (
- "fmt"
-
- "fiatjaf.com/nostrlib"
- "fiatjaf.com/nostrlib/nip19"
-)
-
-func main() {
- sk := nostr.GeneratePrivateKey()
- pk, _ := nostr.GetPublicKey(sk)
- nsec, _ := nip19.EncodePrivateKey(sk)
- npub, _ := nip19.EncodePublicKey(pk)
-
- fmt.Println("sk:", sk)
- fmt.Println("pk:", pk)
- fmt.Println(nsec)
- fmt.Println(npub)
-}
-```
-
-### Subscribing to a single relay
-
-``` go
-ctx := context.Background()
-relay, err := nostr.RelayConnect(ctx, "wss://relay.stoner.com")
-if err != nil {
- panic(err)
-}
-
-npub := "npub1422a7ws4yul24p0pf7cacn7cghqkutdnm35z075vy68ggqpqjcyswn8ekc"
-
-var filters nostr.Filters
-if _, v, err := nip19.Decode(npub); err == nil {
- pub := v.(string)
- filters = []nostr.Filter{{
- Kinds: []int{nostr.KindTextNote},
- Authors: []string{pub},
- Limit: 1,
- }}
-} else {
- panic(err)
-}
-
-ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
-defer cancel()
-
-sub, err := relay.Subscribe(ctx, filters)
-if err != nil {
- panic(err)
-}
-
-for ev := range sub.Events {
- // handle returned event.
- // channel will stay open until the ctx is cancelled (in this case, context timeout)
- fmt.Println(ev.ID)
-}
-```
-
-### Publishing to two relays
-
-``` go
-sk := nostr.GeneratePrivateKey()
-pub, _ := nostr.GetPublicKey(sk)
-
-ev := nostr.Event{
- PubKey: pub,
- CreatedAt: nostr.Now(),
- Kind: nostr.KindTextNote,
- Tags: nil,
- Content: "Hello World!",
-}
-
-// calling Sign sets the event ID field and the event Sig field
-ev.Sign(sk)
-
-// publish the event to two relays
-ctx := context.Background()
-for _, url := range []string{"wss://relay.stoner.com", "wss://nostr-pub.wellorder.net"} {
- relay, err := nostr.RelayConnect(ctx, url)
- if err != nil {
- fmt.Println(err)
- continue
- }
- if err := relay.Publish(ctx, ev); err != nil {
- fmt.Println(err)
- continue
- }
-
- fmt.Printf("published to %s\n", url)
-}
-```
-
-### Logging
-
-To get more logs from the interaction with relays printed to STDOUT you can compile or run your program with `-tags debug`.
-
-To remove the info logs completely, replace `nostr.InfoLogger` with something that prints nothing, like
-
-``` go
-nostr.InfoLogger = log.New(io.Discard, "", 0)
-```
-
-### Example script
-
-```
-go run example/example.go
-```
-
-### Using [`libsecp256k1`](https://github.com/bitcoin-core/secp256k1)
-
-[`libsecp256k1`](https://github.com/bitcoin-core/secp256k1) is very fast:
-
-```
-goos: linux
-goarch: amd64
-cpu: Intel(R) Core(TM) i5-2400 CPU @ 3.10GHz
-BenchmarkWithoutLibsecp256k1/sign-4 2794 434114 ns/op
-BenchmarkWithoutLibsecp256k1/check-4 4352 297416 ns/op
-BenchmarkWithLibsecp256k1/sign-4 12559 94607 ns/op
-BenchmarkWithLibsecp256k1/check-4 13761 84595 ns/op
-PASS
-```
-
-But to use it you need the host to have it installed as a shared library and CGO to be supported, so we don't compile against it by default.
-
-To use it, use `-tags=libsecp256k1` whenever you're compiling your program that uses this library.
-
-### Test for Wasm
-
-Install [wasmbrowsertest](https://github.com/agnivade/wasmbrowsertest), then run tests:
-
-```sh
-GOOS=js GOARCH=wasm go test -short ./...
-```
-
-## Warning: risk of goroutine bloat (if used incorrectly)
-
-Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point.
-If you don't do that they will keep creating a new goroutine for every new event that arrives and if you have stopped listening on the
-`sub.Events` channel that will cause chaos and doom in your program.
-
-## Contributing to this repository
-
-Use NIP-34 to send your patches to `naddr1qqyxwmeddehhxarjqy28wumn8ghj7un9d3shjtnyv9kh2uewd9hsz9nhwden5te0wfjkccte9ehx7um5wghxyctwvsq3vamnwvaz7tmjv4kxz7fwwpexjmtpdshxuet5qgsrhuxx8l9ex335q7he0f09aej04zpazpl0ne2cgukyawd24mayt8grqsqqqaueuwmljc`.
+do not use yet
diff --git a/event.go b/event.go
index c7144e2..15163b8 100644
--- a/event.go
+++ b/event.go
@@ -25,24 +25,24 @@ func (evt Event) String() string {
}
// GetID serializes and returns the event ID as a string.
-func (evt *Event) GetID() ID {
+func (evt Event) GetID() ID {
return sha256.Sum256(evt.Serialize())
}
// CheckID checks if the implied ID matches the given ID more efficiently.
-func (evt *Event) CheckID() bool {
+func (evt Event) CheckID() bool {
return evt.GetID() == evt.ID
}
// Serialize outputs a byte array that can be hashed to produce the canonical event "id".
-func (evt *Event) Serialize() []byte {
+func (evt Event) Serialize() []byte {
// the serialization process is just putting everything into a JSON array
// so the order is kept. See NIP-01
dst := make([]byte, 0, 100+len(evt.Content)+len(evt.Tags)*80)
return serializeEventInto(evt, dst)
}
-func serializeEventInto(evt *Event, dst []byte) []byte {
+func serializeEventInto(evt Event, dst []byte) []byte {
// the header portion is easy to serialize
// [0,"pubkey",created_at,kind,[
dst = append(dst, `[0,"`...)
diff --git a/event_test.go b/event_test.go
index 0a0e0a5..9ce10a2 100644
--- a/event_test.go
+++ b/event_test.go
@@ -25,7 +25,7 @@ func TestEventParsingAndVerifying(t *testing.T) {
assert.Equal(t, ev.ID, ev.GetID())
- ok, _ := ev.CheckSignature()
+ ok := ev.VerifySignature()
assert.True(t, ok, "signature verification failed when it should have succeeded")
asJSON, err := json.Marshal(ev)
diff --git a/eventstore/badger/count.go b/eventstore/badger/count.go
index 2fa14f7..259a5d6 100644
--- a/eventstore/badger/count.go
+++ b/eventstore/badger/count.go
@@ -1,17 +1,16 @@
package badger
import (
- "context"
"encoding/binary"
"log"
- "github.com/dgraph-io/badger/v4"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/nip45/hyperloglog"
+ "github.com/dgraph-io/badger/v4"
)
-func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
+func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0
queries, extraFilter, since, err := prepareQueries(filter)
@@ -62,8 +61,8 @@ func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (i
}
err = item.Value(func(val []byte) error {
- evt := &nostr.Event{}
- if err := bin.Unmarshal(val, evt); err != nil {
+ evt := nostr.Event{}
+ if err := betterbinary.Unmarshal(val, &evt); err != nil {
return err
}
@@ -87,7 +86,7 @@ func (b *BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (i
return count, err
}
-func (b *BadgerBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
+func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
var count int64 = 0
queries, extraFilter, since, err := prepareQueries(filter)
@@ -138,13 +137,13 @@ func (b *BadgerBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter,
err = item.Value(func(val []byte) error {
if extraFilter == nil {
- hll.AddBytes(val[32:64])
+ hll.AddBytes([32]byte(val[32:64]))
count++
return nil
}
- evt := &nostr.Event{}
- if err := bin.Unmarshal(val, evt); err != nil {
+ evt := nostr.Event{}
+ if err := betterbinary.Unmarshal(val, &evt); err != nil {
return err
}
if extraFilter.Matches(evt) {
diff --git a/eventstore/badger/delete.go b/eventstore/badger/delete.go
index cec84cb..3ddda6e 100644
--- a/eventstore/badger/delete.go
+++ b/eventstore/badger/delete.go
@@ -1,22 +1,22 @@
package badger
import (
- "context"
- "encoding/hex"
+ "fmt"
"log"
- "github.com/dgraph-io/badger/v4"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+ "github.com/dgraph-io/badger/v4"
)
var serialDelete uint32 = 0
-func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *BadgerBackend) DeleteEvent(id nostr.ID) error {
deletionHappened := false
err := b.Update(func(txn *badger.Txn) error {
var err error
- deletionHappened, err = b.delete(txn, evt)
+ deletionHappened, err = b.delete(txn, id)
return err
})
if err != nil {
@@ -36,22 +36,30 @@ func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error
return nil
}
-func (b *BadgerBackend) delete(txn *badger.Txn, evt *nostr.Event) (bool, error) {
+func (b *BadgerBackend) delete(txn *badger.Txn, id nostr.ID) (bool, error) {
idx := make([]byte, 1, 5)
idx[0] = rawEventStorePrefix
// query event by id to get its idx
- idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
prefix := make([]byte, 1+8)
prefix[0] = indexIdPrefix
- copy(prefix[1:], idPrefix8)
+ copy(prefix[1:], id[0:8])
opts := badger.IteratorOptions{
PrefetchValues: false,
}
+
+ // also grab the actual event so we can calculate its indexes
+ var evt nostr.Event
+
it := txn.NewIterator(opts)
it.Seek(prefix)
if it.ValidForPrefix(prefix) {
idx = append(idx, it.Item().Key()[1+8:]...)
+ if err := it.Item().Value(func(val []byte) error {
+ return betterbinary.Unmarshal(val, &evt)
+ }); err != nil {
+ return false, fmt.Errorf("failed to unmarshal event %x to delete: %w", id[:], err)
+ }
}
it.Close()
diff --git a/eventstore/badger/helpers.go b/eventstore/badger/helpers.go
index 3191064..2ebb559 100644
--- a/eventstore/badger/helpers.go
+++ b/eventstore/badger/helpers.go
@@ -40,14 +40,13 @@ func getTagIndexPrefix(tagValue string) ([]byte, int) {
return k, offset
}
-func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.Seq[[]byte] {
+func (b *BadgerBackend) getIndexKeysForEvent(evt nostr.Event, idx []byte) iter.Seq[[]byte] {
return func(yield func([]byte) bool) {
{
// ~ by id
- idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
k := make([]byte, 1+8+4)
k[0] = indexIdPrefix
- copy(k[1:], idPrefix8)
+ copy(k[1:], evt.ID[0:8])
copy(k[1+8:], idx)
if !yield(k) {
return
@@ -56,10 +55,9 @@ func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.
{
// ~ by pubkey+date
- pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
k := make([]byte, 1+8+4+4)
k[0] = indexPubkeyPrefix
- copy(k[1:], pubkeyPrefix8)
+ copy(k[1:], evt.PubKey[0:8])
binary.BigEndian.PutUint32(k[1+8:], uint32(evt.CreatedAt))
copy(k[1+8+4:], idx)
if !yield(k) {
@@ -81,10 +79,9 @@ func (b *BadgerBackend) getIndexKeysForEvent(evt *nostr.Event, idx []byte) iter.
{
// ~ by pubkey+kind+date
- pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
k := make([]byte, 1+8+2+4+4)
k[0] = indexPubkeyKindPrefix
- copy(k[1:], pubkeyPrefix8)
+ copy(k[1:], evt.PubKey[0:8])
binary.BigEndian.PutUint16(k[1+8:], uint16(evt.Kind))
binary.BigEndian.PutUint32(k[1+8+2:], uint32(evt.CreatedAt))
copy(k[1+8+2+4:], idx)
@@ -152,7 +149,7 @@ func getAddrTagElements(tagValue string) (kind uint16, pkb []byte, d string) {
return 0, nil, ""
}
-func filterMatchesTags(ef *nostr.Filter, event *nostr.Event) bool {
+func filterMatchesTags(ef nostr.Filter, event nostr.Event) bool {
for f, v := range ef.Tags {
if v != nil && !event.Tags.ContainsAny(f, v) {
return false
diff --git a/eventstore/badger/lib.go b/eventstore/badger/lib.go
index 5f7b205..fefb01d 100644
--- a/eventstore/badger/lib.go
+++ b/eventstore/badger/lib.go
@@ -5,9 +5,9 @@ import (
"fmt"
"sync/atomic"
- "github.com/dgraph-io/badger/v4"
- "fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore"
+ "github.com/dgraph-io/badger/v4"
)
const (
@@ -32,9 +32,9 @@ type BadgerBackend struct {
BadgerOptionsModifier func(badger.Options) badger.Options
// Experimental
- SkipIndexingTag func(event *nostr.Event, tagName string, tagValue string) bool
+ SkipIndexingTag func(event nostr.Event, tagName string, tagValue string) bool
// Experimental
- IndexLongerTag func(event *nostr.Event, tagName string, tagValue string) bool
+ IndexLongerTag func(event nostr.Event, tagName string, tagValue string) bool
*badger.DB
diff --git a/eventstore/badger/migrations.go b/eventstore/badger/migrations.go
index bdfb3a8..90cf1e8 100644
--- a/eventstore/badger/migrations.go
+++ b/eventstore/badger/migrations.go
@@ -2,7 +2,6 @@ package badger
import (
"encoding/binary"
- "fmt"
"github.com/dgraph-io/badger/v4"
)
@@ -26,35 +25,12 @@ func (b *BadgerBackend) runMigrations() error {
// do the migrations in increasing steps (there is no rollback)
//
- // the 3 first migrations go to trash because on version 3 we need to export and import all the data anyway
- if version < 3 {
- // if there is any data in the relay we will stop and notify the user,
- // otherwise we just set version to 3 and proceed
- prefix := []byte{indexIdPrefix}
- it := txn.NewIterator(badger.IteratorOptions{
- PrefetchValues: true,
- PrefetchSize: 100,
- Prefix: prefix,
- })
- defer it.Close()
-
- hasAnyEntries := false
- for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
- hasAnyEntries = true
- break
- }
-
- if hasAnyEntries {
- return fmt.Errorf("your database is at version %d, but in order to migrate up to version 3 you must manually export all the events and then import again: run an old version of this software, export the data, then delete the database files, run the new version, import the data back in.", version)
- }
-
- b.bumpVersion(txn, 3)
- }
-
- if version < 4 {
+ if version < 1 {
// ...
}
+ // b.bumpVersion(txn, 1)
+
return nil
})
}
diff --git a/eventstore/badger/query.go b/eventstore/badger/query.go
index 08f9afb..04fb2cb 100644
--- a/eventstore/badger/query.go
+++ b/eventstore/badger/query.go
@@ -1,68 +1,54 @@
package badger
import (
- "context"
"encoding/binary"
- "encoding/hex"
"errors"
"fmt"
+ "iter"
"log"
- "github.com/dgraph-io/badger/v4"
- "fiatjaf.com/nostr/eventstore"
- "fiatjaf.com/nostr/eventstore/internal"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+ "fiatjaf.com/nostr/eventstore/internal"
+ "github.com/dgraph-io/badger/v4"
"golang.org/x/exp/slices"
)
var batchFilled = errors.New("batch-filled")
-func (b *BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
- ch := make(chan *nostr.Event)
-
- if filter.Search != "" {
- close(ch)
- return ch, nil
- }
-
- // max number of events we'll return
- maxLimit := b.MaxLimit
- var limit int
- if eventstore.IsNegentropySession(ctx) {
- maxLimit = b.MaxLimitNegentropy
- limit = maxLimit
- } else {
- limit = maxLimit / 4
- }
- if filter.Limit > 0 && filter.Limit <= maxLimit {
- limit = filter.Limit
- }
- if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
- close(ch)
- return ch, nil
- } else if tlimit > 0 {
- limit = tlimit
- }
-
- // fmt.Println("limit", limit)
-
- go b.View(func(txn *badger.Txn) error {
- defer close(ch)
-
- results, err := b.query(txn, filter, limit)
- if err != nil {
- return err
+func (b *BadgerBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+ return func(yield func(nostr.Event) bool) {
+ if filter.Search != "" {
+ return
}
- for _, evt := range results {
- ch <- evt.Event
+ // max number of events we'll return
+ limit := b.MaxLimit / 4
+ if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
+ limit = filter.Limit
+ }
+ if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+ return
+ } else if tlimit > 0 {
+ limit = tlimit
}
- return nil
- })
+ // fmt.Println("limit", limit)
+ b.View(func(txn *badger.Txn) error {
+ results, err := b.query(txn, filter, limit)
+ if err != nil {
+ return err
+ }
- return ch, nil
+ for _, evt := range results {
+ if !yield(evt.Event) {
+ return nil
+ }
+ }
+
+ return nil
+ })
+ }
}
func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) {
@@ -81,16 +67,16 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// we will continue to pull from it as soon as some other iterator takes the position
oldest := internal.IterEvent{Q: -1}
- secondPhase := false // after we have gathered enough events we will change the way we iterate
+ sndPhase := false // after we have gathered enough events we will change the way we iterate
secondBatch := make([][]internal.IterEvent, 0, len(queries)+1)
- secondPhaseParticipants := make([]int, 0, len(queries)+1)
+ sndPhaseParticipants := make([]int, 0, len(queries)+1)
// while merging results in the second phase we will alternate between these two lists
// to avoid having to create new lists all the time
- var secondPhaseResultsA []internal.IterEvent
- var secondPhaseResultsB []internal.IterEvent
- var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
- var secondPhaseHasResultsPending bool
+ var sndPhaseResultsA []internal.IterEvent
+ var sndPhaseResultsB []internal.IterEvent
+ var sndPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
+ var sndPhaseHasResultsPending bool
remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing
batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -180,26 +166,26 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// check it against pubkeys without decoding the entire thing
if extraFilter != nil && extraFilter.Authors != nil &&
- !slices.Contains(extraFilter.Authors, hex.EncodeToString(val[32:64])) {
+ !nostr.ContainsPubKey(extraFilter.Authors, nostr.PubKey(val[32:64])) {
// fmt.Println(" skipped (authors)")
return nil
}
// check it against kinds without decoding the entire thing
if extraFilter != nil && extraFilter.Kinds != nil &&
- !slices.Contains(extraFilter.Kinds, int(binary.BigEndian.Uint16(val[132:134]))) {
+ !slices.Contains(extraFilter.Kinds, binary.BigEndian.Uint16(val[132:134])) {
// fmt.Println(" skipped (kinds)")
return nil
}
- event := &nostr.Event{}
- if err := bin.Unmarshal(val, event); err != nil {
+ event := nostr.Event{}
+ if err := betterbinary.Unmarshal(val, &event); err != nil {
log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
return err
}
// check if this matches the other filters that were not part of the index
- if extraFilter != nil && !filterMatchesTags(extraFilter, event) {
+ if extraFilter != nil && !filterMatchesTags(*extraFilter, event) {
// fmt.Println(" skipped (filter)", extraFilter, event)
return nil
}
@@ -208,18 +194,18 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
evt := internal.IterEvent{Event: event, Q: q}
//
//
- if secondPhase {
+ if sndPhase {
// do the process described below at HIWAWVRTP.
// if we've reached here this means we've already passed the `since` check.
// now we have to eliminate the event currently at the `since` threshold.
nextThreshold := firstPhaseResults[len(firstPhaseResults)-2]
- if oldest.Event == nil {
+ if oldest.Event.ID == nostr.ZeroID {
// fmt.Println(" b1")
// BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE)
// when we don't have the oldest set, we will keep the results
// and not change the cutting point -- it's bad, but hopefully not that bad.
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt > oldest.CreatedAt {
// fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt)
// one of the events we have stored is the actual next threshold
@@ -236,7 +222,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// finally
// add this to the results to be merged later
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt < evt.CreatedAt {
// the next last event in the firstPhaseResults is the next threshold
// fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt)
@@ -246,7 +232,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// fmt.Println(" new since", since)
// add this to the results to be merged later
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
// update the oldest event
if evt.CreatedAt < oldest.CreatedAt {
oldest = evt
@@ -265,7 +251,7 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
firstPhaseTotalPulled++
// update the oldest event
- if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt {
+ if oldest.Event.ID == nostr.ZeroID || evt.CreatedAt < oldest.CreatedAt {
oldest = evt
}
}
@@ -295,20 +281,20 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// we will do this check if we don't accumulated the requested number of events yet
// fmt.Println("oldest", oldest.Event, "from iter", oldest.Q)
- if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) {
+ if sndPhase && sndPhaseHasResultsPending && (oldest.Event.ID == nostr.ZeroID || remainingUnexhausted == 0) {
// fmt.Println("second phase aggregation!")
// when we are in the second phase we will aggressively aggregate results on every iteration
//
secondBatch = secondBatch[:0]
- for s := 0; s < len(secondPhaseParticipants); s++ {
- q := secondPhaseParticipants[s]
+ for s := 0; s < len(sndPhaseParticipants); s++ {
+ q := sndPhaseParticipants[s]
if len(results[q]) > 0 {
secondBatch = append(secondBatch, results[q])
}
if exhausted[q] {
- secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s)
+ sndPhaseParticipants = internal.SwapDelete(sndPhaseParticipants, s)
s--
}
}
@@ -316,29 +302,29 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
// every time we get here we will alternate between these A and B lists
// combining everything we have into a new partial results list.
// after we've done that we can again set the oldest.
- // fmt.Println(" xxx", secondPhaseResultsToggle)
- if secondPhaseResultsToggle {
- secondBatch = append(secondBatch, secondPhaseResultsB)
- secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA)
- oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1]
- // fmt.Println(" new aggregated a", len(secondPhaseResultsB))
+ // fmt.Println(" xxx", sndPhaseResultsToggle)
+ if sndPhaseResultsToggle {
+ secondBatch = append(secondBatch, sndPhaseResultsB)
+ sndPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsA)
+ oldest = sndPhaseResultsA[len(sndPhaseResultsA)-1]
+ // fmt.Println(" new aggregated a", len(sndPhaseResultsB))
} else {
- secondBatch = append(secondBatch, secondPhaseResultsA)
- secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB)
- oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1]
- // fmt.Println(" new aggregated b", len(secondPhaseResultsB))
+ secondBatch = append(secondBatch, sndPhaseResultsA)
+ sndPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsB)
+ oldest = sndPhaseResultsB[len(sndPhaseResultsB)-1]
+ // fmt.Println(" new aggregated b", len(sndPhaseResultsB))
}
- secondPhaseResultsToggle = !secondPhaseResultsToggle
+ sndPhaseResultsToggle = !sndPhaseResultsToggle
since = uint32(oldest.CreatedAt)
// fmt.Println(" new since", since)
// reset the `results` list so we can keep using it
results = results[:len(queries)]
- for _, q := range secondPhaseParticipants {
+ for _, q := range sndPhaseParticipants {
results[q] = results[q][:0]
}
- } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
+ } else if !sndPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
// fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted)
// we will exclude this oldest number as it is not relevant anymore
@@ -382,16 +368,16 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
results[q] = results[q][:0]
// build this index of indexes with everybody who remains
- secondPhaseParticipants = append(secondPhaseParticipants, q)
+ sndPhaseParticipants = append(sndPhaseParticipants, q)
}
// we create these two lists and alternate between them so we don't have to create a
// a new one every time
- secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
- secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
+ sndPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
+ sndPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
// from now on we won't run this block anymore
- secondPhase = true
+ sndPhase = true
}
// fmt.Println("remaining", remainingUnexhausted)
@@ -400,27 +386,27 @@ func (b *BadgerBackend) query(txn *badger.Txn, filter nostr.Filter, limit int) (
}
}
- // fmt.Println("is secondPhase?", secondPhase)
+ // fmt.Println("is sndPhase?", sndPhase)
var combinedResults []internal.IterEvent
- if secondPhase {
+ if sndPhase {
// fmt.Println("ending second phase")
- // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff,
+ // when we reach this point either sndPhaseResultsA or sndPhaseResultsB will be full of stuff,
// the other will be empty
- var secondPhaseResults []internal.IterEvent
- // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB))
- if secondPhaseResultsToggle {
- secondPhaseResults = secondPhaseResultsB
- combinedResults = secondPhaseResultsA[0:limit] // reuse this
- // fmt.Println(" using b", len(secondPhaseResultsA))
+ var sndPhaseResults []internal.IterEvent
+ // fmt.Println("xxx", sndPhaseResultsToggle, len(sndPhaseResultsA), len(sndPhaseResultsB))
+ if sndPhaseResultsToggle {
+ sndPhaseResults = sndPhaseResultsB
+ combinedResults = sndPhaseResultsA[0:limit] // reuse this
+ // fmt.Println(" using b", len(sndPhaseResultsA))
} else {
- secondPhaseResults = secondPhaseResultsA
- combinedResults = secondPhaseResultsB[0:limit] // reuse this
- // fmt.Println(" using a", len(secondPhaseResultsA))
+ sndPhaseResults = sndPhaseResultsA
+ combinedResults = sndPhaseResultsB[0:limit] // reuse this
+ // fmt.Println(" using a", len(sndPhaseResultsA))
}
- all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults}
+ all := [][]internal.IterEvent{firstPhaseResults, sndPhaseResults}
combinedResults = internal.MergeSortMultiple(all, limit, combinedResults)
// fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit)
} else {
diff --git a/eventstore/badger/replace.go b/eventstore/badger/replace.go
index 4b8c07c..3644b2a 100644
--- a/eventstore/badger/replace.go
+++ b/eventstore/badger/replace.go
@@ -1,23 +1,22 @@
package badger
import (
- "context"
"fmt"
"math"
- "github.com/dgraph-io/badger/v4"
- "fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/internal"
+ "github.com/dgraph-io/badger/v4"
)
-func (b *BadgerBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *BadgerBackend) ReplaceEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
}
return b.Update(func(txn *badger.Txn) error {
- filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
+ filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
@@ -32,7 +31,7 @@ func (b *BadgerBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) erro
shouldStore := true
for _, previous := range results {
if internal.IsOlder(previous.Event, evt) {
- if _, err := b.delete(txn, previous.Event); err != nil {
+ if _, err := b.delete(txn, previous.Event.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err)
}
} else {
diff --git a/eventstore/badger/save.go b/eventstore/badger/save.go
index 4ff3389..fd51655 100644
--- a/eventstore/badger/save.go
+++ b/eventstore/badger/save.go
@@ -1,18 +1,16 @@
package badger
import (
- "context"
- "encoding/hex"
"fmt"
"math"
- "github.com/dgraph-io/badger/v4"
- "fiatjaf.com/nostr/eventstore"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+ "github.com/dgraph-io/badger/v4"
)
-func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *BadgerBackend) SaveEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
@@ -20,10 +18,9 @@ func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
return b.Update(func(txn *badger.Txn) error {
// query event by id to ensure we don't save duplicates
- id, _ := hex.DecodeString(evt.ID)
prefix := make([]byte, 1+8)
prefix[0] = indexIdPrefix
- copy(prefix[1:], id)
+ copy(prefix[1:], evt.ID[0:8])
it := txn.NewIterator(badger.IteratorOptions{})
defer it.Close()
it.Seek(prefix)
@@ -36,16 +33,16 @@ func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
})
}
-func (b *BadgerBackend) save(txn *badger.Txn, evt *nostr.Event) error {
+func (b *BadgerBackend) save(txn *badger.Txn, evt nostr.Event) error {
// encode to binary
- bin, err := bin.Marshal(evt)
- if err != nil {
+ buf := make([]byte, betterbinary.Measure(evt))
+ if err := betterbinary.Marshal(evt, buf); err != nil {
return err
}
idx := b.Serial()
// raw event store
- if err := txn.Set(idx, bin); err != nil {
+ if err := txn.Set(idx, buf); err != nil {
return err
}
diff --git a/eventstore/bluge/delete.go b/eventstore/bluge/delete.go
index dfdc0af..ea42f7a 100644
--- a/eventstore/bluge/delete.go
+++ b/eventstore/bluge/delete.go
@@ -1,11 +1,9 @@
package bluge
import (
- "context"
-
"fiatjaf.com/nostr"
)
-func (b *BlugeBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
- return b.writer.Delete(eventIdentifier(evt.ID))
+func (b *BlugeBackend) DeleteEvent(id nostr.ID) error {
+ return b.writer.Delete(eventIdentifier(id))
}
diff --git a/eventstore/bluge/helpers.go b/eventstore/bluge/helpers.go
index ab421d6..4468112 100644
--- a/eventstore/bluge/helpers.go
+++ b/eventstore/bluge/helpers.go
@@ -1,6 +1,8 @@
package bluge
-import "encoding/hex"
+import (
+ "fiatjaf.com/nostr"
+)
const (
contentField = "c"
@@ -9,7 +11,7 @@ const (
pubkeyField = "p"
)
-type eventIdentifier string
+type eventIdentifier nostr.ID
const idField = "i"
@@ -18,6 +20,5 @@ func (id eventIdentifier) Field() string {
}
func (id eventIdentifier) Term() []byte {
- v, _ := hex.DecodeString(string(id))
- return v
+ return id[:]
}
diff --git a/eventstore/bluge/query.go b/eventstore/bluge/query.go
index 2cf723a..23c5175 100644
--- a/eventstore/bluge/query.go
+++ b/eventstore/bluge/query.go
@@ -2,108 +2,96 @@ package bluge
import (
"context"
- "encoding/hex"
- "fmt"
+ "iter"
"strconv"
+ "fiatjaf.com/nostr"
"github.com/blugelabs/bluge"
"github.com/blugelabs/bluge/search"
- "fiatjaf.com/nostr"
)
-func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
- ch := make(chan *nostr.Event)
-
- if len(filter.Search) < 2 {
- close(ch)
- return ch, nil
- }
-
- reader, err := b.writer.Reader()
- if err != nil {
- close(ch)
- return nil, fmt.Errorf("unable to open reader: %w", err)
- }
-
- searchQ := bluge.NewMatchQuery(filter.Search)
- searchQ.SetField(contentField)
- var q bluge.Query = searchQ
-
- complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ)
-
- if len(filter.Kinds) > 0 {
- eitherKind := bluge.NewBooleanQuery()
- eitherKind.SetMinShould(1)
- for _, kind := range filter.Kinds {
- kindQ := bluge.NewTermQuery(strconv.Itoa(kind))
- kindQ.SetField(kindField)
- eitherKind.AddShould(kindQ)
+func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+ return func(yield func(nostr.Event) bool) {
+ if len(filter.Search) < 2 {
+ return
}
- complicatedQuery.AddMust(eitherKind)
- q = complicatedQuery
- }
- if len(filter.Authors) > 0 {
- eitherPubkey := bluge.NewBooleanQuery()
- eitherPubkey.SetMinShould(1)
- for _, pubkey := range filter.Authors {
- if len(pubkey) != 64 {
- continue
+ reader, err := b.writer.Reader()
+ if err != nil {
+ return
+ }
+
+ searchQ := bluge.NewMatchQuery(filter.Search)
+ searchQ.SetField(contentField)
+ var q bluge.Query = searchQ
+
+ complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ)
+
+ if len(filter.Kinds) > 0 {
+ eitherKind := bluge.NewBooleanQuery()
+ eitherKind.SetMinShould(1)
+ for _, kind := range filter.Kinds {
+ kindQ := bluge.NewTermQuery(strconv.Itoa(int(kind)))
+ kindQ.SetField(kindField)
+ eitherKind.AddShould(kindQ)
}
- pubkeyQ := bluge.NewTermQuery(pubkey[56:])
- pubkeyQ.SetField(pubkeyField)
- eitherPubkey.AddShould(pubkeyQ)
+ complicatedQuery.AddMust(eitherKind)
+ q = complicatedQuery
}
- complicatedQuery.AddMust(eitherPubkey)
- q = complicatedQuery
- }
- if filter.Since != nil || filter.Until != nil {
- min := 0.0
- if filter.Since != nil {
- min = float64(*filter.Since)
+ if len(filter.Authors) > 0 {
+ eitherPubkey := bluge.NewBooleanQuery()
+ eitherPubkey.SetMinShould(1)
+ for _, pubkey := range filter.Authors {
+ if len(pubkey) != 64 {
+ continue
+ }
+ pubkeyQ := bluge.NewTermQuery(pubkey.Hex()[56:])
+ pubkeyQ.SetField(pubkeyField)
+ eitherPubkey.AddShould(pubkeyQ)
+ }
+ complicatedQuery.AddMust(eitherPubkey)
+ q = complicatedQuery
}
- max := float64(nostr.Now())
- if filter.Until != nil {
- max = float64(*filter.Until)
+
+ if filter.Since != nil || filter.Until != nil {
+ min := 0.0
+ if filter.Since != nil {
+ min = float64(*filter.Since)
+ }
+ max := float64(nostr.Now())
+ if filter.Until != nil {
+ max = float64(*filter.Until)
+ }
+ dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
+ dateRangeQ.SetField(createdAtField)
+ complicatedQuery.AddMust(dateRangeQ)
+ q = complicatedQuery
}
- dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
- dateRangeQ.SetField(createdAtField)
- complicatedQuery.AddMust(dateRangeQ)
- q = complicatedQuery
- }
- limit := 40
- if filter.Limit != 0 {
- limit = filter.Limit
- if filter.Limit > 150 {
- limit = 150
+ limit := 40
+ if filter.Limit != 0 {
+ limit = filter.Limit
+ if filter.Limit > 150 {
+ limit = 150
+ }
}
- }
- req := bluge.NewTopNSearch(limit, q)
+ req := bluge.NewTopNSearch(limit, q)
- dmi, err := reader.Search(context.Background(), req)
- if err != nil {
- close(ch)
- reader.Close()
- return ch, fmt.Errorf("error executing search: %w", err)
- }
+ dmi, err := reader.Search(context.Background(), req)
+ if err != nil {
+ reader.Close()
+ return
+ }
- go func() {
defer reader.Close()
- defer close(ch)
var next *search.DocumentMatch
for next, err = dmi.Next(); next != nil; next, err = dmi.Next() {
next.VisitStoredFields(func(field string, value []byte) bool {
- id := hex.EncodeToString(value)
- rawch, err := b.RawEventStore.QueryEvents(ctx, nostr.Filter{IDs: []string{id}})
- if err != nil {
- return false
- }
- for evt := range rawch {
- ch <- evt
+ for evt := range b.RawEventStore.QueryEvents(nostr.Filter{IDs: []nostr.ID{nostr.ID(value)}}) {
+ yield(evt)
}
return false
})
@@ -111,7 +99,5 @@ func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch
if err != nil {
return
}
- }()
-
- return ch, nil
+ }
}
diff --git a/eventstore/bluge/replace.go b/eventstore/bluge/replace.go
index 014ef57..96dfde0 100644
--- a/eventstore/bluge/replace.go
+++ b/eventstore/bluge/replace.go
@@ -4,29 +4,24 @@ import (
"context"
"fmt"
+ "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/internal"
- "fiatjaf.com/nostr"
)
-func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *BlugeBackend) ReplaceEvent(ctx context.Context, evt nostr.Event) error {
b.Lock()
defer b.Unlock()
- filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
+ filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}
- ch, err := b.QueryEvents(ctx, filter)
- if err != nil {
- return fmt.Errorf("failed to query before replacing: %w", err)
- }
-
shouldStore := true
- for previous := range ch {
+ for previous := range b.QueryEvents(filter) {
if internal.IsOlder(previous, evt) {
- if err := b.DeleteEvent(ctx, previous); err != nil {
+ if err := b.DeleteEvent(previous.ID); err != nil {
return fmt.Errorf("failed to delete event for replacing: %w", err)
}
} else {
diff --git a/eventstore/bluge/save.go b/eventstore/bluge/save.go
index f2334d0..e087cf1 100644
--- a/eventstore/bluge/save.go
+++ b/eventstore/bluge/save.go
@@ -1,23 +1,22 @@
package bluge
import (
- "context"
"fmt"
"strconv"
- "github.com/blugelabs/bluge"
"fiatjaf.com/nostr"
+ "github.com/blugelabs/bluge"
)
-func (b *BlugeBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *BlugeBackend) SaveEvent(evt nostr.Event) error {
id := eventIdentifier(evt.ID)
doc := &bluge.Document{
bluge.NewKeywordFieldBytes(id.Field(), id.Term()).Sortable().StoreValue(),
}
doc.AddField(bluge.NewTextField(contentField, evt.Content))
- doc.AddField(bluge.NewTextField(kindField, strconv.Itoa(evt.Kind)))
- doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey[56:]))
+ doc.AddField(bluge.NewTextField(kindField, strconv.Itoa(int(evt.Kind))))
+ doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey.Hex()[56:]))
doc.AddField(bluge.NewNumericField(createdAtField, float64(evt.CreatedAt)))
if err := b.writer.Update(doc.ID(), doc); err != nil {
diff --git a/eventstore/mmm/betterbinary/codec.go b/eventstore/codec/betterbinary/codec.go
similarity index 91%
rename from eventstore/mmm/betterbinary/codec.go
rename to eventstore/codec/betterbinary/codec.go
index cad3d3a..92fd615 100644
--- a/eventstore/mmm/betterbinary/codec.go
+++ b/eventstore/codec/betterbinary/codec.go
@@ -2,7 +2,6 @@ package betterbinary
import (
"encoding/binary"
- "encoding/hex"
"fmt"
"math"
@@ -50,9 +49,9 @@ func Marshal(evt nostr.Event, buf []byte) error {
}
binary.LittleEndian.PutUint32(buf[3:7], uint32(evt.CreatedAt))
- hex.Decode(buf[7:39], []byte(evt.ID))
- hex.Decode(buf[39:71], []byte(evt.PubKey))
- hex.Decode(buf[71:135], []byte(evt.Sig))
+ copy(buf[7:39], evt.ID[:])
+ copy(buf[39:71], evt.PubKey[:])
+ copy(buf[71:135], evt.Sig[:])
tagBase := 135
// buf[135:137] (tagsSectionLength) will be set later when we know the absolute size of the tags section
@@ -108,11 +107,11 @@ func Unmarshal(data []byte, evt *nostr.Event) (err error) {
}
}()
- evt.Kind = int(binary.LittleEndian.Uint16(data[1:3]))
+ evt.Kind = uint16(binary.LittleEndian.Uint16(data[1:3]))
evt.CreatedAt = nostr.Timestamp(binary.LittleEndian.Uint32(data[3:7]))
- evt.ID = hex.EncodeToString(data[7:39])
- evt.PubKey = hex.EncodeToString(data[39:71])
- evt.Sig = hex.EncodeToString(data[71:135])
+ evt.ID = nostr.ID(data[7:39])
+ evt.PubKey = nostr.PubKey(data[39:71])
+ evt.Sig = [64]byte(data[71:135])
const tagbase = 135
tagsSectionLength := binary.LittleEndian.Uint16(data[tagbase:])
diff --git a/eventstore/mmm/betterbinary/codec_test.go b/eventstore/codec/betterbinary/codec_test.go
similarity index 100%
rename from eventstore/mmm/betterbinary/codec_test.go
rename to eventstore/codec/betterbinary/codec_test.go
diff --git a/eventstore/mmm/betterbinary/filtering.go b/eventstore/codec/betterbinary/filtering.go
similarity index 100%
rename from eventstore/mmm/betterbinary/filtering.go
rename to eventstore/codec/betterbinary/filtering.go
diff --git a/eventstore/mmm/betterbinary/filtering_test.go b/eventstore/codec/betterbinary/filtering_test.go
similarity index 100%
rename from eventstore/mmm/betterbinary/filtering_test.go
rename to eventstore/codec/betterbinary/filtering_test.go
diff --git a/eventstore/internal/binary/cmd/decode-binary/.gitignore b/eventstore/internal/binary/cmd/decode-binary/.gitignore
deleted file mode 100644
index e5d9c00..0000000
--- a/eventstore/internal/binary/cmd/decode-binary/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-decode-binary
diff --git a/eventstore/internal/binary/cmd/decode-binary/main.go b/eventstore/internal/binary/cmd/decode-binary/main.go
deleted file mode 100644
index 182619e..0000000
--- a/eventstore/internal/binary/cmd/decode-binary/main.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package main
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "os"
-
- "fiatjaf.com/nostr/eventstore/internal/binary"
- "fiatjaf.com/nostr"
-)
-
-func main() {
- b, err := io.ReadAll(os.Stdin)
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to read from stdin: %s\n", err)
- os.Exit(1)
- return
- }
- b = bytes.TrimSpace(b)
-
- if bytes.HasPrefix(b, []byte("0x")) {
- fromHex := make([]byte, (len(b)-2)/2)
- _, err := hex.Decode(fromHex, b[2:])
- if err == nil {
- b = fromHex
- }
- }
-
- var evt nostr.Event
- err = binary.Unmarshal(b, &evt)
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to decode: %s\n", err)
- os.Exit(1)
- return
- }
- fmt.Println(evt.String())
-}
diff --git a/eventstore/internal/binary/hybrid.go b/eventstore/internal/binary/hybrid.go
deleted file mode 100644
index 30c6b89..0000000
--- a/eventstore/internal/binary/hybrid.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package binary
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
-
- "fiatjaf.com/nostr"
-)
-
-// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later.
-func Unmarshal(data []byte, evt *nostr.Event) (err error) {
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("failed to decode binary for event %s from %s at %d: %v", evt.ID, evt.PubKey, evt.CreatedAt, r)
- }
- }()
-
- evt.ID = hex.EncodeToString(data[0:32])
- evt.PubKey = hex.EncodeToString(data[32:64])
- evt.Sig = hex.EncodeToString(data[64:128])
- evt.CreatedAt = nostr.Timestamp(binary.BigEndian.Uint32(data[128:132]))
- evt.Kind = int(binary.BigEndian.Uint16(data[132:134]))
- contentLength := int(binary.BigEndian.Uint16(data[134:136]))
- evt.Content = string(data[136 : 136+contentLength])
-
- curr := 136 + contentLength
-
- nTags := binary.BigEndian.Uint16(data[curr : curr+2])
- curr++
- evt.Tags = make(nostr.Tags, nTags)
-
- for t := range evt.Tags {
- curr++
- nItems := int(data[curr])
- tag := make(nostr.Tag, nItems)
- for i := range tag {
- curr = curr + 1
- itemSize := int(binary.BigEndian.Uint16(data[curr : curr+2]))
- itemStart := curr + 2
- item := string(data[itemStart : itemStart+itemSize])
- tag[i] = item
- curr = itemStart + itemSize
- }
- evt.Tags[t] = tag
- }
-
- return err
-}
-
-// Deprecated -- the encoding used here is not very elegant, we'll have a better binary format later.
-func Marshal(evt *nostr.Event) ([]byte, error) {
- content := []byte(evt.Content)
- buf := make([]byte, 32+32+64+4+2+2+len(content)+65536+len(evt.Tags)*40 /* blergh */)
-
- hex.Decode(buf[0:32], []byte(evt.ID))
- hex.Decode(buf[32:64], []byte(evt.PubKey))
- hex.Decode(buf[64:128], []byte(evt.Sig))
-
- if evt.CreatedAt > MaxCreatedAt {
- return nil, fmt.Errorf("created_at is too big: %d", evt.CreatedAt)
- }
- binary.BigEndian.PutUint32(buf[128:132], uint32(evt.CreatedAt))
-
- if evt.Kind > MaxKind {
- return nil, fmt.Errorf("kind is too big: %d, max is %d", evt.Kind, MaxKind)
- }
- binary.BigEndian.PutUint16(buf[132:134], uint16(evt.Kind))
-
- if contentLength := len(content); contentLength > MaxContentSize {
- return nil, fmt.Errorf("content is too large: %d, max is %d", contentLength, MaxContentSize)
- } else {
- binary.BigEndian.PutUint16(buf[134:136], uint16(contentLength))
- }
- copy(buf[136:], content)
-
- if tagCount := len(evt.Tags); tagCount > MaxTagCount {
- return nil, fmt.Errorf("can't encode too many tags: %d, max is %d", tagCount, MaxTagCount)
- } else {
- binary.BigEndian.PutUint16(buf[136+len(content):136+len(content)+2], uint16(tagCount))
- }
-
- buf = buf[0 : 136+len(content)+2]
-
- for _, tag := range evt.Tags {
- if itemCount := len(tag); itemCount > MaxTagItemCount {
- return nil, fmt.Errorf("can't encode a tag with so many items: %d, max is %d", itemCount, MaxTagItemCount)
- } else {
- buf = append(buf, uint8(itemCount))
- }
- for _, item := range tag {
- itemb := []byte(item)
- itemSize := len(itemb)
- if itemSize > MaxTagItemSize {
- return nil, fmt.Errorf("tag item is too large: %d, max is %d", itemSize, MaxTagItemSize)
- }
- buf = binary.BigEndian.AppendUint16(buf, uint16(itemSize))
- buf = append(buf, itemb...)
- buf = append(buf, 0)
- }
- }
- return buf, nil
-}
diff --git a/eventstore/internal/binary/limits.go b/eventstore/internal/binary/limits.go
deleted file mode 100644
index 8383c07..0000000
--- a/eventstore/internal/binary/limits.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package binary
-
-import (
- "math"
-
- "fiatjaf.com/nostr"
-)
-
-const (
- MaxKind = math.MaxUint16
- MaxCreatedAt = math.MaxUint32
- MaxContentSize = math.MaxUint16
- MaxTagCount = math.MaxUint16
- MaxTagItemCount = math.MaxUint8
- MaxTagItemSize = math.MaxUint16
-)
-
-func EventEligibleForBinaryEncoding(event *nostr.Event) bool {
- if len(event.Content) > MaxContentSize || event.Kind > MaxKind || event.CreatedAt > MaxCreatedAt || len(event.Tags) > MaxTagCount {
- return false
- }
-
- for _, tag := range event.Tags {
- if len(tag) > MaxTagItemCount {
- return false
- }
- for _, item := range tag {
- if len(item) > MaxTagItemSize {
- return false
- }
- }
- }
-
- return true
-}
diff --git a/eventstore/internal/checks/interface.go b/eventstore/internal/checks/interface.go
index 4381aa3..69ef970 100644
--- a/eventstore/internal/checks/interface.go
+++ b/eventstore/internal/checks/interface.go
@@ -4,12 +4,7 @@ import (
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/badger"
"fiatjaf.com/nostr/eventstore/bluge"
- "fiatjaf.com/nostr/eventstore/edgedb"
"fiatjaf.com/nostr/eventstore/lmdb"
- "fiatjaf.com/nostr/eventstore/mongo"
- "fiatjaf.com/nostr/eventstore/mysql"
- "fiatjaf.com/nostr/eventstore/postgresql"
- "fiatjaf.com/nostr/eventstore/sqlite3"
"fiatjaf.com/nostr/eventstore/strfry"
)
@@ -17,11 +12,6 @@ import (
var (
_ eventstore.Store = (*badger.BadgerBackend)(nil)
_ eventstore.Store = (*lmdb.LMDBBackend)(nil)
- _ eventstore.Store = (*edgedb.EdgeDBBackend)(nil)
- _ eventstore.Store = (*postgresql.PostgresBackend)(nil)
- _ eventstore.Store = (*mongo.MongoDBBackend)(nil)
- _ eventstore.Store = (*sqlite3.SQLite3Backend)(nil)
_ eventstore.Store = (*strfry.StrfryBackend)(nil)
_ eventstore.Store = (*bluge.BlugeBackend)(nil)
- _ eventstore.Store = (*mysql.MySQLBackend)(nil)
)
diff --git a/eventstore/internal/helpers.go b/eventstore/internal/helpers.go
index 2830d5f..d661c65 100644
--- a/eventstore/internal/helpers.go
+++ b/eventstore/internal/helpers.go
@@ -1,18 +1,18 @@
package internal
import (
+ "bytes"
"cmp"
"math"
"slices"
- "strings"
mergesortedslices "fiatjaf.com/lib/merge-sorted-slices"
"fiatjaf.com/nostr"
)
-func IsOlder(previous, next *nostr.Event) bool {
+func IsOlder(previous, next nostr.Event) bool {
return previous.CreatedAt < next.CreatedAt ||
- (previous.CreatedAt == next.CreatedAt && previous.ID > next.ID)
+ (previous.CreatedAt == next.CreatedAt && bytes.Compare(previous.ID[:], next.ID[:]) == 1)
}
func ChooseNarrowestTag(filter nostr.Filter) (key string, values []string, goodness int) {
@@ -80,7 +80,7 @@ func CopyMapWithoutKey[K comparable, V any](originalMap map[K]V, key K) map[K]V
}
type IterEvent struct {
- *nostr.Event
+ nostr.Event
Q int
}
@@ -166,18 +166,18 @@ func SwapDelete[A any](arr []A, i int) []A {
}
func compareIterEvent(a, b IterEvent) int {
- if a.Event == nil {
- if b.Event == nil {
+ if a.Event.ID == nostr.ZeroID {
+ if b.Event.ID == nostr.ZeroID {
return 0
} else {
return -1
}
- } else if b.Event == nil {
+ } else if b.Event.ID == nostr.ZeroID {
return 1
}
if a.CreatedAt == b.CreatedAt {
- return strings.Compare(a.ID, b.ID)
+ return slices.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.CreatedAt, b.CreatedAt)
}
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22 b/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22
deleted file mode 100644
index 35c2e7c..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/2387982a59ec5d22
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(256)
-uint(31)
-uint(260)
-uint(2)
-uint(69)
-uint(385)
-uint(1)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd b/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd
deleted file mode 100644
index 46eee56..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/25234b78dd36a5fd
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(267)
-uint(50)
-uint(355)
-uint(2)
-uint(69)
-uint(213)
-uint(1)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57 b/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57
deleted file mode 100644
index f668119..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/35a474e7be3cdc57
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(280)
-uint(0)
-uint(13)
-uint(2)
-uint(2)
-uint(0)
-uint(0)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d b/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d
deleted file mode 100644
index 3a4b052..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/6e88633b00eff43d
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(259)
-uint(126)
-uint(5)
-uint(23)
-uint(0)
-uint(0)
-uint(92)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116 b/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116
deleted file mode 100644
index 2b67e29..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/70a3844d6c7ec116
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(201)
-uint(50)
-uint(13)
-uint(97)
-uint(0)
-uint(0)
-uint(77)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30 b/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30
deleted file mode 100644
index 9445b8e..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/98cca88a26b20e30
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(164)
-uint(50)
-uint(13)
-uint(1)
-uint(2)
-uint(13)
-uint(0)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2 b/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2
deleted file mode 100644
index ac26f30..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/dabb8bfe01b215a2
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(200)
-uint(50)
-uint(13)
-uint(8)
-uint(2)
-uint(0)
-uint(1)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec b/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec
deleted file mode 100644
index 5676736..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/debae0ec843d23ec
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(200)
-uint(117)
-uint(13)
-uint(2)
-uint(2)
-uint(0)
-uint(1)
diff --git a/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2 b/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2
deleted file mode 100644
index ad6fd8f..0000000
--- a/eventstore/internal/testdata/fuzz/FuzzQuery/f6d74a34318165c2
+++ /dev/null
@@ -1,8 +0,0 @@
-go test fuzz v1
-uint(200)
-uint(50)
-uint(13)
-uint(2)
-uint(2)
-uint(0)
-uint(0)
diff --git a/eventstore/lmdb/count.go b/eventstore/lmdb/count.go
index b90e40c..b564c4d 100644
--- a/eventstore/lmdb/count.go
+++ b/eventstore/lmdb/count.go
@@ -2,19 +2,18 @@ package lmdb
import (
"bytes"
- "context"
"encoding/binary"
"encoding/hex"
- "github.com/PowerDNS/lmdb-go/lmdb"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
"fiatjaf.com/nostr/nip45"
"fiatjaf.com/nostr/nip45/hyperloglog"
+ "github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
)
-func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
+func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0
queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)
@@ -72,7 +71,7 @@ func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
}
evt := &nostr.Event{}
- if err := bin.Unmarshal(val, evt); err != nil {
+ if err := betterbinary.Unmarshal(val, evt); err != nil {
it.next()
continue
}
@@ -94,8 +93,9 @@ func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
return count, err
}
-// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results, following NIP-45
-func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
+// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results,
+// following NIP-45
+func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache {
return b.countEventsHLLCached(filter)
}
@@ -147,7 +147,7 @@ func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, o
if extraKinds == nil && extraTagValues == nil {
// nothing extra to check
count++
- hll.AddBytes(val[32:64])
+ hll.AddBytes(nostr.PubKey(val[32:64]))
} else {
// check it against kinds without decoding the entire thing
if !slices.Contains(extraKinds, [2]byte(val[132:134])) {
@@ -156,7 +156,7 @@ func (b *LMDBBackend) CountEventsHLL(ctx context.Context, filter nostr.Filter, o
}
evt := &nostr.Event{}
- if err := bin.Unmarshal(val, evt); err != nil {
+ if err := betterbinary.Unmarshal(val, evt); err != nil {
it.next()
continue
}
@@ -211,7 +211,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
return count, hll, err
}
-func (b *LMDBBackend) updateHyperLogLogCachedValues(txn *lmdb.Txn, evt *nostr.Event) error {
+func (b *LMDBBackend) updateHyperLogLogCachedValues(txn *lmdb.Txn, evt nostr.Event) error {
cacheKey := make([]byte, 2+8)
binary.BigEndian.PutUint16(cacheKey[0:2], uint16(evt.Kind))
diff --git a/eventstore/lmdb/delete.go b/eventstore/lmdb/delete.go
index 1c64a32..3d85de3 100644
--- a/eventstore/lmdb/delete.go
+++ b/eventstore/lmdb/delete.go
@@ -1,29 +1,39 @@
package lmdb
import (
- "context"
- "encoding/hex"
"fmt"
- "github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+ "github.com/PowerDNS/lmdb-go/lmdb"
)
-func (b *LMDBBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *LMDBBackend) DeleteEvent(id nostr.ID) error {
return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
- return b.delete(txn, evt)
+ return b.delete(txn, id)
})
}
-func (b *LMDBBackend) delete(txn *lmdb.Txn, evt *nostr.Event) error {
- idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
- idx, err := txn.Get(b.indexId, idPrefix8)
+func (b *LMDBBackend) delete(txn *lmdb.Txn, id nostr.ID) error {
+ // check if we have this actually
+ idx, err := txn.Get(b.indexId, id[0:8])
if lmdb.IsNotFound(err) {
// we already do not have this
return nil
}
if err != nil {
- return fmt.Errorf("failed to get current idx for deleting %x: %w", evt.ID[0:8*2], err)
+ return fmt.Errorf("failed to get current idx for deleting %x: %w", id[0:8], err)
+ }
+
+ // if we do, get it so we can compute the indexes
+ buf, err := txn.Get(b.rawEventStore, idx)
+ if err != nil {
+ return fmt.Errorf("failed to get raw event %x to delete: %w", id, err)
+ }
+
+ var evt nostr.Event
+ if err := betterbinary.Unmarshal(buf, &evt); err != nil {
+ return fmt.Errorf("failed to unmarshal raw event %x to delete: %w", id, err)
}
// calculate all index keys we have for this event and delete them
diff --git a/eventstore/lmdb/helpers.go b/eventstore/lmdb/helpers.go
index 4ef445c..2147f49 100644
--- a/eventstore/lmdb/helpers.go
+++ b/eventstore/lmdb/helpers.go
@@ -9,8 +9,8 @@ import (
"strconv"
"strings"
- "github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr"
+ "github.com/PowerDNS/lmdb-go/lmdb"
"golang.org/x/exp/slices"
)
@@ -52,13 +52,11 @@ func (b *LMDBBackend) keyName(key key) string {
return fmt.Sprintf("", b.dbiName(key.dbi), key.key)
}
-func (b *LMDBBackend) getIndexKeysForEvent(evt *nostr.Event) iter.Seq[key] {
+func (b *LMDBBackend) getIndexKeysForEvent(evt nostr.Event) iter.Seq[key] {
return func(yield func(key) bool) {
{
// ~ by id
- k := make([]byte, 8)
- hex.Decode(k[0:8], []byte(evt.ID[0:8*2]))
- if !yield(key{dbi: b.indexId, key: k[0:8]}) {
+ if !yield(key{dbi: b.indexId, key: evt.ID[0:8]}) {
return
}
}
diff --git a/eventstore/lmdb/lib.go b/eventstore/lmdb/lib.go
index 6beda14..d11d562 100644
--- a/eventstore/lmdb/lib.go
+++ b/eventstore/lmdb/lib.go
@@ -6,8 +6,8 @@ import (
"os"
"sync/atomic"
- "github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr/eventstore"
+ "github.com/PowerDNS/lmdb-go/lmdb"
)
var _ eventstore.Store = (*LMDBBackend)(nil)
@@ -34,7 +34,7 @@ type LMDBBackend struct {
indexPTagKind lmdb.DBI
hllCache lmdb.DBI
- EnableHLLCacheFor func(kind int) (useCache bool, skipSavingActualEvent bool)
+ EnableHLLCacheFor func(kind uint16) (useCache bool, skipSavingActualEvent bool)
lastId atomic.Uint32
}
diff --git a/eventstore/lmdb/migration.go b/eventstore/lmdb/migration.go
index b3746cb..0d477b0 100644
--- a/eventstore/lmdb/migration.go
+++ b/eventstore/lmdb/migration.go
@@ -3,11 +3,8 @@ package lmdb
import (
"encoding/binary"
"fmt"
- "log"
"github.com/PowerDNS/lmdb-go/lmdb"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
- "fiatjaf.com/nostr"
)
const (
@@ -28,114 +25,18 @@ func (b *LMDBBackend) runMigrations() error {
version = binary.BigEndian.Uint16(v)
}
- // all previous migrations are useless because we will just reindex everything
- if version == 0 {
- // if there is any data in the relay we will just set the version to the max without saying anything
- cursor, err := txn.OpenCursor(b.rawEventStore)
- if err != nil {
- return fmt.Errorf("failed to open cursor in migration: %w", err)
- }
- defer cursor.Close()
-
- hasAnyEntries := false
- _, _, err = cursor.Get(nil, nil, lmdb.First)
- for err == nil {
- hasAnyEntries = true
- break
- }
-
- if !hasAnyEntries {
- b.setVersion(txn, 8)
- version = 8
- return nil
- }
- }
-
// do the migrations in increasing steps (there is no rollback)
//
// this is when we reindex everything
- if version < 8 {
- log.Println("[lmdb] migration 8: reindex everything")
-
- if err := txn.Drop(b.indexId, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexCreatedAt, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexKind, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexPTagKind, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexPubkey, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexPubkeyKind, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexTag, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexTag32, false); err != nil {
- return err
- }
- if err := txn.Drop(b.indexTagAddr, false); err != nil {
- return err
- }
-
- cursor, err := txn.OpenCursor(b.rawEventStore)
- if err != nil {
- return fmt.Errorf("failed to open cursor in migration 8: %w", err)
- }
- defer cursor.Close()
-
- seen := make(map[[32]byte]struct{})
-
- idx, val, err := cursor.Get(nil, nil, lmdb.First)
- for err == nil {
- idp := *(*[32]byte)(val[0:32])
- if _, isDup := seen[idp]; isDup {
- // do not index, but delete this entry
- if err := txn.Del(b.rawEventStore, idx, nil); err != nil {
- return err
- }
-
- // next
- idx, val, err = cursor.Get(nil, nil, lmdb.Next)
- continue
- }
-
- seen[idp] = struct{}{}
-
- evt := &nostr.Event{}
- if err := bin.Unmarshal(val, evt); err != nil {
- return fmt.Errorf("error decoding event %x on migration 5: %w", idx, err)
- }
-
- for key := range b.getIndexKeysForEvent(evt) {
- if err := txn.Put(key.dbi, key.key, idx, 0); err != nil {
- return fmt.Errorf("failed to save index %s for event %s (%v) on migration 8: %w",
- b.keyName(key), evt.ID, idx, err)
- }
- }
-
- // next
- idx, val, err = cursor.Get(nil, nil, lmdb.Next)
- }
- if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound {
- // exited the loop with an error different from NOTFOUND
- return err
- }
-
- // bump version
- if err := b.setVersion(txn, 8); err != nil {
- return err
- }
+ if version < 1 {
}
+ // bump version
+ // if err := b.setVersion(txn, 1); err != nil {
+ // return err
+ // }
+
return nil
})
}
diff --git a/eventstore/lmdb/query.go b/eventstore/lmdb/query.go
index c657417..8bab417 100644
--- a/eventstore/lmdb/query.go
+++ b/eventstore/lmdb/query.go
@@ -2,59 +2,49 @@ package lmdb
import (
"bytes"
- "context"
"encoding/binary"
"fmt"
+ "iter"
"log"
"slices"
- "github.com/PowerDNS/lmdb-go/lmdb"
- "fiatjaf.com/nostr/eventstore"
- "fiatjaf.com/nostr/eventstore/internal"
- bin "fiatjaf.com/nostr/eventstore/internal/binary"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/codec/betterbinary"
+ "fiatjaf.com/nostr/eventstore/internal"
+ "github.com/PowerDNS/lmdb-go/lmdb"
)
-func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
- ch := make(chan *nostr.Event)
-
- if filter.Search != "" {
- close(ch)
- return ch, nil
- }
-
- // max number of events we'll return
- maxLimit := b.MaxLimit
- var limit int
- if eventstore.IsNegentropySession(ctx) {
- maxLimit = b.MaxLimitNegentropy
- limit = maxLimit
- } else {
- limit = maxLimit / 4
- }
- if filter.Limit > 0 && filter.Limit <= maxLimit {
- limit = filter.Limit
- }
- if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
- close(ch)
- return ch, nil
- } else if tlimit > 0 {
- limit = tlimit
- }
-
- go b.lmdbEnv.View(func(txn *lmdb.Txn) error {
- txn.RawRead = true
- defer close(ch)
- results, err := b.query(txn, filter, limit)
-
- for _, ie := range results {
- ch <- ie.Event
+func (b *LMDBBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+ return func(yield func(nostr.Event) bool) {
+ if filter.Search != "" {
+ return
}
- return err
- })
+ // max number of events we'll return
+ var limit int
+ limit = b.MaxLimit / 4
+ if filter.Limit > 0 && filter.Limit <= b.MaxLimit {
+ limit = filter.Limit
+ }
+ if tlimit := nostr.GetTheoreticalLimit(filter); tlimit == 0 {
+ return
+ } else if tlimit > 0 {
+ limit = tlimit
+ }
- return ch, nil
+ b.lmdbEnv.View(func(txn *lmdb.Txn) error {
+ txn.RawRead = true
+ results, err := b.query(txn, filter, limit)
+
+ for _, ie := range results {
+ if !yield(ie.Event) {
+ break
+ }
+ }
+
+ return err
+ })
+ }
}
func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]internal.IterEvent, error) {
@@ -73,16 +63,16 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
// we will continue to pull from it as soon as some other iterator takes the position
oldest := internal.IterEvent{Q: -1}
- secondPhase := false // after we have gathered enough events we will change the way we iterate
+ sndPhase := false // after we have gathered enough events we will change the way we iterate
secondBatch := make([][]internal.IterEvent, 0, len(queries)+1)
- secondPhaseParticipants := make([]int, 0, len(queries)+1)
+ sndPhaseParticipants := make([]int, 0, len(queries)+1)
// while merging results in the second phase we will alternate between these two lists
// to avoid having to create new lists all the time
- var secondPhaseResultsA []internal.IterEvent
- var secondPhaseResultsB []internal.IterEvent
- var secondPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
- var secondPhaseHasResultsPending bool
+ var sndPhaseResultsA []internal.IterEvent
+ var sndPhaseResultsB []internal.IterEvent
+ var sndPhaseResultsToggle bool // this is just a dummy thing we use to keep track of the alternating
+ var sndPhaseHasResultsPending bool
remainingUnexhausted := len(queries) // when all queries are exhausted we can finally end this thing
batchSizePerQuery := internal.BatchSizePerNumberOfQueries(limit, remainingUnexhausted)
@@ -171,8 +161,8 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
}
// decode the entire thing
- event := &nostr.Event{}
- if err := bin.Unmarshal(val, event); err != nil {
+ event := nostr.Event{}
+ if err := betterbinary.Unmarshal(val, &event); err != nil {
log.Printf("lmdb: value read error (id %x) on query prefix %x sp %x dbi %d: %s\n", val[0:32],
query.prefix, query.startingPoint, query.dbi, err)
return nil, fmt.Errorf("event read error: %w", err)
@@ -190,18 +180,18 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
evt := internal.IterEvent{Event: event, Q: q}
//
//
- if secondPhase {
+ if sndPhase {
// do the process described below at HIWAWVRTP.
// if we've reached here this means we've already passed the `since` check.
// now we have to eliminate the event currently at the `since` threshold.
nextThreshold := firstPhaseResults[len(firstPhaseResults)-2]
- if oldest.Event == nil {
+ if oldest.Event.ID == nostr.ZeroID {
// fmt.Println(" b1", evt.ID[0:8])
// BRANCH WHEN WE DON'T HAVE THE OLDEST EVENT (BWWDHTOE)
// when we don't have the oldest set, we will keep the results
// and not change the cutting point -- it's bad, but hopefully not that bad.
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt > oldest.CreatedAt {
// fmt.Println(" b2", nextThreshold.CreatedAt, ">", oldest.CreatedAt, evt.ID[0:8])
// one of the events we have stored is the actual next threshold
@@ -218,7 +208,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
// finally
// add this to the results to be merged later
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
} else if nextThreshold.CreatedAt < evt.CreatedAt {
// the next last event in the firstPhaseResults is the next threshold
// fmt.Println(" b3", nextThreshold.CreatedAt, "<", oldest.CreatedAt, evt.ID[0:8])
@@ -228,7 +218,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
// fmt.Println(" new since", since)
// add this to the results to be merged later
results[q] = append(results[q], evt)
- secondPhaseHasResultsPending = true
+ sndPhaseHasResultsPending = true
// update the oldest event
if evt.CreatedAt < oldest.CreatedAt {
oldest = evt
@@ -247,7 +237,7 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
firstPhaseTotalPulled++
// update the oldest event
- if oldest.Event == nil || evt.CreatedAt < oldest.CreatedAt {
+ if oldest.Event.ID == nostr.ZeroID || evt.CreatedAt < oldest.CreatedAt {
oldest = evt
}
}
@@ -273,20 +263,20 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
// we will do this check if we don't accumulated the requested number of events yet
// fmt.Println("oldest", oldest.Event, "from iter", oldest.Q)
- if secondPhase && secondPhaseHasResultsPending && (oldest.Event == nil || remainingUnexhausted == 0) {
+ if sndPhase && sndPhaseHasResultsPending && (oldest.Event.ID == nostr.ZeroID || remainingUnexhausted == 0) {
// fmt.Println("second phase aggregation!")
// when we are in the second phase we will aggressively aggregate results on every iteration
//
secondBatch = secondBatch[:0]
- for s := 0; s < len(secondPhaseParticipants); s++ {
- q := secondPhaseParticipants[s]
+ for s := 0; s < len(sndPhaseParticipants); s++ {
+ q := sndPhaseParticipants[s]
if len(results[q]) > 0 {
secondBatch = append(secondBatch, results[q])
}
if exhausted[q] {
- secondPhaseParticipants = internal.SwapDelete(secondPhaseParticipants, s)
+ sndPhaseParticipants = internal.SwapDelete(sndPhaseParticipants, s)
s--
}
}
@@ -294,29 +284,29 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
// every time we get here we will alternate between these A and B lists
// combining everything we have into a new partial results list.
// after we've done that we can again set the oldest.
- // fmt.Println(" xxx", secondPhaseResultsToggle)
- if secondPhaseResultsToggle {
- secondBatch = append(secondBatch, secondPhaseResultsB)
- secondPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsA)
- oldest = secondPhaseResultsA[len(secondPhaseResultsA)-1]
- // fmt.Println(" new aggregated a", len(secondPhaseResultsB))
+ // fmt.Println(" xxx", sndPhaseResultsToggle)
+ if sndPhaseResultsToggle {
+ secondBatch = append(secondBatch, sndPhaseResultsB)
+ sndPhaseResultsA = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsA)
+ oldest = sndPhaseResultsA[len(sndPhaseResultsA)-1]
+ // fmt.Println(" new aggregated a", len(sndPhaseResultsB))
} else {
- secondBatch = append(secondBatch, secondPhaseResultsA)
- secondPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, secondPhaseResultsB)
- oldest = secondPhaseResultsB[len(secondPhaseResultsB)-1]
- // fmt.Println(" new aggregated b", len(secondPhaseResultsB))
+ secondBatch = append(secondBatch, sndPhaseResultsA)
+ sndPhaseResultsB = internal.MergeSortMultiple(secondBatch, limit, sndPhaseResultsB)
+ oldest = sndPhaseResultsB[len(sndPhaseResultsB)-1]
+ // fmt.Println(" new aggregated b", len(sndPhaseResultsB))
}
- secondPhaseResultsToggle = !secondPhaseResultsToggle
+ sndPhaseResultsToggle = !sndPhaseResultsToggle
since = uint32(oldest.CreatedAt)
// fmt.Println(" new since", since)
// reset the `results` list so we can keep using it
results = results[:len(queries)]
- for _, q := range secondPhaseParticipants {
+ for _, q := range sndPhaseParticipants {
results[q] = results[q][:0]
}
- } else if !secondPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
+ } else if !sndPhase && firstPhaseTotalPulled >= limit && remainingUnexhausted > 0 {
// fmt.Println("have enough!", firstPhaseTotalPulled, "/", limit, "remaining", remainingUnexhausted)
// we will exclude this oldest number as it is not relevant anymore
@@ -360,16 +350,16 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
results[q] = results[q][:0]
// build this index of indexes with everybody who remains
- secondPhaseParticipants = append(secondPhaseParticipants, q)
+ sndPhaseParticipants = append(sndPhaseParticipants, q)
}
// we create these two lists and alternate between them so we don't have to create a
// a new one every time
- secondPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
- secondPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
+ sndPhaseResultsA = make([]internal.IterEvent, 0, limit*2)
+ sndPhaseResultsB = make([]internal.IterEvent, 0, limit*2)
// from now on we won't run this block anymore
- secondPhase = true
+ sndPhase = true
}
// fmt.Println("remaining", remainingUnexhausted)
@@ -378,27 +368,27 @@ func (b *LMDBBackend) query(txn *lmdb.Txn, filter nostr.Filter, limit int) ([]in
}
}
- // fmt.Println("is secondPhase?", secondPhase)
+ // fmt.Println("is sndPhase?", sndPhase)
var combinedResults []internal.IterEvent
- if secondPhase {
+ if sndPhase {
// fmt.Println("ending second phase")
- // when we reach this point either secondPhaseResultsA or secondPhaseResultsB will be full of stuff,
+ // when we reach this point either sndPhaseResultsA or sndPhaseResultsB will be full of stuff,
// the other will be empty
- var secondPhaseResults []internal.IterEvent
- // fmt.Println("xxx", secondPhaseResultsToggle, len(secondPhaseResultsA), len(secondPhaseResultsB))
- if secondPhaseResultsToggle {
- secondPhaseResults = secondPhaseResultsB
- combinedResults = secondPhaseResultsA[0:limit] // reuse this
- // fmt.Println(" using b", len(secondPhaseResultsA))
+ var sndPhaseResults []internal.IterEvent
+ // fmt.Println("xxx", sndPhaseResultsToggle, len(sndPhaseResultsA), len(sndPhaseResultsB))
+ if sndPhaseResultsToggle {
+ sndPhaseResults = sndPhaseResultsB
+ combinedResults = sndPhaseResultsA[0:limit] // reuse this
+ // fmt.Println(" using b", len(sndPhaseResultsA))
} else {
- secondPhaseResults = secondPhaseResultsA
- combinedResults = secondPhaseResultsB[0:limit] // reuse this
- // fmt.Println(" using a", len(secondPhaseResultsA))
+ sndPhaseResults = sndPhaseResultsA
+ combinedResults = sndPhaseResultsB[0:limit] // reuse this
+ // fmt.Println(" using a", len(sndPhaseResultsA))
}
- all := [][]internal.IterEvent{firstPhaseResults, secondPhaseResults}
+ all := [][]internal.IterEvent{firstPhaseResults, sndPhaseResults}
combinedResults = internal.MergeSortMultiple(all, limit, combinedResults)
// fmt.Println("final combinedResults", len(combinedResults), cap(combinedResults), limit)
} else {
diff --git a/eventstore/lmdb/replace.go b/eventstore/lmdb/replace.go
index 27aa834..f527212 100644
--- a/eventstore/lmdb/replace.go
+++ b/eventstore/lmdb/replace.go
@@ -1,23 +1,22 @@
package lmdb
import (
- "context"
"fmt"
"math"
- "github.com/PowerDNS/lmdb-go/lmdb"
- "fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/eventstore/internal"
+ "github.com/PowerDNS/lmdb-go/lmdb"
)
-func (b *LMDBBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
}
return b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
- filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
+ filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
// when addressable, add the "d" tag to the filter
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
diff --git a/eventstore/lmdb/save.go b/eventstore/lmdb/save.go
index 80a0382..703fcbb 100644
--- a/eventstore/lmdb/save.go
+++ b/eventstore/lmdb/save.go
@@ -1,18 +1,16 @@
package lmdb
import (
- "context"
- "encoding/hex"
"fmt"
"math"
- "github.com/PowerDNS/lmdb-go/lmdb"
+ "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
bin "fiatjaf.com/nostr/eventstore/internal/binary"
- "fiatjaf.com/nostr"
+ "github.com/PowerDNS/lmdb-go/lmdb"
)
-func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *LMDBBackend) SaveEvent(evt nostr.Event) error {
// sanity checking
if evt.CreatedAt > math.MaxUint32 || evt.Kind > math.MaxUint16 {
return fmt.Errorf("event with values out of expected boundaries")
@@ -35,8 +33,7 @@ func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
}
// check if we already have this id
- id, _ := hex.DecodeString(evt.ID)
- _, err := txn.Get(b.indexId, id)
+ _, err := txn.Get(b.indexId, evt.ID[0:8])
if operr, ok := err.(*lmdb.OpError); ok && operr.Errno != lmdb.NotFound {
// we will only proceed if we get a NotFound
return eventstore.ErrDupEvent
@@ -46,7 +43,7 @@ func (b *LMDBBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
})
}
-func (b *LMDBBackend) save(txn *lmdb.Txn, evt *nostr.Event) error {
+func (b *LMDBBackend) save(txn *lmdb.Txn, evt nostr.Event) error {
// encode to binary form so we'll save it
bin, err := bin.Marshal(evt)
if err != nil {
diff --git a/eventstore/negentropy.go b/eventstore/negentropy.go
deleted file mode 100644
index bbcb8d3..0000000
--- a/eventstore/negentropy.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package eventstore
-
-import "context"
-
-var negentropySessionKey = struct{}{}
-
-func IsNegentropySession(ctx context.Context) bool {
- return ctx.Value(negentropySessionKey) != nil
-}
-
-func SetNegentropy(ctx context.Context) context.Context {
- return context.WithValue(ctx, negentropySessionKey, struct{}{})
-}
diff --git a/eventstore/slicestore/lib.go b/eventstore/slicestore/lib.go
index f452e33..759b4e6 100644
--- a/eventstore/slicestore/lib.go
+++ b/eventstore/slicestore/lib.go
@@ -1,14 +1,15 @@
package slicestore
import (
- "context"
+ "bytes"
+ "cmp"
"fmt"
- "strings"
+ "iter"
"sync"
+ "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/internal"
- "fiatjaf.com/nostr"
"golang.org/x/exp/slices"
)
@@ -16,13 +17,13 @@ var _ eventstore.Store = (*SliceStore)(nil)
type SliceStore struct {
sync.Mutex
- internal []*nostr.Event
+ internal []nostr.Event
MaxLimit int
}
func (b *SliceStore) Init() error {
- b.internal = make([]*nostr.Event, 0, 5000)
+ b.internal = make([]nostr.Event, 0, 5000)
if b.MaxLimit == 0 {
b.MaxLimit = 500
}
@@ -31,50 +32,44 @@ func (b *SliceStore) Init() error {
func (b *SliceStore) Close() {}
-func (b *SliceStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
- ch := make(chan *nostr.Event)
- if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) {
- filter.Limit = b.MaxLimit
- }
+func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+ return func(yield func(nostr.Event) bool) {
+ if filter.Limit > b.MaxLimit || (filter.Limit == 0 && !filter.LimitZero) {
+ filter.Limit = b.MaxLimit
+ }
- // efficiently determine where to start and end
- start := 0
- end := len(b.internal)
- if filter.Until != nil {
- start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator)
- }
- if filter.Since != nil {
- end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator)
- }
+ // efficiently determine where to start and end
+ start := 0
+ end := len(b.internal)
+ if filter.Until != nil {
+ start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator)
+ }
+ if filter.Since != nil {
+ end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator)
+ }
- // ham
- if end < start {
- close(ch)
- return ch, nil
- }
+ // ham
+ if end < start {
+ return
+ }
- count := 0
- go func() {
+ count := 0
for _, event := range b.internal[start:end] {
if count == filter.Limit {
break
}
if filter.Matches(event) {
- select {
- case ch <- event:
- case <-ctx.Done():
+ if !yield(event) {
return
}
count++
}
}
- close(ch)
- }()
- return ch, nil
+ }
}
-func (b *SliceStore) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
+func (b *SliceStore) CountEvents(filter nostr.Filter) (int64, error) {
var val int64
for _, event := range b.internal {
if filter.Matches(event) {
@@ -84,7 +79,7 @@ func (b *SliceStore) CountEvents(ctx context.Context, filter nostr.Filter) (int6
return val, nil
}
-func (b *SliceStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *SliceStore) SaveEvent(evt nostr.Event) error {
idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
if found {
return eventstore.ErrDupEvent
@@ -97,8 +92,8 @@ func (b *SliceStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
return nil
}
-func (b *SliceStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
- idx, found := slices.BinarySearchFunc(b.internal, evt, eventComparator)
+func (b *SliceStore) DeleteEvent(id nostr.ID) error {
+ idx, found := slices.BinarySearchFunc(b.internal, id, eventIDComparator)
if !found {
// we don't have this event
return nil
@@ -110,24 +105,19 @@ func (b *SliceStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
return nil
}
-func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (b *SliceStore) ReplaceEvent(evt nostr.Event) error {
b.Lock()
defer b.Unlock()
- filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
+ filter := nostr.Filter{Limit: 1, Kinds: []uint16{evt.Kind}, Authors: []nostr.PubKey{evt.PubKey}}
if nostr.IsAddressableKind(evt.Kind) {
filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
}
- ch, err := b.QueryEvents(ctx, filter)
- if err != nil {
- return fmt.Errorf("failed to query before replacing: %w", err)
- }
-
shouldStore := true
- for previous := range ch {
+ for previous := range b.QueryEvents(filter) {
if internal.IsOlder(previous, evt) {
- if err := b.DeleteEvent(ctx, previous); err != nil {
+ if err := b.DeleteEvent(previous.ID); err != nil {
return fmt.Errorf("failed to delete event for replacing: %w", err)
}
} else {
@@ -136,7 +126,7 @@ func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
}
if shouldStore {
- if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent {
+ if err := b.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)
}
}
@@ -144,14 +134,18 @@ func (b *SliceStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
return nil
}
-func eventTimestampComparator(e *nostr.Event, t nostr.Timestamp) int {
+func eventTimestampComparator(e nostr.Event, t nostr.Timestamp) int {
return int(t) - int(e.CreatedAt)
}
-func eventComparator(a *nostr.Event, b *nostr.Event) int {
- c := int(b.CreatedAt) - int(a.CreatedAt)
+func eventIDComparator(e nostr.Event, i nostr.ID) int {
+ return bytes.Compare(i[:], e.ID[:])
+}
+
+func eventComparator(a nostr.Event, b nostr.Event) int {
+ c := cmp.Compare(b.CreatedAt, a.CreatedAt)
if c != 0 {
return c
}
- return strings.Compare(b.ID, a.ID)
+ return bytes.Compare(b.ID[:], a.ID[:])
}
diff --git a/eventstore/slicestore/slicestore_test.go b/eventstore/slicestore/slicestore_test.go
index cd7a646..d89741d 100644
--- a/eventstore/slicestore/slicestore_test.go
+++ b/eventstore/slicestore/slicestore_test.go
@@ -1,14 +1,12 @@
package slicestore
import (
- "context"
"testing"
"fiatjaf.com/nostr"
)
func TestBasicStuff(t *testing.T) {
- ctx := context.Background()
ss := &SliceStore{}
ss.Init()
defer ss.Close()
@@ -22,12 +20,11 @@ func TestBasicStuff(t *testing.T) {
if i%3 == 0 {
kind = 12
}
- ss.SaveEvent(ctx, &nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: kind})
+ ss.SaveEvent(nostr.Event{CreatedAt: nostr.Timestamp(v), Kind: uint16(kind)})
}
- ch, _ := ss.QueryEvents(ctx, nostr.Filter{})
- list := make([]*nostr.Event, 0, 20)
- for event := range ch {
+ list := make([]nostr.Event, 0, 20)
+ for event := range ss.QueryEvents(nostr.Filter{}) {
list = append(list, event)
}
@@ -39,9 +36,8 @@ func TestBasicStuff(t *testing.T) {
}
until := nostr.Timestamp(9999)
- ch, _ = ss.QueryEvents(ctx, nostr.Filter{Limit: 15, Until: &until, Kinds: []int{11}})
- list = make([]*nostr.Event, 0, 7)
- for event := range ch {
+ list = make([]nostr.Event, 0, 7)
+ for event := range ss.QueryEvents(nostr.Filter{Limit: 15, Until: &until, Kinds: []uint16{11}}) {
list = append(list, event)
}
if len(list) != 7 {
@@ -49,9 +45,8 @@ func TestBasicStuff(t *testing.T) {
}
since := nostr.Timestamp(10009)
- ch, _ = ss.QueryEvents(ctx, nostr.Filter{Since: &since})
- list = make([]*nostr.Event, 0, 5)
- for event := range ch {
+ list = make([]nostr.Event, 0, 5)
+ for event := range ss.QueryEvents(nostr.Filter{Since: &since}) {
list = append(list, event)
}
if len(list) != 5 {
diff --git a/eventstore/store.go b/eventstore/store.go
index 6877c4b..077da8f 100644
--- a/eventstore/store.go
+++ b/eventstore/store.go
@@ -1,7 +1,7 @@
package eventstore
import (
- "context"
+ "iter"
"fiatjaf.com/nostr"
)
@@ -15,18 +15,19 @@ type Store interface {
// Close must be called after you're done using the store, to free up resources and so on.
Close()
- // QueryEvents should return a channel with the events as they're recovered from a database.
- // the channel should be closed after the events are all delivered.
- QueryEvents(context.Context, nostr.Filter) (chan *nostr.Event, error)
- // DeleteEvent just deletes an event, no side-effects.
- DeleteEvent(context.Context, *nostr.Event) error
+ // QueryEvents returns events that match the filter
+ QueryEvents(nostr.Filter) iter.Seq[nostr.Event]
+
+ // DeleteEvent deletes an event atomically by ID
+ DeleteEvent(nostr.ID) error
+
// SaveEvent just saves an event, no side-effects.
- SaveEvent(context.Context, *nostr.Event) error
+ SaveEvent(nostr.Event) error
+
// ReplaceEvent atomically replaces a replaceable or addressable event.
// Conceptually it is like a Query->Delete->Save, but streamlined.
- ReplaceEvent(context.Context, *nostr.Event) error
-}
+ ReplaceEvent(nostr.Event) error
-type Counter interface {
- CountEvents(context.Context, nostr.Filter) (int64, error)
+ // CountEvents counts all events that match a given filter
+ CountEvents(nostr.Filter) (int64, error)
}
diff --git a/eventstore/test/db_test.go b/eventstore/test/db_test.go
index 90db4c3..4b307e2 100644
--- a/eventstore/test/db_test.go
+++ b/eventstore/test/db_test.go
@@ -5,13 +5,10 @@ import (
"os"
"testing"
- embeddedpostgres "github.com/fergusstrange/embedded-postgres"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/badger"
"fiatjaf.com/nostr/eventstore/lmdb"
- "fiatjaf.com/nostr/eventstore/postgresql"
"fiatjaf.com/nostr/eventstore/slicestore"
- "fiatjaf.com/nostr/eventstore/sqlite3"
)
const (
@@ -51,27 +48,3 @@ func TestBadger(t *testing.T) {
t.Run(test.name, func(t *testing.T) { test.run(t, &badger.BadgerBackend{Path: dbpath + "badger"}) })
}
}
-
-func TestSQLite(t *testing.T) {
- for _, test := range tests {
- os.RemoveAll(dbpath + "sqlite")
- t.Run(test.name, func(t *testing.T) {
- test.run(t, &sqlite3.SQLite3Backend{DatabaseURL: dbpath + "sqlite", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000})
- })
- }
-}
-
-func TestPostgres(t *testing.T) {
- for _, test := range tests {
- postgres := embeddedpostgres.NewDatabase()
- err := postgres.Start()
- if err != nil {
- t.Fatalf("failed to start embedded postgres: %s", err)
- return
- }
- t.Run(test.name, func(t *testing.T) {
- test.run(t, &postgresql.PostgresBackend{DatabaseURL: "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable", QueryLimit: 1000, QueryTagsLimit: 50, QueryAuthorsLimit: 2000})
- })
- postgres.Stop()
- }
-}
diff --git a/filter.go b/filter.go
index 6d15c1d..8c5edf6 100644
--- a/filter.go
+++ b/filter.go
@@ -27,7 +27,7 @@ func (ef Filter) String() string {
return string(j)
}
-func (ef Filter) Matches(event *Event) bool {
+func (ef Filter) Matches(event Event) bool {
if !ef.MatchesIgnoringTimestampConstraints(event) {
return false
}
@@ -43,11 +43,7 @@ func (ef Filter) Matches(event *Event) bool {
return true
}
-func (ef Filter) MatchesIgnoringTimestampConstraints(event *Event) bool {
- if event == nil {
- return false
- }
-
+func (ef Filter) MatchesIgnoringTimestampConstraints(event Event) bool {
if ef.IDs != nil && !slices.Contains(ef.IDs, event.ID) {
return false
}
diff --git a/interface.go b/interface.go
deleted file mode 100644
index 7149d2b..0000000
--- a/interface.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package nostr
-
-import (
- "context"
- "errors"
- "slices"
-)
-
-type RelayStore interface {
- Publish(context.Context, Event) error
- QueryEvents(context.Context, Filter) (chan *Event, error)
- QuerySync(context.Context, Filter) ([]*Event, error)
-}
-
-var (
- _ RelayStore = (*Relay)(nil)
- _ RelayStore = (*MultiStore)(nil)
-)
-
-type MultiStore []RelayStore
-
-func (multi MultiStore) Publish(ctx context.Context, event Event) error {
- errs := make([]error, len(multi))
- for i, s := range multi {
- errs[i] = s.Publish(ctx, event)
- }
- return errors.Join(errs...)
-}
-
-func (multi MultiStore) QueryEvents(ctx context.Context, filter Filter) (chan *Event, error) {
- multich := make(chan *Event)
-
- errs := make([]error, len(multi))
- var good bool
- for i, s := range multi {
- ch, err := s.QueryEvents(ctx, filter)
- errs[i] = err
- if err == nil {
- good = true
- go func(ch chan *Event) {
- for evt := range ch {
- multich <- evt
- }
- }(ch)
- }
- }
-
- if good {
- return multich, nil
- } else {
- return nil, errors.Join(errs...)
- }
-}
-
-func (multi MultiStore) QuerySync(ctx context.Context, filter Filter) ([]*Event, error) {
- errs := make([]error, len(multi))
- events := make([]*Event, 0, max(filter.Limit, 250))
- for i, s := range multi {
- res, err := s.QuerySync(ctx, filter)
- errs[i] = err
- events = append(events, res...)
- }
- slices.SortFunc(events, func(a, b *Event) int {
- if b.CreatedAt > a.CreatedAt {
- return 1
- } else if b.CreatedAt < a.CreatedAt {
- return -1
- }
- return 0
- })
- return events, errors.Join(errs...)
-}
diff --git a/keyer/bunker.go b/keyer/bunker.go
index 5f21391..1c8d625 100644
--- a/keyer/bunker.go
+++ b/keyer/bunker.go
@@ -25,12 +25,12 @@ func NewBunkerSignerFromBunkerClient(bc *nip46.BunkerClient) BunkerSigner {
// GetPublicKey retrieves the public key from the remote bunker.
// It uses a timeout to prevent hanging indefinitely.
-func (bs BunkerSigner) GetPublicKey(ctx context.Context) (string, error) {
+func (bs BunkerSigner) GetPublicKey(ctx context.Context) (nostr.PubKey, error) {
ctx, cancel := context.WithTimeoutCause(ctx, time.Second*30, errors.New("get_public_key took too long"))
defer cancel()
pk, err := bs.bunker.GetPublicKey(ctx)
if err != nil {
- return "", err
+ return nostr.ZeroPK, err
}
return pk, nil
}
@@ -44,11 +44,11 @@ func (bs BunkerSigner) SignEvent(ctx context.Context, evt *nostr.Event) error {
}
// Encrypt encrypts a plaintext message for a recipient using the remote bunker.
-func (bs BunkerSigner) Encrypt(ctx context.Context, plaintext string, recipient string) (string, error) {
+func (bs BunkerSigner) Encrypt(ctx context.Context, plaintext string, recipient nostr.PubKey) (string, error) {
return bs.bunker.NIP44Encrypt(ctx, recipient, plaintext)
}
// Decrypt decrypts a base64-encoded ciphertext from a sender using the remote bunker.
-func (bs BunkerSigner) Decrypt(ctx context.Context, base64ciphertext string, sender string) (plaintext string, err error) {
+func (bs BunkerSigner) Decrypt(ctx context.Context, base64ciphertext string, sender nostr.PubKey) (plaintext string, err error) {
-	return bs.bunker.NIP44Encrypt(ctx, sender, base64ciphertext)
+	return bs.bunker.NIP44Decrypt(ctx, sender, base64ciphertext)
}
diff --git a/keyer/encrypted.go b/keyer/encrypted.go
index 99d592e..8d5fece 100644
--- a/keyer/encrypted.go
+++ b/keyer/encrypted.go
@@ -16,26 +16,23 @@ var _ nostr.Keyer = (*EncryptedKeySigner)(nil)
// when needed for operations.
type EncryptedKeySigner struct {
ncryptsec string
- pk string
+ pk nostr.PubKey
callback func(context.Context) string
}
// GetPublicKey returns the public key associated with this signer.
// If the public key is not cached, it will decrypt the private key using the password
// callback to derive the public key.
-func (es *EncryptedKeySigner) GetPublicKey(ctx context.Context) (string, error) {
- if es.pk != "" {
+func (es *EncryptedKeySigner) GetPublicKey(ctx context.Context) (nostr.PubKey, error) {
+ if es.pk != nostr.ZeroPK {
return es.pk, nil
}
password := es.callback(ctx)
key, err := nip49.Decrypt(es.ncryptsec, password)
if err != nil {
- return "", err
- }
- pk, err := nostr.GetPublicKey(key)
- if err != nil {
- return "", err
+ return nostr.ZeroPK, err
}
+ pk := nostr.GetPublicKey(key)
es.pk = pk
return pk, nil
}
@@ -54,7 +51,7 @@ func (es *EncryptedKeySigner) SignEvent(ctx context.Context, evt *nostr.Event) e
// Encrypt encrypts a plaintext message for a recipient using NIP-44.
// It first decrypts the private key using the password callback.
-func (es EncryptedKeySigner) Encrypt(ctx context.Context, plaintext string, recipient string) (c64 string, err error) {
+func (es EncryptedKeySigner) Encrypt(ctx context.Context, plaintext string, recipient nostr.PubKey) (c64 string, err error) {
password := es.callback(ctx)
sk, err := nip49.Decrypt(es.ncryptsec, password)
if err != nil {
@@ -69,7 +66,7 @@ func (es EncryptedKeySigner) Encrypt(ctx context.Context, plaintext string, reci
// Decrypt decrypts a base64-encoded ciphertext from a sender using NIP-44.
// It first decrypts the private key using the password callback.
-func (es EncryptedKeySigner) Decrypt(ctx context.Context, base64ciphertext string, sender string) (plaintext string, err error) {
+func (es EncryptedKeySigner) Decrypt(ctx context.Context, base64ciphertext string, sender nostr.PubKey) (plaintext string, err error) {
password := es.callback(ctx)
sk, err := nip49.Decrypt(es.ncryptsec, password)
if err != nil {
diff --git a/keyer/lib.go b/keyer/lib.go
index a027cc5..aaa2fe9 100644
--- a/keyer/lib.go
+++ b/keyer/lib.go
@@ -53,7 +53,7 @@ type SignerOptions struct {
// The context is used for operations that may require network access.
// The pool is used for relay connections when needed.
// Options are used for additional pieces required for EncryptedKeySigner and BunkerSigner.
-func New(ctx context.Context, pool *nostr.SimplePool, input string, opts *SignerOptions) (nostr.Keyer, error) {
+func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOptions) (nostr.Keyer, error) {
if opts == nil {
opts = &SignerOptions{}
}
@@ -69,7 +69,7 @@ func New(ctx context.Context, pool *nostr.SimplePool, input string, opts *Signer
}
return nil, fmt.Errorf("failed to decrypt with given password: %w", err)
}
- pk, _ := nostr.GetPublicKey(sec)
+ pk := nostr.GetPublicKey(sec)
-	return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
+	return KeySigner{sec, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
} else if nip46.IsValidBunkerURL(input) || nip05.IsValidIdentifier(input) {
bcsk := nostr.GeneratePrivateKey()
diff --git a/keyer/manual.go b/keyer/manual.go
index 4a4cbca..e5d9866 100644
--- a/keyer/manual.go
+++ b/keyer/manual.go
@@ -14,16 +14,16 @@ var _ nostr.Keyer = (*ManualSigner)(nil)
// app wants to implement custom signing logic.
type ManualSigner struct {
// ManualGetPublicKey is called when the public key is needed
- ManualGetPublicKey func(context.Context) (string, error)
+ ManualGetPublicKey func(context.Context) (nostr.PubKey, error)
// ManualSignEvent is called when an event needs to be signed
ManualSignEvent func(context.Context, *nostr.Event) error
// ManualEncrypt is called when a message needs to be encrypted
- ManualEncrypt func(ctx context.Context, plaintext string, recipientPublicKey string) (base64ciphertext string, err error)
+ ManualEncrypt func(ctx context.Context, plaintext string, recipientPublicKey nostr.PubKey) (base64ciphertext string, err error)
// ManualDecrypt is called when a message needs to be decrypted
- ManualDecrypt func(ctx context.Context, base64ciphertext string, senderPublicKey string) (plaintext string, err error)
+ ManualDecrypt func(ctx context.Context, base64ciphertext string, senderPublicKey nostr.PubKey) (plaintext string, err error)
}
// SignEvent delegates event signing to the ManualSignEvent function.
@@ -32,16 +32,16 @@ func (ms ManualSigner) SignEvent(ctx context.Context, evt *nostr.Event) error {
}
// GetPublicKey delegates public key retrieval to the ManualGetPublicKey function.
-func (ms ManualSigner) GetPublicKey(ctx context.Context) (string, error) {
+func (ms ManualSigner) GetPublicKey(ctx context.Context) (nostr.PubKey, error) {
return ms.ManualGetPublicKey(ctx)
}
// Encrypt delegates encryption to the ManualEncrypt function.
-func (ms ManualSigner) Encrypt(ctx context.Context, plaintext string, recipient string) (c64 string, err error) {
+func (ms ManualSigner) Encrypt(ctx context.Context, plaintext string, recipient nostr.PubKey) (c64 string, err error) {
return ms.ManualEncrypt(ctx, plaintext, recipient)
}
// Decrypt delegates decryption to the ManualDecrypt function.
-func (ms ManualSigner) Decrypt(ctx context.Context, base64ciphertext string, sender string) (plaintext string, err error) {
+func (ms ManualSigner) Decrypt(ctx context.Context, base64ciphertext string, sender nostr.PubKey) (plaintext string, err error) {
return ms.ManualDecrypt(ctx, base64ciphertext, sender)
}
diff --git a/keyer/plain.go b/keyer/plain.go
index e9d41b0..cdac081 100644
--- a/keyer/plain.go
+++ b/keyer/plain.go
@@ -12,20 +12,16 @@ var _ nostr.Keyer = (*KeySigner)(nil)
// KeySigner is a signer that holds the private key in memory
type KeySigner struct {
- sk string
- pk string
+ sk [32]byte
+ pk nostr.PubKey
- conversationKeys *xsync.MapOf[string, [32]byte]
+ conversationKeys *xsync.MapOf[nostr.PubKey, [32]byte]
}
// NewPlainKeySigner creates a new KeySigner from a private key.
// Returns an error if the private key is invalid.
-func NewPlainKeySigner(sec string) (KeySigner, error) {
- pk, err := nostr.GetPublicKey(sec)
- if err != nil {
- return KeySigner{}, err
- }
- return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
+func NewPlainKeySigner(sec [32]byte) (KeySigner, error) {
+ return KeySigner{sec, nostr.GetPublicKey(sec), xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
}
// SignEvent signs the provided event with the signer's private key.
@@ -33,11 +29,11 @@ func NewPlainKeySigner(sec string) (KeySigner, error) {
func (ks KeySigner) SignEvent(ctx context.Context, evt *nostr.Event) error { return evt.Sign(ks.sk) }
// GetPublicKey returns the public key associated with this signer.
-func (ks KeySigner) GetPublicKey(ctx context.Context) (string, error) { return ks.pk, nil }
+func (ks KeySigner) GetPublicKey(ctx context.Context) (nostr.PubKey, error) { return ks.pk, nil }
// Encrypt encrypts a plaintext message for a recipient using NIP-44.
// It caches conversation keys for efficiency in repeated operations.
-func (ks KeySigner) Encrypt(ctx context.Context, plaintext string, recipient string) (string, error) {
+func (ks KeySigner) Encrypt(ctx context.Context, plaintext string, recipient nostr.PubKey) (string, error) {
ck, ok := ks.conversationKeys.Load(recipient)
if !ok {
var err error
@@ -52,7 +48,7 @@ func (ks KeySigner) Encrypt(ctx context.Context, plaintext string, recipient str
// Decrypt decrypts a base64-encoded ciphertext from a sender using NIP-44.
// It caches conversation keys for efficiency in repeated operations.
-func (ks KeySigner) Decrypt(ctx context.Context, base64ciphertext string, sender string) (string, error) {
+func (ks KeySigner) Decrypt(ctx context.Context, base64ciphertext string, sender nostr.PubKey) (string, error) {
ck, ok := ks.conversationKeys.Load(sender)
if !ok {
var err error
diff --git a/khatru/blossom/handlers.go b/khatru/blossom/handlers.go
index eb7cfda..b5bbb64 100644
--- a/khatru/blossom/handlers.go
+++ b/khatru/blossom/handlers.go
@@ -11,8 +11,8 @@ import (
"strings"
"time"
- "github.com/liamg/magic"
"fiatjaf.com/nostr"
+ "github.com/liamg/magic"
)
func (bs BlossomServer) handleUploadCheck(w http.ResponseWriter, r *http.Request) {
@@ -40,8 +40,8 @@ func (bs BlossomServer) handleUploadCheck(w http.ResponseWriter, r *http.Request
// get the file size from the incoming header
size, _ := strconv.Atoi(r.Header.Get("X-Content-Length"))
- for _, rb := range bs.RejectUpload {
- reject, reason, code := rb(r.Context(), auth, size, ext)
+ if bs.RejectUpload != nil {
+ reject, reason, code := bs.RejectUpload(r.Context(), auth, size, ext)
if reject {
blossomError(w, reason, code)
return
@@ -336,13 +336,13 @@ func (bs BlossomServer) handleReport(w http.ResponseWriter, r *http.Request) {
return
}
- var evt *nostr.Event
- if err := json.Unmarshal(body, evt); err != nil {
+ var evt nostr.Event
+ if err := json.Unmarshal(body, &evt); err != nil {
blossomError(w, "can't parse event", 400)
return
}
- if isValid, _ := evt.CheckSignature(); !isValid {
+ if !evt.VerifySignature() {
blossomError(w, "invalid report event is provided", 400)
return
}
@@ -352,8 +352,8 @@ func (bs BlossomServer) handleReport(w http.ResponseWriter, r *http.Request) {
return
}
- for _, rr := range bs.ReceiveReport {
- if err := rr(r.Context(), evt); err != nil {
+ if bs.ReceiveReport != nil {
+ if err := bs.ReceiveReport(r.Context(), evt); err != nil {
blossomError(w, "failed to receive report: "+err.Error(), 500)
return
}
diff --git a/khatru/blossom/server.go b/khatru/blossom/server.go
index 60a5284..7b5b380 100644
--- a/khatru/blossom/server.go
+++ b/khatru/blossom/server.go
@@ -6,23 +6,23 @@ import (
"net/http"
"strings"
- "fiatjaf.com/nostr/khatru"
"fiatjaf.com/nostr"
+ "fiatjaf.com/nostr/khatru"
)
type BlossomServer struct {
ServiceURL string
Store BlobIndex
- StoreBlob []func(ctx context.Context, sha256 string, body []byte) error
- LoadBlob []func(ctx context.Context, sha256 string) (io.ReadSeeker, error)
- DeleteBlob []func(ctx context.Context, sha256 string) error
- ReceiveReport []func(ctx context.Context, reportEvt *nostr.Event) error
+ StoreBlob func(ctx context.Context, sha256 string, body []byte) error
+ LoadBlob func(ctx context.Context, sha256 string) (io.ReadSeeker, error)
+ DeleteBlob func(ctx context.Context, sha256 string) error
+ ReceiveReport func(ctx context.Context, reportEvt nostr.Event) error
- RejectUpload []func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int)
- RejectGet []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
- RejectList []func(ctx context.Context, auth *nostr.Event, pubkey string) (bool, string, int)
- RejectDelete []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
+ RejectUpload func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int)
+ RejectGet func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
+ RejectList func(ctx context.Context, auth *nostr.Event, pubkey string) (bool, string, int)
+ RejectDelete func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
}
func New(rl *khatru.Relay, serviceURL string) *BlossomServer {
diff --git a/khatru/examples/basic-elasticsearch/main.go b/khatru/examples/basic-elasticsearch/main.go
deleted file mode 100644
index f0e8b9b..0000000
--- a/khatru/examples/basic-elasticsearch/main.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package main
-
-import (
- "fmt"
- "net/http"
-
- "fiatjaf.com/nostr/eventstore/elasticsearch"
- "fiatjaf.com/nostr/khatru"
-)
-
-func main() {
- relay := khatru.NewRelay()
-
- db := elasticsearch.ElasticsearchStorage{URL: ""}
- if err := db.Init(); err != nil {
- panic(err)
- }
-
- relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
- relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
- relay.CountEvents = append(relay.CountEvents, db.CountEvents)
- relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
- relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
-
- fmt.Println("running on :3334")
- http.ListenAndServe(":3334", relay)
-}
diff --git a/khatru/examples/basic-postgres/main.go b/khatru/examples/basic-postgres/main.go
deleted file mode 100644
index 0d43e10..0000000
--- a/khatru/examples/basic-postgres/main.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package main
-
-import (
- "fmt"
- "net/http"
-
- "fiatjaf.com/nostr/eventstore/postgresql"
- "fiatjaf.com/nostr/khatru"
-)
-
-func main() {
- relay := khatru.NewRelay()
-
- db := postgresql.PostgresBackend{DatabaseURL: "postgresql://localhost:5432/tmp-khatru-relay"}
- if err := db.Init(); err != nil {
- panic(err)
- }
-
- relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
- relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
- relay.CountEvents = append(relay.CountEvents, db.CountEvents)
- relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
- relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
-
- fmt.Println("running on :3334")
- http.ListenAndServe(":3334", relay)
-}
diff --git a/khatru/examples/basic-sqlite3/main.go b/khatru/examples/basic-sqlite3/main.go
deleted file mode 100644
index db91cb5..0000000
--- a/khatru/examples/basic-sqlite3/main.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package main
-
-import (
- "fmt"
- "net/http"
-
- "fiatjaf.com/nostr/eventstore/sqlite3"
- "fiatjaf.com/nostr/khatru"
-)
-
-func main() {
- relay := khatru.NewRelay()
-
- db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
- if err := db.Init(); err != nil {
- panic(err)
- }
-
- relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
- relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
- relay.CountEvents = append(relay.CountEvents, db.CountEvents)
- relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
- relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
-
- fmt.Println("running on :3334")
- http.ListenAndServe(":3334", relay)
-}
diff --git a/khatru/examples/readme-demo/demo-memory b/khatru/examples/readme-demo/demo-memory
deleted file mode 100755
index 55ffe0b..0000000
Binary files a/khatru/examples/readme-demo/demo-memory and /dev/null differ
diff --git a/khatru/examples/readme-demo/main.go b/khatru/examples/readme-demo/main.go
index 6edbe1b..f31a1a3 100644
--- a/khatru/examples/readme-demo/main.go
+++ b/khatru/examples/readme-demo/main.go
@@ -6,9 +6,9 @@ import (
"log"
"net/http"
+ "fiatjaf.com/nostr"
"fiatjaf.com/nostr/khatru"
"fiatjaf.com/nostr/khatru/policies"
- "fiatjaf.com/nostr"
)
func main() {
diff --git a/khatru/go.mod b/khatru/go.mod
deleted file mode 100644
index 1f5db35..0000000
--- a/khatru/go.mod
+++ /dev/null
@@ -1,72 +0,0 @@
-module github.com/fiatjaf/khatru
-
-go 1.24.1
-
-require (
- github.com/bep/debounce v1.2.1
- github.com/fasthttp/websocket v1.5.12
- github.com/fiatjaf/eventstore v0.16.2
- github.com/liamg/magic v0.0.1
- github.com/mailru/easyjson v0.9.0
- github.com/nbd-wtf/go-nostr v0.51.8
- github.com/puzpuzpuz/xsync/v3 v3.5.1
- github.com/rs/cors v1.11.1
- github.com/stretchr/testify v1.10.0
-)
-
-require (
- fiatjaf.com/lib v0.2.0 // indirect
- github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
- github.com/PowerDNS/lmdb-go v1.9.3 // indirect
- github.com/andybalholm/brotli v1.1.1 // indirect
- github.com/aquasecurity/esquery v0.2.0 // indirect
- github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
- github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
- github.com/bytedance/sonic v1.13.2 // indirect
- github.com/bytedance/sonic/loader v0.2.4 // indirect
- github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/cloudwego/base64x v0.1.5 // indirect
- github.com/coder/websocket v1.8.13 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
- github.com/dgraph-io/badger/v4 v4.5.0 // indirect
- github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect
- github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect
- github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect
- github.com/elastic/go-elasticsearch/v8 v8.16.0 // indirect
- github.com/fatih/structs v1.1.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
- github.com/go-logr/stdr v1.2.2 // indirect
- github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
- github.com/google/flatbuffers v24.12.23+incompatible // indirect
- github.com/jmoiron/sqlx v1.4.0 // indirect
- github.com/josharian/intern v1.0.0 // indirect
- github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.18.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.10 // indirect
- github.com/lib/pq v1.10.9 // indirect
- github.com/mattn/go-sqlite3 v1.14.24 // indirect
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
- github.com/tidwall/gjson v1.18.0 // indirect
- github.com/tidwall/match v1.1.1 // indirect
- github.com/tidwall/pretty v1.2.1 // indirect
- github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
- github.com/valyala/bytebufferpool v1.0.0 // indirect
- github.com/valyala/fasthttp v1.59.0 // indirect
- go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/otel v1.32.0 // indirect
- go.opentelemetry.io/otel/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/trace v1.32.0 // indirect
- golang.org/x/arch v0.15.0 // indirect
- golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
- golang.org/x/net v0.37.0 // indirect
- golang.org/x/sys v0.31.0 // indirect
- google.golang.org/protobuf v1.36.2 // indirect
- gopkg.in/yaml.v3 v3.0.1 // indirect
-)
diff --git a/khatru/go.sum b/khatru/go.sum
deleted file mode 100644
index b0de1e8..0000000
--- a/khatru/go.sum
+++ /dev/null
@@ -1,254 +0,0 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-fiatjaf.com/lib v0.2.0 h1:TgIJESbbND6GjOgGHxF5jsO6EMjuAxIzZHPo5DXYexs=
-fiatjaf.com/lib v0.2.0/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g=
-filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
-filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
-github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
-github.com/PowerDNS/lmdb-go v1.9.3 h1:AUMY2pZT8WRpkEv39I9Id3MuoHd+NZbTVpNhruVkPTg=
-github.com/PowerDNS/lmdb-go v1.9.3/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU=
-github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
-github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
-github.com/aquasecurity/esquery v0.2.0 h1:9WWXve95TE8hbm3736WB7nS6Owl8UGDeu+0jiyE9ttA=
-github.com/aquasecurity/esquery v0.2.0/go.mod h1:VU+CIFR6C+H142HHZf9RUkp4Eedpo9UrEKeCQHWf9ao=
-github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
-github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
-github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
-github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
-github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
-github.com/bytedance/sonic v1.13.1 h1:Jyd5CIvdFnkOWuKXr+wm4Nyk2h0yAFsr8ucJgEasO3g=
-github.com/bytedance/sonic v1.13.1/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
-github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ=
-github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
-github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
-github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
-github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
-github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
-github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
-github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
-github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
-github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
-github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
-github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g=
-github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A=
-github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I=
-github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4=
-github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
-github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
-github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA=
-github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk=
-github.com/elastic/go-elasticsearch/v7 v7.6.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
-github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo=
-github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
-github.com/elastic/go-elasticsearch/v8 v8.16.0 h1:f7bR+iBz8GTAVhwyFO3hm4ixsz2eMaEy0QroYnXV3jE=
-github.com/elastic/go-elasticsearch/v8 v8.16.0/go.mod h1:lGMlgKIbYoRvay3xWBeKahAiJOgmFDsjZC39nmO3H64=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fasthttp/websocket v1.5.12 h1:e4RGPpWW2HTbL3zV0Y/t7g0ub294LkiuXXUuTOUInlE=
-github.com/fasthttp/websocket v1.5.12/go.mod h1:I+liyL7/4moHojiOgUOIKEWm9EIxHqxZChS+aMFltyg=
-github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/fiatjaf/eventstore v0.16.2 h1:h4rHwSwPcqAKqWUsAbYWUhDeSgm2Kp+PBkJc3FgBYu4=
-github.com/fiatjaf/eventstore v0.16.2/go.mod h1:0gU8fzYO/bG+NQAVlHtJWOlt3JKKFefh5Xjj2d1dLIs=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
-github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
-github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8=
-github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/jgroeneveld/schema v1.0.0 h1:J0E10CrOkiSEsw6dfb1IfrDJD14pf6QLVJ3tRPl/syI=
-github.com/jgroeneveld/schema v1.0.0/go.mod h1:M14lv7sNMtGvo3ops1MwslaSYgDYxrSmbzWIQ0Mr5rs=
-github.com/jgroeneveld/trial v2.0.0+incompatible h1:d59ctdgor+VqdZCAiUfVN8K13s0ALDioG5DWwZNtRuQ=
-github.com/jgroeneveld/trial v2.0.0+incompatible/go.mod h1:I6INLW96EN8WysNBXUFI3M4RIC8ePg9ntAc/Wy+U/+M=
-github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
-github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
-github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
-github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
-github.com/liamg/magic v0.0.1 h1:Ru22ElY+sCh6RvRTWjQzKKCxsEco8hE0co8n1qe7TBM=
-github.com/liamg/magic v0.0.1/go.mod h1:yQkOmZZI52EA+SQ2xyHpVw8fNvTBruF873Y+Vt6S+fk=
-github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
-github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
-github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
-github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
-github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/nbd-wtf/go-nostr v0.51.7 h1:dGjtaaFQ1kA3H+vF8wt9a9WYl54K8C0JmVDf4cp+a4A=
-github.com/nbd-wtf/go-nostr v0.51.7/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg=
-github.com/nbd-wtf/go-nostr v0.51.8 h1:CIoS+YqChcm4e1L1rfMZ3/mIwTz4CwApM2qx7MHNzmE=
-github.com/nbd-wtf/go-nostr v0.51.8/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
-github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
-github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
-github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
-github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
-github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
-github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
-github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
-github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
-github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
-github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
-github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
-github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
-github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.59.0 h1:Qu0qYHfXvPk1mSLNqcFtEk6DpxgA26hy6bmydotDpRI=
-github.com/valyala/fasthttp v1.59.0/go.mod h1:GTxNb9Bc6r2a9D0TWNSPwDz78UxnTGBViY3xZNEqyYU=
-github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
-github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
-go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
-go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
-go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
-go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
-go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
-go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
-go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
-go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
-golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw=
-golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
-golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
-google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
-google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
diff --git a/khatru/handlers.go b/khatru/handlers.go
index 3883a35..b66a720 100644
--- a/khatru/handlers.go
+++ b/khatru/handlers.go
@@ -12,8 +12,6 @@ import (
"time"
"unsafe"
- "github.com/bep/debounce"
- "github.com/fasthttp/websocket"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip42"
"fiatjaf.com/nostr/nip45"
@@ -21,6 +19,8 @@ import (
"fiatjaf.com/nostr/nip70"
"fiatjaf.com/nostr/nip77"
"fiatjaf.com/nostr/nip77/negentropy"
+ "github.com/bep/debounce"
+ "github.com/fasthttp/websocket"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/cors"
)
@@ -53,8 +53,8 @@ func (rl *Relay) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
- for _, reject := range rl.RejectConnection {
- if reject(r) {
+ if rl.RejectConnection != nil {
+ if rl.RejectConnection(r) {
w.WriteHeader(429) // Too many requests
return
}
@@ -92,8 +92,8 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
)
kill := func() {
- for _, ondisconnect := range rl.OnDisconnect {
- ondisconnect(ctx)
+ if rl.OnDisconnect != nil {
+ rl.OnDisconnect(ctx)
}
ticker.Stop()
@@ -114,8 +114,8 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
return nil
})
- for _, onconnect := range rl.OnConnect {
- onconnect(ctx)
+ if rl.OnConnect != nil {
+ rl.OnConnect(ctx)
}
smp := nostr.NewMessageParser()
@@ -169,10 +169,7 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
}
// check signature
- if ok, err := env.Event.CheckSignature(); err != nil {
- ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to verify signature"})
- return
- } else if !ok {
+ if !env.Event.VerifySignature() {
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "invalid: signature is invalid"})
return
}
@@ -228,9 +225,6 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
var reason string
if writeErr == nil {
ok = true
- for _, ovw := range srl.OverwriteResponseEvent {
- ovw(ctx, &env.Event)
- }
if !skipBroadcast {
n := srl.notifyListeners(&env.Event)
diff --git a/khatru/nip86.go b/khatru/nip86.go
index 1703920..23e6628 100644
--- a/khatru/nip86.go
+++ b/khatru/nip86.go
@@ -20,13 +20,13 @@ import (
type RelayManagementAPI struct {
RejectAPICall []func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string)
- BanPubKey func(ctx context.Context, pubkey string, reason string) error
+ BanPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
ListBannedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
- AllowPubKey func(ctx context.Context, pubkey string, reason string) error
+ AllowPubKey func(ctx context.Context, pubkey nostr.PubKey, reason string) error
ListAllowedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
ListEventsNeedingModeration func(ctx context.Context) ([]nip86.IDReason, error)
- AllowEvent func(ctx context.Context, id string, reason string) error
- BanEvent func(ctx context.Context, id string, reason string) error
+ AllowEvent func(ctx context.Context, id nostr.ID, reason string) error
+ BanEvent func(ctx context.Context, id nostr.ID, reason string) error
ListBannedEvents func(ctx context.Context) ([]nip86.IDReason, error)
ListAllowedEvents func(ctx context.Context) ([]nip86.IDReason, error)
ChangeRelayName func(ctx context.Context, name string) error
@@ -40,8 +40,8 @@ type RelayManagementAPI struct {
UnblockIP func(ctx context.Context, ip net.IP, reason string) error
ListBlockedIPs func(ctx context.Context) ([]nip86.IPReason, error)
Stats func(ctx context.Context) (nip86.Response, error)
- GrantAdmin func(ctx context.Context, pubkey string, methods []string) error
- RevokeAdmin func(ctx context.Context, pubkey string, methods []string) error
+ GrantAdmin func(ctx context.Context, pubkey nostr.PubKey, methods []string) error
+ RevokeAdmin func(ctx context.Context, pubkey nostr.PubKey, methods []string) error
Generic func(ctx context.Context, request nip86.Request) (nip86.Response, error)
}
@@ -81,7 +81,7 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
resp.Error = "invalid auth event json"
goto respond
}
- if ok, _ := evt.CheckSignature(); !ok {
+ if !evt.VerifySignature() {
resp.Error = "invalid auth event"
goto respond
}
diff --git a/khatru/relay.go b/khatru/relay.go
index fd76d4a..99f98ea 100644
--- a/khatru/relay.go
+++ b/khatru/relay.go
@@ -10,10 +10,10 @@ import (
"sync"
"time"
- "github.com/fasthttp/websocket"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip11"
"fiatjaf.com/nostr/nip45/hyperloglog"
+ "github.com/fasthttp/websocket"
)
func NewRelay() *Relay {
@@ -56,25 +56,23 @@ type Relay struct {
ServiceURL string
// hooks that will be called at various times
- RejectEvent []func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
- OverwriteDeletionOutcome []func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
- StoreEvent []func(ctx context.Context, event *nostr.Event) error
- ReplaceEvent []func(ctx context.Context, event *nostr.Event) error
- DeleteEvent []func(ctx context.Context, event *nostr.Event) error
- OnEventSaved []func(ctx context.Context, event *nostr.Event)
- OnEphemeralEvent []func(ctx context.Context, event *nostr.Event)
- RejectFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
- RejectCountFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
- OverwriteFilter []func(ctx context.Context, filter *nostr.Filter)
- QueryEvents []func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error)
- CountEvents []func(ctx context.Context, filter nostr.Filter) (int64, error)
- CountEventsHLL []func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error)
- RejectConnection []func(r *http.Request) bool
- OnConnect []func(ctx context.Context)
- OnDisconnect []func(ctx context.Context)
- OverwriteRelayInformation []func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument
- OverwriteResponseEvent []func(ctx context.Context, event *nostr.Event)
- PreventBroadcast []func(ws *WebSocket, event *nostr.Event) bool
+ RejectEvent func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
+ OverwriteDeletionOutcome func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
+ StoreEvent func(ctx context.Context, event *nostr.Event) error
+ ReplaceEvent func(ctx context.Context, event *nostr.Event) error
+ DeleteEvent func(ctx context.Context, event *nostr.Event) error
+ OnEventSaved func(ctx context.Context, event *nostr.Event)
+ OnEphemeralEvent func(ctx context.Context, event *nostr.Event)
+ RejectFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
+ RejectCountFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
+ QueryEvents func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error)
+ CountEvents func(ctx context.Context, filter nostr.Filter) (int64, error)
+ CountEventsHLL func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error)
+ RejectConnection func(r *http.Request) bool
+ OnConnect func(ctx context.Context)
+ OnDisconnect func(ctx context.Context)
+ OverwriteRelayInformation func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument
+ PreventBroadcast func(ws *WebSocket, event *nostr.Event) bool
// these are used when this relays acts as a router
routes []Route
diff --git a/khatru/responding.go b/khatru/responding.go
index 4f30d51..b9b7139 100644
--- a/khatru/responding.go
+++ b/khatru/responding.go
@@ -12,12 +12,6 @@ import (
func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGroup, ws *WebSocket, filter nostr.Filter) error {
defer eose.Done()
- // overwrite the filter (for example, to eliminate some kinds or
- // that we know we don't support)
- for _, ovw := range rl.OverwriteFilter {
- ovw(ctx, &filter)
- }
-
if filter.LimitZero {
// don't do any queries, just subscribe to future events
return nil
@@ -27,31 +21,24 @@ func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGr
// because we may, for example, remove some things from the incoming filters
// that we know we don't support, and then if the end result is an empty
// filter we can just reject it)
- for _, reject := range rl.RejectFilter {
- if reject, msg := reject(ctx, filter); reject {
+ if rl.RejectFilter != nil {
+ if reject, msg := rl.RejectFilter(ctx, filter); reject {
return errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
}
}
- // run the functions to query events (generally just one,
- // but we might be fetching stuff from multiple places)
- eose.Add(len(rl.QueryEvents))
- for _, query := range rl.QueryEvents {
- ch, err := query(ctx, filter)
+ // run the function to query events
+ if rl.QueryEvents != nil {
+ ch, err := rl.QueryEvents(ctx, filter)
if err != nil {
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
eose.Done()
- continue
} else if ch == nil {
eose.Done()
- continue
}
go func(ch chan *nostr.Event) {
for event := range ch {
- for _, ovw := range rl.OverwriteResponseEvent {
- ovw(ctx, event)
- }
ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &id, Event: *event})
}
eose.Done()
@@ -63,8 +50,8 @@ func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGr
func (rl *Relay) handleCountRequest(ctx context.Context, ws *WebSocket, filter nostr.Filter) int64 {
// check if we'll reject this filter
- for _, reject := range rl.RejectCountFilter {
- if rejecting, msg := reject(ctx, filter); rejecting {
+ if rl.RejectCountFilter != nil {
+ if rejecting, msg := rl.RejectCountFilter(ctx, filter); rejecting {
ws.WriteJSON(nostr.NoticeEnvelope(msg))
return 0
}
@@ -72,8 +59,8 @@ func (rl *Relay) handleCountRequest(ctx context.Context, ws *WebSocket, filter n
// run the functions to count (generally it will be just one)
var subtotal int64 = 0
- for _, count := range rl.CountEvents {
- res, err := count(ctx, filter)
+ if rl.CountEvents != nil {
+ res, err := rl.CountEvents(ctx, filter)
if err != nil {
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
}
@@ -90,8 +77,8 @@ func (rl *Relay) handleCountRequestWithHLL(
offset int,
) (int64, *hyperloglog.HyperLogLog) {
// check if we'll reject this filter
- for _, reject := range rl.RejectCountFilter {
- if rejecting, msg := reject(ctx, filter); rejecting {
+ if rl.RejectCountFilter != nil {
+ if rejecting, msg := rl.RejectCountFilter(ctx, filter); rejecting {
ws.WriteJSON(nostr.NoticeEnvelope(msg))
return 0, nil
}
@@ -100,8 +87,8 @@ func (rl *Relay) handleCountRequestWithHLL(
// run the functions to count (generally it will be just one)
var subtotal int64 = 0
var hll *hyperloglog.HyperLogLog
- for _, countHLL := range rl.CountEventsHLL {
- res, fhll, err := countHLL(ctx, filter, offset)
+ if rl.CountEventsHLL != nil {
+ res, fhll, err := rl.CountEventsHLL(ctx, filter, offset)
if err != nil {
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
}
diff --git a/khatru/utils.go b/khatru/utils.go
index b88f57e..9d8c5f7 100644
--- a/khatru/utils.go
+++ b/khatru/utils.go
@@ -31,14 +31,14 @@ func GetConnection(ctx context.Context) *WebSocket {
return nil
}
-func GetAuthed(ctx context.Context) string {
+func GetAuthed(ctx context.Context) (nostr.PubKey, bool) {
if conn := GetConnection(ctx); conn != nil {
- return conn.AuthedPublicKey
+ return conn.AuthedPublicKey, true
}
if nip86Auth := ctx.Value(nip86HeaderAuthKey); nip86Auth != nil {
- return nip86Auth.(string)
+ return nip86Auth.(nostr.PubKey), true
}
- return ""
+ return nostr.ZeroPK, false
}
// IsInternalCall returns true when a call to QueryEvents, for example, is being made because of a deletion
diff --git a/khatru/websocket.go b/khatru/websocket.go
index 8888f6a..af40533 100644
--- a/khatru/websocket.go
+++ b/khatru/websocket.go
@@ -5,6 +5,7 @@ import (
"net/http"
"sync"
+ "fiatjaf.com/nostr"
"github.com/fasthttp/websocket"
"github.com/puzpuzpuz/xsync/v3"
)
@@ -22,7 +23,7 @@ type WebSocket struct {
// nip42
Challenge string
- AuthedPublicKey string
+ AuthedPublicKey nostr.PubKey
Authed chan struct{}
// nip77
diff --git a/log.go b/log.go
index 52e2a44..6ba2ff9 100644
--- a/log.go
+++ b/log.go
@@ -7,8 +7,8 @@ import (
var (
// call SetOutput on InfoLogger to enable info logging
- InfoLogger = log.New(os.Stderr, "[go-nostr][info] ", log.LstdFlags)
+ InfoLogger = log.New(os.Stderr, "[nl][info] ", log.LstdFlags)
// call SetOutput on DebugLogger to enable debug logging
- DebugLogger = log.New(os.Stderr, "[go-nostr][debug] ", log.LstdFlags)
+ DebugLogger = log.New(os.Stderr, "[nl][debug] ", log.LstdFlags)
)
diff --git a/nip13/nip13.go b/nip13/nip13.go
index d850e24..6ba78fd 100644
--- a/nip13/nip13.go
+++ b/nip13/nip13.go
@@ -37,11 +37,11 @@ func CommittedDifficulty(event *nostr.Event) int {
}
// Difficulty counts the number of leading zero bits in an event ID.
-func Difficulty(id string) int {
+func Difficulty(id nostr.ID) int {
var zeros int
var b [1]byte
- for i := 0; i < 64; i += 2 {
- if id[i:i+2] == "00" {
+ for i := 0; i < 32; i++ {
+ if id[i] == 0 {
zeros += 8
continue
}
@@ -70,8 +70,8 @@ func difficultyBytes(id [32]byte) int {
// Check reports whether the event ID demonstrates a sufficient proof of work difficulty.
// Note that Check performs no validation other than counting leading zero bits
// in an event ID. It is up to the callers to verify the event with other methods,
-// such as [nostr.Event.CheckSignature].
-func Check(id string, minDifficulty int) error {
+// such as [nostr.Event.VerifySignature].
+func Check(id nostr.ID, minDifficulty int) error {
if Difficulty(id) < minDifficulty {
return ErrDifficultyTooLow
}
@@ -82,7 +82,7 @@ func Check(id string, minDifficulty int) error {
// nonce (as a nostr.Tag) that yields the required work.
// Returns an error if the context expires before that.
func DoWork(ctx context.Context, event nostr.Event, targetDifficulty int) (nostr.Tag, error) {
- if event.PubKey == "" {
+ if event.PubKey == nostr.ZeroPK {
return nil, ErrMissingPubKey
}
diff --git a/nip17/nip17.go b/nip17/nip17.go
index d08748e..d92ef30 100644
--- a/nip17/nip17.go
+++ b/nip17/nip17.go
@@ -9,7 +9,7 @@ import (
"fiatjaf.com/nostr/nip59"
)
-func GetDMRelays(ctx context.Context, pubkey string, pool *nostr.SimplePool, relaysToQuery []string) []string {
+func GetDMRelays(ctx context.Context, pubkey string, pool *nostr.Pool, relaysToQuery []string) []string {
ie := pool.QuerySingle(ctx, relaysToQuery, nostr.Filter{
Authors: []string{pubkey},
Kinds: []int{nostr.KindDMRelayList},
@@ -35,7 +35,7 @@ func PublishMessage(
ctx context.Context,
content string,
tags nostr.Tags,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
ourRelays []string,
theirRelays []string,
kr nostr.Keyer,
@@ -137,7 +137,7 @@ func PrepareMessage(
// ListenForMessages returns a channel with the rumors already decrypted and checked
func ListenForMessages(
ctx context.Context,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
kr nostr.Keyer,
ourRelays []string,
since nostr.Timestamp,
diff --git a/nip42/nip42.go b/nip42/nip42.go
index 8ac02f4..54c5364 100644
--- a/nip42/nip42.go
+++ b/nip42/nip42.go
@@ -10,7 +10,7 @@ import (
// CreateUnsignedAuthEvent creates an event which should be sent via an "AUTH" command.
// If the authentication succeeds, the user will be authenticated as pubkey.
-func CreateUnsignedAuthEvent(challenge, pubkey, relayURL string) nostr.Event {
+func CreateUnsignedAuthEvent(challenge string, pubkey nostr.PubKey, relayURL string) nostr.Event {
return nostr.Event{
PubKey: pubkey,
CreatedAt: nostr.Now(),
@@ -34,40 +34,40 @@ func parseURL(input string) (*url.URL, error) {
// ValidateAuthEvent checks whether event is a valid NIP-42 event for given challenge and relayURL.
// The result of the validation is encoded in the ok bool.
-func ValidateAuthEvent(event *nostr.Event, challenge string, relayURL string) (pubkey string, ok bool) {
+func ValidateAuthEvent(event nostr.Event, challenge string, relayURL string) (pubkey nostr.PubKey, ok bool) {
if event.Kind != nostr.KindClientAuthentication {
- return "", false
+ return nostr.ZeroPK, false
}
if event.Tags.FindWithValue("challenge", challenge) == nil {
- return "", false
+ return nostr.ZeroPK, false
}
expected, err := parseURL(relayURL)
if err != nil {
- return "", false
+ return nostr.ZeroPK, false
}
- found, err := parseURL(event.Tags.GetFirst([]string{"relay", ""}).Value())
+ found, err := parseURL(event.Tags.Find("relay")[1])
if err != nil {
- return "", false
+ return nostr.ZeroPK, false
}
if expected.Scheme != found.Scheme ||
expected.Host != found.Host ||
expected.Path != found.Path {
- return "", false
+ return nostr.ZeroPK, false
}
now := time.Now()
if event.CreatedAt.Time().After(now.Add(10*time.Minute)) || event.CreatedAt.Time().Before(now.Add(-10*time.Minute)) {
- return "", false
+ return nostr.ZeroPK, false
}
// save for last, as it is most expensive operation
// no need to check returned error, since ok == true implies err == nil.
- if ok, _ := event.CheckSignature(); !ok {
- return "", false
+ if !event.VerifySignature() {
+ return nostr.ZeroPK, false
}
return event.PubKey, true
diff --git a/nip45/hll_event.go b/nip45/hll_event.go
index 35be1c4..bc8d495 100644
--- a/nip45/hll_event.go
+++ b/nip45/hll_event.go
@@ -7,7 +7,7 @@ import (
"fiatjaf.com/nostr"
)
-func HyperLogLogEventPubkeyOffsetsAndReferencesForEvent(evt *nostr.Event) iter.Seq2[string, int] {
+func HyperLogLogEventPubkeyOffsetsAndReferencesForEvent(evt nostr.Event) iter.Seq2[string, int] {
return func(yield func(string, int) bool) {
switch evt.Kind {
case 3:
diff --git a/nip45/hyperloglog/hll.go b/nip45/hyperloglog/hll.go
index e546b67..d32b83c 100644
--- a/nip45/hyperloglog/hll.go
+++ b/nip45/hyperloglog/hll.go
@@ -2,7 +2,6 @@ package hyperloglog
import (
"encoding/binary"
- "encoding/hex"
"fmt"
)
@@ -51,8 +50,8 @@ func (hll *HyperLogLog) Clear() {
}
// Add takes a Nostr event pubkey which will be used as the item "key" (that combined with the offset)
-func (hll *HyperLogLog) Add(pubkey string) {
- x, _ := hex.DecodeString(pubkey[hll.offset*2 : hll.offset*2+8*2])
+func (hll *HyperLogLog) Add(pubkey [32]byte) {
+ x := pubkey[hll.offset : hll.offset+8]
j := x[0] // register address (first 8 bits, i.e. first byte)
w := binary.BigEndian.Uint64(x) // number that we will use
@@ -64,7 +63,7 @@ func (hll *HyperLogLog) Add(pubkey string) {
}
// AddBytes is like Add, but takes pubkey as bytes instead of as string
-func (hll *HyperLogLog) AddBytes(pubkey []byte) {
+func (hll *HyperLogLog) AddBytes(pubkey [32]byte) {
x := pubkey[hll.offset : hll.offset+8]
j := x[0] // register address (first 8 bits, i.e. first byte)
diff --git a/nip46/client.go b/nip46/client.go
index 6c773c6..754ea6c 100644
--- a/nip46/client.go
+++ b/nip46/client.go
@@ -2,14 +2,15 @@ package nip46
import (
"context"
+ "encoding/hex"
"fmt"
"math/rand"
"net/url"
"strconv"
"sync/atomic"
+ "unsafe"
"fiatjaf.com/nostr"
- "fiatjaf.com/nostr/nip04"
"fiatjaf.com/nostr/nip44"
"github.com/mailru/easyjson"
"github.com/puzpuzpuz/xsync/v3"
@@ -17,9 +18,9 @@ import (
type BunkerClient struct {
serial atomic.Uint64
- clientSecretKey string
- pool *nostr.SimplePool
- target string
+ clientSecretKey [32]byte
+ pool *nostr.Pool
+ target nostr.PubKey
relays []string
conversationKey [32]byte // nip44
listeners *xsync.MapOf[string, chan Response]
@@ -28,7 +29,7 @@ type BunkerClient struct {
onAuth func(string)
// memoized
- getPublicKeyResponse string
+ getPublicKeyResponse nostr.PubKey
// SkipSignatureCheck can be set if you don't want to double-check incoming signatures
SkipSignatureCheck bool
@@ -40,7 +41,7 @@ func ConnectBunker(
ctx context.Context,
clientSecretKey nostr.PubKey,
bunkerURLOrNIP05 string,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
onAuth func(string),
) (*BunkerClient, error) {
parsed, err := url.Parse(bunkerURLOrNIP05)
@@ -79,7 +80,7 @@ func ConnectBunker(
pool,
onAuth,
)
- _, err = bunker.RPC(ctx, "connect", []string{targetPublicKey, secret})
+ _, err = bunker.RPC(ctx, "connect", []string{hex.EncodeToString(targetPublicKey[:]), secret})
return bunker, err
}
@@ -88,11 +89,11 @@ func NewBunker(
clientSecretKey [32]byte,
targetPublicKey nostr.PubKey,
relays []string,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
onAuth func(string),
) *BunkerClient {
if pool == nil {
- pool = nostr.NewSimplePool(ctx)
+ pool = nostr.NewPool(nostr.PoolOptions{})
}
clientPublicKey := nostr.GetPublicKey(clientSecretKey)
@@ -113,11 +114,13 @@ func NewBunker(
go func() {
now := nostr.Now()
events := pool.SubscribeMany(ctx, relays, nostr.Filter{
- Tags: nostr.TagMap{"p": []string{clientPublicKey}},
- Kinds: []int{nostr.KindNostrConnect},
+ Tags: nostr.TagMap{"p": []string{clientPublicKey.Hex()}},
+ Kinds: []uint16{nostr.KindNostrConnect},
Since: &now,
LimitZero: true,
- }, nostr.WithLabel("bunker46client"))
+ }, nostr.SubscriptionOptions{
+ Label: "bunker46client",
+ })
for ie := range events {
if ie.Kind != nostr.KindNostrConnect {
continue
@@ -126,10 +129,7 @@ func NewBunker(
var resp Response
plain, err := nip44.Decrypt(ie.Content, conversationKey)
if err != nil {
- plain, err = nip04.Decrypt(ie.Content, sharedSecret)
- if err != nil {
- continue
- }
+ continue
}
err = json.Unmarshal([]byte(plain), &resp)
@@ -164,13 +164,22 @@ func (bunker *BunkerClient) Ping(ctx context.Context) error {
return nil
}
-func (bunker *BunkerClient) GetPublicKey(ctx context.Context) (string, error) {
- if bunker.getPublicKeyResponse != "" {
+func (bunker *BunkerClient) GetPublicKey(ctx context.Context) (nostr.PubKey, error) {
+ if bunker.getPublicKeyResponse != nostr.ZeroPK {
return bunker.getPublicKeyResponse, nil
}
resp, err := bunker.RPC(ctx, "get_public_key", []string{})
- bunker.getPublicKeyResponse = resp
- return resp, err
+ if err != nil {
+ return nostr.ZeroPK, err
+ }
+
+ pk, err := nostr.PubKeyFromHex(resp)
+ if err != nil {
+ return nostr.ZeroPK, err
+ }
+
+ bunker.getPublicKeyResponse = pk
+ return pk, nil
}
func (bunker *BunkerClient) SignEvent(ctx context.Context, evt *nostr.Event) error {
@@ -179,7 +188,7 @@ func (bunker *BunkerClient) SignEvent(ctx context.Context, evt *nostr.Event) err
return err
}
- err = easyjson.Unmarshal([]byte(resp), evt)
+ err = easyjson.Unmarshal(unsafe.Slice(unsafe.StringData(resp), len(resp)), evt)
if err != nil {
return err
}
@@ -188,7 +197,7 @@ func (bunker *BunkerClient) SignEvent(ctx context.Context, evt *nostr.Event) err
if ok := evt.CheckID(); !ok {
return fmt.Errorf("sign_event response from bunker has invalid id")
}
- if ok, _ := evt.CheckSignature(); !ok {
+ if !evt.VerifySignature() {
return fmt.Errorf("sign_event response from bunker has invalid signature")
}
}
@@ -198,34 +207,34 @@ func (bunker *BunkerClient) SignEvent(ctx context.Context, evt *nostr.Event) err
func (bunker *BunkerClient) NIP44Encrypt(
ctx context.Context,
- targetPublicKey string,
+ targetPublicKey nostr.PubKey,
plaintext string,
) (string, error) {
- return bunker.RPC(ctx, "nip44_encrypt", []string{targetPublicKey, plaintext})
+ return bunker.RPC(ctx, "nip44_encrypt", []string{targetPublicKey.Hex(), plaintext})
}
func (bunker *BunkerClient) NIP44Decrypt(
ctx context.Context,
- targetPublicKey string,
+ targetPublicKey nostr.PubKey,
ciphertext string,
) (string, error) {
- return bunker.RPC(ctx, "nip44_decrypt", []string{targetPublicKey, ciphertext})
+ return bunker.RPC(ctx, "nip44_decrypt", []string{targetPublicKey.Hex(), ciphertext})
}
func (bunker *BunkerClient) NIP04Encrypt(
ctx context.Context,
- targetPublicKey string,
+ targetPublicKey nostr.PubKey,
plaintext string,
) (string, error) {
- return bunker.RPC(ctx, "nip04_encrypt", []string{targetPublicKey, plaintext})
+ return bunker.RPC(ctx, "nip04_encrypt", []string{targetPublicKey.Hex(), plaintext})
}
func (bunker *BunkerClient) NIP04Decrypt(
ctx context.Context,
- targetPublicKey string,
+ targetPublicKey nostr.PubKey,
ciphertext string,
) (string, error) {
- return bunker.RPC(ctx, "nip04_decrypt", []string{targetPublicKey, ciphertext})
+ return bunker.RPC(ctx, "nip04_decrypt", []string{targetPublicKey.Hex(), ciphertext})
}
func (bunker *BunkerClient) RPC(ctx context.Context, method string, params []string) (string, error) {
@@ -248,7 +257,7 @@ func (bunker *BunkerClient) RPC(ctx context.Context, method string, params []str
Content: content,
CreatedAt: nostr.Now(),
Kind: nostr.KindNostrConnect,
- Tags: nostr.Tags{{"p", bunker.target}},
+ Tags: nostr.Tags{{"p", bunker.target.Hex()}},
}
if err := evt.Sign(bunker.clientSecretKey); err != nil {
return "", fmt.Errorf("failed to sign request event: %w", err)
diff --git a/nip49/nip49.go b/nip49/nip49.go
index c8e3708..1dfbd54 100644
--- a/nip49/nip49.go
+++ b/nip49/nip49.go
@@ -2,7 +2,6 @@ package nip49
import (
"crypto/rand"
- "encoding/hex"
"fmt"
"math"
@@ -21,15 +20,7 @@ const (
ClientDoesNotTrackThisData KeySecurityByte = 0x02
)
-func Encrypt(secretKey string, password string, logn uint8, ksb KeySecurityByte) (b32code string, err error) {
- skb, err := hex.DecodeString(secretKey)
- if err != nil || len(skb) != 32 {
- return "", fmt.Errorf("invalid secret key")
- }
- return EncryptBytes(skb, password, logn, ksb)
-}
-
-func EncryptBytes(secretKey []byte, password string, logn uint8, ksb KeySecurityByte) (b32code string, err error) {
+func Encrypt(secretKey [32]byte, password string, logn uint8, ksb KeySecurityByte) (b32code string, err error) {
salt := make([]byte, 16)
if _, err := rand.Read(salt); err != nil {
return "", fmt.Errorf("failed to read salt: %w", err)
@@ -53,7 +44,7 @@ func EncryptBytes(secretKey []byte, password string, logn uint8, ksb KeySecurity
if err != nil {
return "", fmt.Errorf("failed to start xchacha20poly1305: %w", err)
}
- ciphertext := c2p1.Seal(nil, concat[2+16:2+16+24], secretKey, ad)
+ ciphertext := c2p1.Seal(nil, concat[2+16:2+16+24], secretKey[:], ad)
copy(concat[2+16+24+1:], ciphertext)
bits5, err := bech32.ConvertBits(concat, 8, 5, true)
@@ -63,9 +54,9 @@ func EncryptBytes(secretKey []byte, password string, logn uint8, ksb KeySecurity
return bech32.Encode("ncryptsec", bits5)
}
-func Decrypt(bech32string string, password string) (secretKey string, err error) {
+func Decrypt(bech32string string, password string) (secretKey [32]byte, err error) {
secb, err := DecryptToBytes(bech32string, password)
- return hex.EncodeToString(secb), err
+ return [32]byte(secb), err
}
func DecryptToBytes(bech32string string, password string) (secretKey []byte, err error) {
diff --git a/nip59/nip59.go b/nip59/nip59.go
index 0e151bb..bdb1cd6 100644
--- a/nip59/nip59.go
+++ b/nip59/nip59.go
@@ -4,21 +4,21 @@ import (
"fmt"
"math/rand"
- "github.com/mailru/easyjson"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip44"
+ "github.com/mailru/easyjson"
)
// GiftWrap takes a 'rumor', encrypts it with our own key, making a 'seal', then encrypts that with a nonce key and
// signs that (after potentially applying a modify function, which can be nil otherwise), yielding a 'gift-wrap'.
func GiftWrap(
rumor nostr.Event,
- recipient string,
+ recipient nostr.PubKey,
encrypt func(plaintext string) (string, error),
sign func(*nostr.Event) error,
modify func(*nostr.Event),
) (nostr.Event, error) {
- rumor.Sig = ""
+ rumor.Sig = [64]byte{}
rumorCiphertext, err := encrypt(rumor.String())
if err != nil {
@@ -51,7 +51,7 @@ func GiftWrap(
Content: sealCiphertext,
CreatedAt: nostr.Now() - nostr.Timestamp(60*rand.Int63n(600) /* up to 6 hours in the past */),
Tags: nostr.Tags{
- nostr.Tag{"p", recipient},
+ nostr.Tag{"p", recipient.Hex()},
},
}
if modify != nil {
@@ -66,7 +66,7 @@ func GiftWrap(
func GiftUnwrap(
gw nostr.Event,
- decrypt func(otherpubkey, ciphertext string) (string, error),
+ decrypt func(otherpubkey nostr.PubKey, ciphertext string) (string, error),
) (rumor nostr.Event, err error) {
jseal, err := decrypt(gw.PubKey, gw.Content)
if err != nil {
@@ -79,7 +79,7 @@ func GiftUnwrap(
return rumor, fmt.Errorf("seal is invalid json: %w", err)
}
- if ok, _ := seal.CheckSignature(); !ok {
+ if !seal.VerifySignature() {
return rumor, fmt.Errorf("seal signature is invalid")
}
diff --git a/nip60/wallet.go b/nip60/wallet.go
index 391fd4c..22bac56 100644
--- a/nip60/wallet.go
+++ b/nip60/wallet.go
@@ -50,7 +50,7 @@ type Wallet struct {
func LoadWallet(
ctx context.Context,
kr nostr.Keyer,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
relays []string,
) *Wallet {
return loadWalletFromPool(ctx, kr, pool, relays, false)
@@ -59,7 +59,7 @@ func LoadWallet(
func LoadWalletWithHistory(
ctx context.Context,
kr nostr.Keyer,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
relays []string,
) *Wallet {
return loadWalletFromPool(ctx, kr, pool, relays, true)
@@ -68,7 +68,7 @@ func LoadWalletWithHistory(
func loadWalletFromPool(
ctx context.Context,
kr nostr.Keyer,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
relays []string,
withHistory bool,
) *Wallet {
diff --git a/nip61/nip61.go b/nip61/nip61.go
index 330a473..746370b 100644
--- a/nip61/nip61.go
+++ b/nip61/nip61.go
@@ -19,7 +19,7 @@ func SendNutzap(
ctx context.Context,
kr nostr.Keyer,
w *nip60.Wallet,
- pool *nostr.SimplePool,
+ pool *nostr.Pool,
targetUserPublickey string,
getUserReadRelays func(context.Context, string, int) []string,
relays []string,
diff --git a/nip77/idsonly.go b/nip77/idsonly.go
index 98ad058..3ac87ee 100644
--- a/nip77/idsonly.go
+++ b/nip77/idsonly.go
@@ -14,13 +14,13 @@ func FetchIDsOnly(
url string,
filter nostr.Filter,
) (<-chan nostr.ID, error) {
- id := "go-nostr-tmp" // for now we can't have more than one subscription in the same connection
+ id := "nl-tmp" // for now we can't have more than one subscription in the same connection
neg := negentropy.New(empty.Empty{}, 1024*1024)
result := make(chan error)
var r *nostr.Relay
- r, err := nostr.RelayConnect(ctx, url, nostr.WithCustomHandler(func(data string) {
+ r, err := nostr.RelayConnect(ctx, url, nostr.RelayOptions{CustomHandler: func(data string) {
envelope := ParseNegMessage(data)
if envelope == nil {
return
@@ -44,7 +44,7 @@ func FetchIDsOnly(
r.Write(msgb)
}
}
- }))
+ }})
if err != nil {
return nil, err
}
diff --git a/nip86/methods.go b/nip86/methods.go
index 64d1759..f284eb9 100644
--- a/nip86/methods.go
+++ b/nip86/methods.go
@@ -16,10 +16,15 @@ func DecodeRequest(req Request) (MethodParams, error) {
if len(req.Params) == 0 {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- pk, ok := req.Params[0].(string)
- if !ok || !nostr.IsValidPublicKey(pk) {
+ pkh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing pubkey param for '%s'", req.Method)
+ }
+ pk, err := nostr.PubKeyFromHex(pkh)
+ if err != nil {
return nil, fmt.Errorf("invalid pubkey param for '%s'", req.Method)
}
+
var reason string
if len(req.Params) >= 2 {
reason, _ = req.Params[1].(string)
@@ -31,10 +36,15 @@ func DecodeRequest(req Request) (MethodParams, error) {
if len(req.Params) == 0 {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- pk, ok := req.Params[0].(string)
- if !ok || !nostr.IsValidPublicKey(pk) {
+ pkh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing pubkey param for '%s'", req.Method)
+ }
+ pk, err := nostr.PubKeyFromHex(pkh)
+ if err != nil {
return nil, fmt.Errorf("invalid pubkey param for '%s'", req.Method)
}
+
var reason string
if len(req.Params) >= 2 {
reason, _ = req.Params[1].(string)
@@ -48,10 +58,15 @@ func DecodeRequest(req Request) (MethodParams, error) {
if len(req.Params) == 0 {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- id, ok := req.Params[0].(string)
- if !ok || !nostr.IsValid32ByteHex(id) {
+ idh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing id param for '%s'", req.Method)
+ }
+ id, err := nostr.IDFromHex(idh)
+ if err != nil {
return nil, fmt.Errorf("invalid id param for '%s'", req.Method)
}
+
var reason string
if len(req.Params) >= 2 {
reason, _ = req.Params[1].(string)
@@ -61,10 +76,15 @@ func DecodeRequest(req Request) (MethodParams, error) {
if len(req.Params) == 0 {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- id, ok := req.Params[0].(string)
- if !ok || !nostr.IsValid32ByteHex(id) {
+ idh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing id param for '%s'", req.Method)
+ }
+ id, err := nostr.IDFromHex(idh)
+ if err != nil {
return nil, fmt.Errorf("invalid id param for '%s'", req.Method)
}
+
var reason string
if len(req.Params) >= 2 {
reason, _ = req.Params[1].(string)
@@ -149,11 +169,19 @@ func DecodeRequest(req Request) (MethodParams, error) {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- pubkey := req.Params[0].(string)
+ pkh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing pubkey param for '%s'", req.Method)
+ }
+ pk, err := nostr.PubKeyFromHex(pkh)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pubkey param for '%s'", req.Method)
+ }
+
allowedMethods := req.Params[1].([]string)
return GrantAdmin{
- Pubkey: pubkey,
+ Pubkey: pk,
AllowMethods: allowedMethods,
}, nil
case "revokeadmin":
@@ -161,11 +189,19 @@ func DecodeRequest(req Request) (MethodParams, error) {
return nil, fmt.Errorf("invalid number of params for '%s'", req.Method)
}
- pubkey := req.Params[0].(string)
+ pkh, ok := req.Params[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("missing pubkey param for '%s'", req.Method)
+ }
+ pk, err := nostr.PubKeyFromHex(pkh)
+ if err != nil {
+ return nil, fmt.Errorf("invalid pubkey param for '%s'", req.Method)
+ }
+
disallowedMethods := req.Params[1].([]string)
return RevokeAdmin{
- Pubkey: pubkey,
+ Pubkey: pk,
DisallowMethods: disallowedMethods,
}, nil
case "stats":
@@ -210,7 +246,7 @@ type SupportedMethods struct{}
func (SupportedMethods) MethodName() string { return "supportedmethods" }
type BanPubKey struct {
- PubKey string
+ PubKey nostr.PubKey
Reason string
}
@@ -221,7 +257,7 @@ type ListBannedPubKeys struct{}
func (ListBannedPubKeys) MethodName() string { return "listbannedpubkeys" }
type AllowPubKey struct {
- PubKey string
+ PubKey nostr.PubKey
Reason string
}
@@ -236,14 +272,14 @@ type ListEventsNeedingModeration struct{}
func (ListEventsNeedingModeration) MethodName() string { return "listeventsneedingmoderation" }
type AllowEvent struct {
- ID string
+ ID nostr.ID
Reason string
}
func (AllowEvent) MethodName() string { return "allowevent" }
type BanEvent struct {
- ID string
+ ID nostr.ID
Reason string
}
@@ -314,14 +350,14 @@ type ListDisallowedKinds struct{}
func (ListDisallowedKinds) MethodName() string { return "listdisallowedkinds" }
type GrantAdmin struct {
- Pubkey string
+ Pubkey nostr.PubKey
AllowMethods []string
}
func (GrantAdmin) MethodName() string { return "grantadmin" }
type RevokeAdmin struct {
- Pubkey string
+ Pubkey nostr.PubKey
DisallowMethods []string
}
diff --git a/paginator.go b/paginator.go
index 044a8c5..7ff16e7 100644
--- a/paginator.go
+++ b/paginator.go
@@ -7,7 +7,7 @@ import (
"time"
)
-func (pool *SimplePool) PaginatorWithInterval(
+func (pool *Pool) PaginatorWithInterval(
interval time.Duration,
) func(ctx context.Context, urls []string, filter Filter, opts ...SubscriptionOption) chan RelayEvent {
return func(ctx context.Context, urls []string, filter Filter, opts ...SubscriptionOption) chan RelayEvent {
diff --git a/pool.go b/pool.go
index b006f74..351dd36 100644
--- a/pool.go
+++ b/pool.go
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"math"
- "net/http"
"slices"
"strings"
"sync"
@@ -20,22 +19,22 @@ const (
seenAlreadyDropTick = time.Minute
)
-// SimplePool manages connections to multiple relays, ensures they are reopened when necessary and not duplicated.
-type SimplePool struct {
+// Pool manages connections to multiple relays, ensures they are reopened when necessary and not duplicated.
+type Pool struct {
Relays *xsync.MapOf[string, *Relay]
Context context.Context
- authHandler func(context.Context, RelayEvent) error
+ authHandler func(context.Context, *Event) error
cancel context.CancelCauseFunc
eventMiddleware func(RelayEvent)
duplicateMiddleware func(relay string, id ID)
queryMiddleware func(relay string, pubkey PubKey, kind uint16)
+ relayOptions RelayOptions
// custom things not often used
penaltyBoxMu sync.Mutex
penaltyBox map[string][2]float64
- relayOptions []RelayOption
}
// DirectedFilter combines a Filter with a specific relay URL.
@@ -44,64 +43,58 @@ type DirectedFilter struct {
Relay string
}
-// RelayEvent represents an event received from a specific relay.
-type RelayEvent struct {
- *Event
- Relay *Relay
-}
-
func (ie RelayEvent) String() string { return fmt.Sprintf("[%s] >> %s", ie.Relay.URL, ie.Event) }
-// PoolOption is an interface for options that can be applied to a SimplePool.
-type PoolOption interface {
- ApplyPoolOption(*SimplePool)
-}
+// NewPool creates a new Pool with the given context and options.
+func NewPool(opts PoolOptions) *Pool {
+ ctx, cancel := context.WithCancelCause(context.Background())
-// NewSimplePool creates a new SimplePool with the given context and options.
-func NewSimplePool(ctx context.Context, opts ...PoolOption) *SimplePool {
- ctx, cancel := context.WithCancelCause(ctx)
-
- pool := &SimplePool{
+ pool := &Pool{
Relays: xsync.NewMapOf[string, *Relay](),
Context: ctx,
cancel: cancel,
+
+ authHandler: opts.AuthHandler,
+ eventMiddleware: opts.EventMiddleware,
+ duplicateMiddleware: opts.DuplicateMiddleware,
+ queryMiddleware: opts.AuthorKindQueryMiddleware,
+ relayOptions: opts.RelayOptions,
}
- for _, opt := range opts {
- opt.ApplyPoolOption(pool)
+ if opts.PenaltyBox {
+ go pool.startPenaltyBox()
}
return pool
}
-// WithRelayOptions sets options that will be used on every relay instance created by this pool.
-func WithRelayOptions(ropts ...RelayOption) withRelayOptionsOpt {
- return ropts
+type PoolOptions struct {
+ // AuthHandler, if given, must be a function that signs the auth event when called.
+ // It will be called whenever any relay in the pool returns a `CLOSED` message
+ // with the "auth-required:" prefix, only once for each relay.
+ AuthHandler func(context.Context, *Event) error
+
+ // PenaltyBox just sets the penalty box mechanism so relays that fail to connect
+ // or that disconnect will be ignored for a while and we won't attempt to connect again.
+ PenaltyBox bool
+
+ // EventMiddleware is a function that will be called with all events received.
+ EventMiddleware func(RelayEvent)
+
+ // DuplicateMiddleware is a function that will be called with all duplicate ids received.
+ DuplicateMiddleware func(relay string, id ID)
+
+ // AuthorKindQueryMiddleware is a function that will be called with every combination of
+ // relay+pubkey+kind queried in a .SubscribeMany*() call -- when applicable (i.e. when the query
+ // contains a pubkey and a kind).
+ AuthorKindQueryMiddleware func(relay string, pubkey PubKey, kind uint16)
+
+ // RelayOptions are any options that should be passed to Relays instantiated by this pool.
+ RelayOptions RelayOptions
}
-type withRelayOptionsOpt []RelayOption
-
-func (h withRelayOptionsOpt) ApplyPoolOption(pool *SimplePool) {
- pool.relayOptions = h
-}
-
-// WithAuthHandler must be a function that signs the auth event when called.
-// it will be called whenever any relay in the pool returns a `CLOSED` message
-// with the "auth-required:" prefix, only once for each relay
-type WithAuthHandler func(ctx context.Context, authEvent RelayEvent) error
-
-func (h WithAuthHandler) ApplyPoolOption(pool *SimplePool) {
- pool.authHandler = h
-}
-
-// WithPenaltyBox just sets the penalty box mechanism so relays that fail to connect
-// or that disconnect will be ignored for a while and we won't attempt to connect again.
-func WithPenaltyBox() withPenaltyBoxOpt { return withPenaltyBoxOpt{} }
-
-type withPenaltyBoxOpt struct{}
-
-func (h withPenaltyBoxOpt) ApplyPoolOption(pool *SimplePool) {
+func (pool *Pool) startPenaltyBox() {
pool.penaltyBox = make(map[string][2]float64)
go func() {
sleep := 30.0
@@ -131,38 +124,9 @@ func (h withPenaltyBoxOpt) ApplyPoolOption(pool *SimplePool) {
}()
}
-// WithEventMiddleware is a function that will be called with all events received.
-type WithEventMiddleware func(RelayEvent)
-
-func (h WithEventMiddleware) ApplyPoolOption(pool *SimplePool) {
- pool.eventMiddleware = h
-}
-
-// WithDuplicateMiddleware is a function that will be called with all duplicate ids received.
-type WithDuplicateMiddleware func(relay string, id ID)
-
-func (h WithDuplicateMiddleware) ApplyPoolOption(pool *SimplePool) {
- pool.duplicateMiddleware = h
-}
-
-// WithAuthorKindQueryMiddleware is a function that will be called with every combination of relay+pubkey+kind queried
-// in a .SubMany*() call -- when applicable (i.e. when the query contains a pubkey and a kind).
-type WithAuthorKindQueryMiddleware func(relay string, pubkey PubKey, kind uint16)
-
-func (h WithAuthorKindQueryMiddleware) ApplyPoolOption(pool *SimplePool) {
- pool.queryMiddleware = h
-}
-
-var (
- _ PoolOption = (WithAuthHandler)(nil)
- _ PoolOption = (WithEventMiddleware)(nil)
- _ PoolOption = WithPenaltyBox()
- _ PoolOption = WithRelayOptions(WithRequestHeader(http.Header{}))
-)
-
// EnsureRelay ensures that a relay connection exists and is active.
// If the relay is not connected, it attempts to connect.
-func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
+func (pool *Pool) EnsureRelay(url string) (*Relay, error) {
nm := NormalizeURL(url)
defer namedLock(nm)()
@@ -190,7 +154,7 @@ func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
)
defer cancel()
- relay = NewRelay(context.Background(), url, pool.relayOptions...)
+ relay = NewRelay(pool.Context, url, pool.relayOptions)
if err := relay.Connect(ctx); err != nil {
if pool.penaltyBox != nil {
// putting relay in penalty box
@@ -214,7 +178,7 @@ type PublishResult struct {
}
// PublishMany publishes an event to multiple relays and returns a channel of results emitted as they're received.
-func (pool *SimplePool) PublishMany(ctx context.Context, urls []string, evt Event) chan PublishResult {
+func (pool *Pool) PublishMany(ctx context.Context, urls []string, evt Event) chan PublishResult {
ch := make(chan PublishResult, len(urls))
wg := sync.WaitGroup{}
@@ -235,9 +199,7 @@ func (pool *SimplePool) PublishMany(ctx context.Context, urls []string, evt Even
ch <- PublishResult{nil, url, relay}
} else if strings.HasPrefix(err.Error(), "msg: auth-required:") && pool.authHandler != nil {
// try to authenticate if we can
- if authErr := relay.Auth(ctx, func(event *Event) error {
- return pool.authHandler(ctx, RelayEvent{Event: event, Relay: relay})
- }); authErr == nil {
+ if authErr := relay.Auth(ctx, pool.authHandler); authErr == nil {
if err := relay.Publish(ctx, evt); err == nil {
// success after auth
ch <- PublishResult{nil, url, relay}
@@ -265,36 +227,46 @@ func (pool *SimplePool) PublishMany(ctx context.Context, urls []string, evt Even
// SubscribeMany opens a subscription with the given filter to multiple relays
// the subscriptions ends when the context is canceled or when all relays return a CLOSED.
-func (pool *SimplePool) SubscribeMany(
+func (pool *Pool) SubscribeMany(
ctx context.Context,
urls []string,
filter Filter,
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
- return pool.subMany(ctx, urls, filter, nil, opts...)
+ return pool.subMany(ctx, urls, filter, nil, opts)
}
// FetchMany opens a subscription, much like SubscribeMany, but it ends as soon as all Relays
// return an EOSE message.
-func (pool *SimplePool) FetchMany(
+func (pool *Pool) FetchMany(
ctx context.Context,
urls []string,
filter Filter,
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
- return pool.SubManyEose(ctx, urls, filter, opts...)
+ seenAlready := xsync.NewMapOf[ID, struct{}]()
+
+ opts.CheckDuplicate = func(id ID, relay string) bool {
+ _, exists := seenAlready.LoadOrStore(id, struct{}{})
+ if exists && pool.duplicateMiddleware != nil {
+ pool.duplicateMiddleware(relay, id)
+ }
+ return exists
+ }
+
+ return pool.subManyEoseNonOverwriteCheckDuplicate(ctx, urls, filter, opts)
}
// SubscribeManyNotifyEOSE is like SubscribeMany, but takes a channel that is closed when
// all subscriptions have received an EOSE
-func (pool *SimplePool) SubscribeManyNotifyEOSE(
+func (pool *Pool) SubscribeManyNotifyEOSE(
ctx context.Context,
urls []string,
filter Filter,
eoseChan chan struct{},
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
- return pool.subMany(ctx, urls, filter, eoseChan, opts...)
+ return pool.subMany(ctx, urls, filter, eoseChan, opts)
}
type ReplaceableKey struct {
@@ -304,21 +276,21 @@ type ReplaceableKey struct {
// FetchManyReplaceable is like FetchMany, but deduplicates replaceable and addressable events and returns
// only the latest for each "d" tag.
-func (pool *SimplePool) FetchManyReplaceable(
+func (pool *Pool) FetchManyReplaceable(
ctx context.Context,
urls []string,
filter Filter,
- opts ...SubscriptionOption,
-) *xsync.MapOf[ReplaceableKey, *Event] {
+ opts SubscriptionOptions,
+) *xsync.MapOf[ReplaceableKey, Event] {
ctx, cancel := context.WithCancelCause(ctx)
- results := xsync.NewMapOf[ReplaceableKey, *Event]()
+ results := xsync.NewMapOf[ReplaceableKey, Event]()
wg := sync.WaitGroup{}
wg.Add(len(urls))
seenAlreadyLatest := xsync.NewMapOf[ReplaceableKey, Timestamp]()
- opts = append(opts, WithCheckDuplicateReplaceable(func(rk ReplaceableKey, ts Timestamp) bool {
+ opts.CheckDuplicateReplaceable = func(rk ReplaceableKey, ts Timestamp) bool {
updated := false
seenAlreadyLatest.Compute(rk, func(latest Timestamp, _ bool) (newValue Timestamp, delete bool) {
if ts > latest {
@@ -328,7 +300,7 @@ func (pool *SimplePool) FetchManyReplaceable(
return latest, false // the one we had was already more recent
})
return updated
- }))
+ }
for _, url := range urls {
go func(nm string) {
@@ -353,7 +325,7 @@ func (pool *SimplePool) FetchManyReplaceable(
hasAuthed := false
subscribe:
- sub, err := relay.Subscribe(ctx, filter, opts...)
+ sub, err := relay.Subscribe(ctx, filter, opts)
if err != nil {
debugLogf("error subscribing to %s with %v: %s", relay, filter, err)
return
@@ -368,9 +340,7 @@ func (pool *SimplePool) FetchManyReplaceable(
case reason := <-sub.ClosedReason:
if strings.HasPrefix(reason, "auth-required:") && pool.authHandler != nil && !hasAuthed {
// relay is requesting auth. if we can we will perform auth and try again
- err := relay.Auth(ctx, func(event *Event) error {
- return pool.authHandler(ctx, RelayEvent{Event: event, Relay: relay})
- })
+ err := relay.Auth(ctx, pool.authHandler)
if err == nil {
hasAuthed = true // so we don't keep doing AUTH again and again
goto subscribe
@@ -401,12 +371,12 @@ func (pool *SimplePool) FetchManyReplaceable(
return results
}
-func (pool *SimplePool) subMany(
+func (pool *Pool) subMany(
ctx context.Context,
urls []string,
filter Filter,
eoseChan chan struct{},
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
ctx, cancel := context.WithCancelCause(ctx)
_ = cancel // do this so `go vet` will stop complaining
@@ -423,6 +393,14 @@ func (pool *SimplePool) subMany(
}()
}
+ opts.CheckDuplicate = func(id ID, relay string) bool {
+ _, exists := seenAlready.Load(id)
+ if exists && pool.duplicateMiddleware != nil {
+ pool.duplicateMiddleware(relay, id)
+ }
+ return exists
+ }
+
pending := xsync.NewCounter()
pending.Add(int64(len(urls)))
for i, url := range urls {
@@ -485,15 +463,7 @@ func (pool *SimplePool) subMany(
hasAuthed = false
subscribe:
- sub, err = relay.Subscribe(ctx, filter, append(opts,
- WithCheckDuplicate(func(id ID, relay string) bool {
- _, exists := seenAlready.Load(id)
- if exists && pool.duplicateMiddleware != nil {
- pool.duplicateMiddleware(relay, id)
- }
- return exists
- }),
- )...)
+ sub, err = relay.Subscribe(ctx, filter, opts)
if err != nil {
debugLogf("%s reconnecting because subscription died\n", nm)
goto reconnect
@@ -546,9 +516,7 @@ func (pool *SimplePool) subMany(
case reason := <-sub.ClosedReason:
if strings.HasPrefix(reason, "auth-required:") && pool.authHandler != nil && !hasAuthed {
// relay is requesting auth. if we can we will perform auth and try again
- err := relay.Auth(ctx, func(event *Event) error {
- return pool.authHandler(ctx, RelayEvent{Event: event, Relay: relay})
- })
+ err := relay.Auth(ctx, pool.authHandler)
if err == nil {
hasAuthed = true // so we don't keep doing AUTH again and again
goto subscribe
@@ -575,32 +543,11 @@ func (pool *SimplePool) subMany(
return events
}
-// Deprecated: SubManyEose is deprecated: use FetchMany instead.
-func (pool *SimplePool) SubManyEose(
+func (pool *Pool) subManyEoseNonOverwriteCheckDuplicate(
ctx context.Context,
urls []string,
filter Filter,
- opts ...SubscriptionOption,
-) chan RelayEvent {
- seenAlready := xsync.NewMapOf[ID, struct{}]()
- return pool.subManyEoseNonOverwriteCheckDuplicate(ctx, urls, filter,
- WithCheckDuplicate(func(id ID, relay string) bool {
- _, exists := seenAlready.LoadOrStore(id, struct{}{})
- if exists && pool.duplicateMiddleware != nil {
- pool.duplicateMiddleware(relay, id)
- }
- return exists
- }),
- opts...,
- )
-}
-
-func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
- ctx context.Context,
- urls []string,
- filter Filter,
- wcd WithCheckDuplicate,
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
ctx, cancel := context.WithCancelCause(ctx)
@@ -608,8 +555,6 @@ func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
wg := sync.WaitGroup{}
wg.Add(len(urls))
- opts = append(opts, wcd)
-
go func() {
// this will happen when all subscriptions get an eose (or when they die)
wg.Wait()
@@ -640,7 +585,7 @@ func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
hasAuthed := false
subscribe:
- sub, err := relay.Subscribe(ctx, filter, opts...)
+ sub, err := relay.Subscribe(ctx, filter, opts)
if err != nil {
debugLogf("error subscribing to %s with %v: %s", relay, filter, err)
return
@@ -655,9 +600,7 @@ func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
case reason := <-sub.ClosedReason:
if strings.HasPrefix(reason, "auth-required:") && pool.authHandler != nil && !hasAuthed {
// relay is requesting auth. if we can we will perform auth and try again
- err := relay.Auth(ctx, func(event *Event) error {
- return pool.authHandler(ctx, RelayEvent{Event: event, Relay: relay})
- })
+ err := relay.Auth(ctx, pool.authHandler)
if err == nil {
hasAuthed = true // so we don't keep doing AUTH again and again
goto subscribe
@@ -689,11 +632,11 @@ func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
}
// CountMany aggregates count results from multiple relays using NIP-45 HyperLogLog
-func (pool *SimplePool) CountMany(
+func (pool *Pool) CountMany(
ctx context.Context,
urls []string,
filter Filter,
- opts []SubscriptionOption,
+ opts SubscriptionOptions,
) int {
hll := hyperloglog.New(0) // offset is irrelevant here
@@ -706,7 +649,7 @@ func (pool *SimplePool) CountMany(
if err != nil {
return
}
- ce, err := relay.countInternal(ctx, filter, opts...)
+ ce, err := relay.countInternal(ctx, filter, opts)
if err != nil {
return
}
@@ -722,14 +665,14 @@ func (pool *SimplePool) CountMany(
}
// QuerySingle returns the first event returned by the first relay, cancels everything else.
-func (pool *SimplePool) QuerySingle(
+func (pool *Pool) QuerySingle(
ctx context.Context,
urls []string,
filter Filter,
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) *RelayEvent {
ctx, cancel := context.WithCancelCause(ctx)
- for ievt := range pool.SubManyEose(ctx, urls, filter, opts...) {
+ for ievt := range pool.FetchMany(ctx, urls, filter, opts) {
cancel(errors.New("got the first event and ended successfully"))
return &ievt
}
@@ -738,28 +681,30 @@ func (pool *SimplePool) QuerySingle(
}
// BatchedSubManyEose performs batched subscriptions to multiple relays with different filters.
-func (pool *SimplePool) BatchedSubManyEose(
+func (pool *Pool) BatchedSubManyEose(
ctx context.Context,
dfs []DirectedFilter,
- opts ...SubscriptionOption,
+ opts SubscriptionOptions,
) chan RelayEvent {
res := make(chan RelayEvent)
wg := sync.WaitGroup{}
wg.Add(len(dfs))
seenAlready := xsync.NewMapOf[ID, struct{}]()
+ opts.CheckDuplicate = func(id ID, relay string) bool {
+ _, exists := seenAlready.LoadOrStore(id, struct{}{})
+ if exists && pool.duplicateMiddleware != nil {
+ pool.duplicateMiddleware(relay, id)
+ }
+ return exists
+ }
+
for _, df := range dfs {
go func(df DirectedFilter) {
for ie := range pool.subManyEoseNonOverwriteCheckDuplicate(ctx,
[]string{df.Relay},
df.Filter,
- WithCheckDuplicate(func(id ID, relay string) bool {
- _, exists := seenAlready.LoadOrStore(id, struct{}{})
- if exists && pool.duplicateMiddleware != nil {
- pool.duplicateMiddleware(relay, id)
- }
- return exists
- }), opts...,
+ opts,
) {
select {
case res <- ie:
@@ -781,6 +726,6 @@ func (pool *SimplePool) BatchedSubManyEose(
}
// Close closes the pool with the given reason.
-func (pool *SimplePool) Close(reason string) {
+func (pool *Pool) Close(reason string) {
pool.cancel(fmt.Errorf("pool closed with reason: '%s'", reason))
}
diff --git a/relay.go b/relay.go
index 0bc3123..9f5fd69 100644
--- a/relay.go
+++ b/relay.go
@@ -51,7 +51,7 @@ type writeRequest struct {
}
// NewRelay returns a new relay. It takes a context that, when canceled, will close the relay connection.
-func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
+func NewRelay(ctx context.Context, url string, opts RelayOptions) *Relay {
ctx, cancel := context.WithCancelCause(ctx)
r := &Relay{
URL: NormalizeURL(url),
@@ -64,10 +64,6 @@ func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
requestHeader: nil,
}
- for _, opt := range opts {
- opt.ApplyRelayOption(r)
- }
-
return r
}
@@ -77,44 +73,23 @@ func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
//
// The ongoing relay connection uses a background context. To close the connection, call r.Close().
// If you need fine grained long-term connection contexts, use NewRelay() instead.
-func RelayConnect(ctx context.Context, url string, opts ...RelayOption) (*Relay, error) {
- r := NewRelay(context.Background(), url, opts...)
+func RelayConnect(ctx context.Context, url string, opts RelayOptions) (*Relay, error) {
+ r := NewRelay(context.Background(), url, opts)
err := r.Connect(ctx)
return r, err
}
-// RelayOption is the type of the argument passed when instantiating relay connections.
-type RelayOption interface {
- ApplyRelayOption(*Relay)
-}
+type RelayOptions struct {
+ // NoticeHandler just takes notices and is expected to do something with them.
+ // When not given, it defaults to logging the notices.
+ NoticeHandler func(notice string)
-var (
- _ RelayOption = (WithNoticeHandler)(nil)
- _ RelayOption = (WithCustomHandler)(nil)
- _ RelayOption = (WithRequestHeader)(nil)
-)
+ // CustomHandler, if given, must be a function that handles any relay message
+ // that couldn't be parsed as a standard envelope.
+ CustomHandler func(data string)
-// WithNoticeHandler just takes notices and is expected to do something with them.
-// when not given, defaults to logging the notices.
-type WithNoticeHandler func(notice string)
-
-func (nh WithNoticeHandler) ApplyRelayOption(r *Relay) {
- r.noticeHandler = nh
-}
-
-// WithCustomHandler must be a function that handles any relay message that couldn't be
-// parsed as a standard envelope.
-type WithCustomHandler func(data string)
-
-func (ch WithCustomHandler) ApplyRelayOption(r *Relay) {
- r.customHandler = ch
-}
-
-// WithRequestHeader sets the HTTP request header of the websocket preflight request.
-type WithRequestHeader http.Header
-
-func (ch WithRequestHeader) ApplyRelayOption(r *Relay) {
- r.requestHeader = http.Header(ch)
+ // RequestHeader sets the HTTP request header of the websocket preflight request.
+ RequestHeader http.Header
}
// String just returns the relay URL.
@@ -273,21 +248,21 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
continue
} else {
// check if the event matches the desired filter, ignore otherwise
- if !sub.match(&env.Event) {
- InfoLogger.Printf("{%s} filter does not match: %v ~ %v\n", r.URL, sub.Filters, env.Event)
+ if !sub.match(env.Event) {
+ InfoLogger.Printf("{%s} filter does not match: %v ~ %v\n", r.URL, sub.Filter, env.Event)
continue
}
// check signature, ignore invalid, except from trusted (AssumeValid) relays
if !r.AssumeValid {
- if ok, _ := env.Event.CheckSignature(); !ok {
+ if !env.Event.VerifySignature() {
InfoLogger.Printf("{%s} bad signature on %s\n", r.URL, env.Event.ID)
continue
}
}
// dispatch this to the internal .events channel of the subscription
- sub.dispatchEvent(&env.Event)
+ sub.dispatchEvent(env.Event)
}
case *EOSEEnvelope:
if subscription, ok := r.Subscriptions.Load(subIdToSerial(string(*env))); ok {
@@ -334,7 +309,7 @@ func (r *Relay) Publish(ctx context.Context, event Event) error {
//
// You don't have to build the AUTH event yourself, this function takes a function to which the
// event that must be signed will be passed, so it's only necessary to sign that.
-func (r *Relay) Auth(ctx context.Context, sign func(event *Event) error) error {
+func (r *Relay) Auth(ctx context.Context, sign func(context.Context, *Event) error) error {
authEvent := Event{
CreatedAt: Now(),
Kind: KindClientAuthentication,
@@ -344,7 +319,7 @@ func (r *Relay) Auth(ctx context.Context, sign func(event *Event) error) error {
},
Content: "",
}
- if err := sign(&authEvent); err != nil {
+ if err := sign(ctx, &authEvent); err != nil {
return fmt.Errorf("error signing auth event: %w", err)
}
@@ -404,15 +379,15 @@ func (r *Relay) publish(ctx context.Context, id ID, env Envelope) error {
//
// Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point.
// Failure to do that will result in a huge number of halted goroutines being created.
-func (r *Relay) Subscribe(ctx context.Context, filters Filters, opts ...SubscriptionOption) (*Subscription, error) {
- sub := r.PrepareSubscription(ctx, filters, opts...)
+func (r *Relay) Subscribe(ctx context.Context, filter Filter, opts SubscriptionOptions) (*Subscription, error) {
+ sub := r.PrepareSubscription(ctx, filter, opts)
if r.Connection == nil {
return nil, fmt.Errorf("not connected to %s", r.URL)
}
if err := sub.Fire(); err != nil {
- return nil, fmt.Errorf("couldn't subscribe to %v at %s: %w", filters, r.URL, err)
+ return nil, fmt.Errorf("couldn't subscribe to %v at %s: %w", filter, r.URL, err)
}
return sub, nil
@@ -422,7 +397,7 @@ func (r *Relay) Subscribe(ctx context.Context, filters Filters, opts ...Subscrip
//
// Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point.
// Failure to do that will result in a huge number of halted goroutines being created.
-func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts ...SubscriptionOption) *Subscription {
+func (r *Relay) PrepareSubscription(ctx context.Context, filter Filter, opts SubscriptionOptions) *Subscription {
current := subscriptionIDCounter.Add(1)
ctx, cancel := context.WithCancelCause(ctx)
@@ -431,30 +406,21 @@ func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts .
Context: ctx,
cancel: cancel,
counter: current,
- Events: make(chan *Event),
+ Events: make(chan Event),
EndOfStoredEvents: make(chan struct{}, 1),
ClosedReason: make(chan string, 1),
- Filters: filters,
- match: filters.Match,
+ Filter: filter,
+ match: filter.Matches,
}
- label := ""
- for _, opt := range opts {
- switch o := opt.(type) {
- case WithLabel:
- label = string(o)
- case WithCheckDuplicate:
- sub.checkDuplicate = o
- case WithCheckDuplicateReplaceable:
- sub.checkDuplicateReplaceable = o
- }
- }
+ sub.checkDuplicate = opts.CheckDuplicate
+ sub.checkDuplicateReplaceable = opts.CheckDuplicateReplaceable
// subscription id computation
buf := subIdPool.Get().([]byte)[:0]
buf = strconv.AppendInt(buf, sub.counter, 10)
buf = append(buf, ':')
- buf = append(buf, label...)
+ buf = append(buf, opts.Label...)
defer subIdPool.Put(buf)
sub.id = string(buf)
@@ -467,63 +433,13 @@ func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts .
return sub
}
-// QueryEvents subscribes to events matching the given filter and returns a channel of events.
-//
-// In most cases it's better to use SimplePool instead of this method.
-func (r *Relay) QueryEvents(ctx context.Context, filter Filter) (chan *Event, error) {
- sub, err := r.Subscribe(ctx, Filters{filter})
- if err != nil {
- return nil, err
- }
-
- go func() {
- for {
- select {
- case <-sub.ClosedReason:
- case <-sub.EndOfStoredEvents:
- case <-ctx.Done():
- case <-r.Context().Done():
- }
- sub.unsub(errors.New("QueryEvents() ended"))
- return
- }
- }()
-
- return sub.Events, nil
-}
-
-// QuerySync subscribes to events matching the given filter and returns a slice of events.
-// This method blocks until all events are received or the context is canceled.
-//
-// In most cases it's better to use SimplePool instead of this method.
-func (r *Relay) QuerySync(ctx context.Context, filter Filter) ([]*Event, error) {
- if _, ok := ctx.Deadline(); !ok {
- // if no timeout is set, force it to 7 seconds
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeoutCause(ctx, 7*time.Second, errors.New("QuerySync() took too long"))
- defer cancel()
- }
-
- events := make([]*Event, 0, max(filter.Limit, 250))
- ch, err := r.QueryEvents(ctx, filter)
- if err != nil {
- return nil, err
- }
-
- for evt := range ch {
- events = append(events, evt)
- }
-
- return events, nil
-}
-
// Count sends a "COUNT" command to the relay and returns the count of events matching the filters.
func (r *Relay) Count(
ctx context.Context,
- filters Filters,
- opts ...SubscriptionOption,
+ filter Filter,
+ opts SubscriptionOptions,
) (int64, []byte, error) {
- v, err := r.countInternal(ctx, filters, opts...)
+ v, err := r.countInternal(ctx, filter, opts)
if err != nil {
return 0, nil, err
}
@@ -531,8 +447,8 @@ func (r *Relay) Count(
return *v.Count, v.HyperLogLog, nil
}
-func (r *Relay) countInternal(ctx context.Context, filters Filters, opts ...SubscriptionOption) (CountEnvelope, error) {
- sub := r.PrepareSubscription(ctx, filters, opts...)
+func (r *Relay) countInternal(ctx context.Context, filter Filter, opts SubscriptionOptions) (CountEnvelope, error) {
+ sub := r.PrepareSubscription(ctx, filter, opts)
sub.countResult = make(chan CountEnvelope)
if err := sub.Fire(); err != nil {
diff --git a/sdk/system.go b/sdk/system.go
index 9a1adfd..9f89888 100644
--- a/sdk/system.go
+++ b/sdk/system.go
@@ -41,7 +41,7 @@ type System struct {
FollowSetsCache cache.Cache32[GenericSets[ProfileRef]]
TopicSetsCache cache.Cache32[GenericSets[Topic]]
Hints hints.HintsDB
- Pool *nostr.SimplePool
+ Pool *nostr.Pool
RelayListRelays *RelayStream
FollowListRelays *RelayStream
MetadataRelays *RelayStream
@@ -118,7 +118,7 @@ func NewSystem(mods ...SystemModifier) *System {
Hints: memoryh.NewHintDB(),
}
- sys.Pool = nostr.NewSimplePool(context.Background(),
+ sys.Pool = nostr.NewPool(context.Background(),
nostr.WithAuthorKindQueryMiddleware(sys.TrackQueryAttempts),
nostr.WithEventMiddleware(sys.TrackEventHintsAndRelays),
nostr.WithDuplicateMiddleware(sys.TrackEventRelaysD),
diff --git a/signature.go b/signature.go
index f648e6d..29b6cd1 100644
--- a/signature.go
+++ b/signature.go
@@ -4,31 +4,30 @@ package nostr
import (
"crypto/sha256"
- "fmt"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/btcsuite/btcd/btcec/v2/schnorr"
)
-// CheckSignature checks if the event signature is valid for the given event.
+// VerifySignature checks if the event signature is valid for the given event.
// It won't look at the ID field, instead it will recompute the id from the entire event body.
-// If the signature is invalid bool will be false and err will be set.
-func (evt Event) CheckSignature() (bool, error) {
+// Returns true if the signature is valid, false otherwise.
+func (evt Event) VerifySignature() bool {
// read and check pubkey
pubkey, err := schnorr.ParsePubKey(evt.PubKey[:])
if err != nil {
- return false, fmt.Errorf("event has invalid pubkey '%s': %w", evt.PubKey, err)
+ return false
}
// read signature
sig, err := schnorr.ParseSignature(evt.Sig[:])
if err != nil {
- return false, fmt.Errorf("failed to parse signature: %w", err)
+ return false
}
// check signature
hash := sha256.Sum256(evt.Serialize())
- return sig.Verify(hash[:], pubkey), nil
+ return sig.Verify(hash[:], pubkey)
}
// Sign signs an event with a given privateKey.
diff --git a/subscription.go b/subscription.go
index c761f33..31ac462 100644
--- a/subscription.go
+++ b/subscription.go
@@ -21,7 +21,7 @@ type Subscription struct {
// the Events channel emits all EVENTs that come in a Subscription
// will be closed when the subscription ends
- Events chan *Event
+ Events chan Event
mu sync.Mutex
// the EndOfStoredEvents channel gets closed when an EOSE comes for that subscription
@@ -41,7 +41,7 @@ type Subscription struct {
// if it returns true that event will not be processed further.
checkDuplicateReplaceable func(rk ReplaceableKey, ts Timestamp) bool
- match func(*Event) bool // this will be either Filters.Match or Filters.MatchIgnoringTimestampConstraints
+ match func(Event) bool // this will be either Filter.Matches or a variant that ignores timestamp constraints
live atomic.Bool
eosed atomic.Bool
cancel context.CancelCauseFunc
@@ -51,33 +51,19 @@ type Subscription struct {
storedwg sync.WaitGroup
}
-// SubscriptionOption is the type of the argument passed when instantiating relay connections.
-// Some examples are WithLabel.
-type SubscriptionOption interface {
- IsSubscriptionOption()
+// SubscriptionOptions configures a Subscription. All fields are optional.
+type SubscriptionOptions struct {
+ // Label puts a label on the subscription (it is prepended to the automatic id) that is sent to relays.
+ Label string
+
+ // CheckDuplicate is a function that, when present, is run on events before they're parsed.
+ // If it returns true the event will be discarded and not processed further.
+ CheckDuplicate func(id ID, relay string) bool
+
+ // CheckDuplicateReplaceable is like CheckDuplicate, but runs on replaceable/addressable events
+ CheckDuplicateReplaceable func(rk ReplaceableKey, ts Timestamp) bool
}
-// WithLabel puts a label on the subscription (it is prepended to the automatic id) that is sent to relays.
-type WithLabel string
-
-func (_ WithLabel) IsSubscriptionOption() {}
-
-// WithCheckDuplicate sets checkDuplicate on the subscription
-type WithCheckDuplicate func(id ID, relay string) bool
-
-func (_ WithCheckDuplicate) IsSubscriptionOption() {}
-
-// WithCheckDuplicateReplaceable sets checkDuplicateReplaceable on the subscription
-type WithCheckDuplicateReplaceable func(rk ReplaceableKey, ts Timestamp) bool
-
-func (_ WithCheckDuplicateReplaceable) IsSubscriptionOption() {}
-
-var (
- _ SubscriptionOption = (WithLabel)("")
- _ SubscriptionOption = (WithCheckDuplicate)(nil)
- _ SubscriptionOption = (WithCheckDuplicateReplaceable)(nil)
-)
-
func (sub *Subscription) start() {
<-sub.Context.Done()
@@ -93,7 +79,7 @@ func (sub *Subscription) start() {
// GetID returns the subscription ID.
func (sub *Subscription) GetID() string { return sub.id }
-func (sub *Subscription) dispatchEvent(evt *Event) {
+func (sub *Subscription) dispatchEvent(evt Event) {
added := false
if !sub.eosed.Load() {
sub.storedwg.Add(1)
diff --git a/types.go b/types.go
index 16dff4e..ac04b3e 100644
--- a/types.go
+++ b/types.go
@@ -6,6 +6,12 @@ import (
"unsafe"
)
+// RelayEvent represents an event received from a specific relay.
+type RelayEvent struct {
+ Event
+ Relay *Relay
+}
+
var (
ZeroID = [32]byte{}
ZeroPK = [32]byte{}
@@ -14,6 +20,7 @@ var (
type PubKey [32]byte
func (pk PubKey) String() string { return hex.EncodeToString(pk[:]) }
+func (pk PubKey) Hex() string { return hex.EncodeToString(pk[:]) }
func PubKeyFromHex(pkh string) (PubKey, error) {
pk := PubKey{}
@@ -49,9 +56,19 @@ func MustPubKeyFromHex(pkh string) PubKey {
return pk
}
+func ContainsPubKey(haystack []PubKey, needle PubKey) bool {
+ for _, cand := range haystack {
+ if cand == needle {
+ return true
+ }
+ }
+ return false
+}
+
type ID [32]byte
func (id ID) String() string { return hex.EncodeToString(id[:]) }
+func (id ID) Hex() string { return hex.EncodeToString(id[:]) }
func IDFromHex(idh string) (ID, error) {
id := ID{}
diff --git a/utils.go b/utils.go
index b7bf200..0f577ae 100644
--- a/utils.go
+++ b/utils.go
@@ -46,39 +46,3 @@ func CompareEventReverse(b, a Event) int {
}
return cmp.Compare(a.CreatedAt, b.CreatedAt)
}
-
-// CompareEventPtr is meant to to be used with slices.Sort
-func CompareEventPtr(a, b *Event) int {
- if a == nil {
- if b == nil {
- return 0
- } else {
- return -1
- }
- } else if b == nil {
- return 1
- }
-
- if a.CreatedAt == b.CreatedAt {
- return bytes.Compare(a.ID[:], b.ID[:])
- }
- return cmp.Compare(a.CreatedAt, b.CreatedAt)
-}
-
-// CompareEventPtrReverse is meant to to be used with slices.Sort
-func CompareEventPtrReverse(b, a *Event) int {
- if a == nil {
- if b == nil {
- return 0
- } else {
- return -1
- }
- } else if b == nil {
- return 1
- }
-
- if a.CreatedAt == b.CreatedAt {
- return bytes.Compare(a.ID[:], b.ID[:])
- }
- return cmp.Compare(a.CreatedAt, b.CreatedAt)
-}
diff --git a/utils_test.go b/utils_test.go
index 1b850ee..9cf8607 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -53,23 +53,23 @@ func TestEventsCompare(t *testing.T) {
}
func TestEventsComparePtr(t *testing.T) {
- list := []*Event{
+ list := []Event{
{CreatedAt: 12},
{CreatedAt: 8},
{CreatedAt: 26},
{CreatedAt: 1},
}
- slices.SortFunc(list, CompareEventPtr)
- require.Equal(t, []*Event{
+ slices.SortFunc(list, CompareEvent)
+ require.Equal(t, []Event{
{CreatedAt: 1},
{CreatedAt: 8},
{CreatedAt: 12},
{CreatedAt: 26},
}, list)
- slices.SortFunc(list, CompareEventPtrReverse)
- require.Equal(t, []*Event{
+ slices.SortFunc(list, CompareEventReverse)
+ require.Equal(t, []Event{
{CreatedAt: 26},
{CreatedAt: 12},
{CreatedAt: 8},