it never ends.

This commit is contained in:
fiatjaf
2025-04-16 02:59:47 -03:00
parent cb0dd45a32
commit 5b8954461f
53 changed files with 396 additions and 673 deletions

View File

@@ -157,7 +157,7 @@ func (v ReqEnvelope) MarshalJSON() ([]byte, error) {
 type CountEnvelope struct {
 	SubscriptionID string
 	Filter
-	Count *int64
+	Count *uint32
 	HyperLogLog []byte
 }
@@ -176,8 +176,8 @@ func (v *CountEnvelope) FromJSON(data string) error {
 	v.SubscriptionID = string(unsafe.Slice(unsafe.StringData(arr[1].Str), len(arr[1].Str)))
 	var countResult struct {
-		Count *int64 `json:"count"`
-		HLL string `json:"hll"`
+		Count *uint32
+		HLL string
 	}
 	if err := json.Unmarshal(unsafe.Slice(unsafe.StringData(arr[2].Raw), len(arr[2].Raw)), &countResult); err == nil && countResult.Count != nil {
 		v.Count = countResult.Count
@@ -205,7 +205,7 @@ func (v CountEnvelope) MarshalJSON() ([]byte, error) {
 	w.RawString(`"`)
 	if v.Count != nil {
 		w.RawString(`{"count":`)
-		w.RawString(strconv.FormatInt(*v.Count, 10))
+		w.RawString(strconv.FormatUint(uint64(*v.Count), 10))
 		if v.HyperLogLog != nil {
 			w.RawString(`,"hll":"`)
 			hllHex := make([]byte, 512)
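For reference, a minimal sketch of what the COUNT response now carries on the wire with the unsigned count (the subscription id and count value here are illustrative, not from this commit):

	count := uint32(42)
	env := nostr.CountEnvelope{SubscriptionID: "sub1", Count: &count}
	b, _ := env.MarshalJSON()
	// b is roughly: ["COUNT","sub1",{"count":42}]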

View File

@@ -10,8 +10,8 @@ import (
"github.com/dgraph-io/badger/v4" "github.com/dgraph-io/badger/v4"
) )
func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) { func (b *BadgerBackend) CountEvents(filter nostr.Filter) (uint32, error) {
var count int64 = 0 var count uint32 = 0
queries, extraFilter, since, err := prepareQueries(filter) queries, extraFilter, since, err := prepareQueries(filter)
if err != nil { if err != nil {
@@ -86,8 +86,8 @@ func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) {
return count, err return count, err
} }
func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) { func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error) {
var count int64 = 0 var count uint32 = 0
queries, extraFilter, since, err := prepareQueries(filter) queries, extraFilter, since, err := prepareQueries(filter)
if err != nil { if err != nil {

View File

@@ -53,6 +53,6 @@ func (b *BlugeBackend) Init() error {
 	return nil
 }
-func (b *BlugeBackend) CountEvents(nostr.Filter) (int64, error) {
+func (b *BlugeBackend) CountEvents(nostr.Filter) (uint32, error) {
 	return 0, errors.New("not supported")
 }

View File

@@ -5,8 +5,8 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/urfave/cli/v3"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"github.com/urfave/cli/v3"
) )
var delete_ = &cli.Command{ var delete_ = &cli.Command{
@@ -17,19 +17,17 @@ var delete_ = &cli.Command{
Action: func(ctx context.Context, c *cli.Command) error { Action: func(ctx context.Context, c *cli.Command) error {
hasError := false hasError := false
for line := range getStdinLinesOrFirstArgument(c) { for line := range getStdinLinesOrFirstArgument(c) {
f := nostr.Filter{IDs: []string{line}} id, err := nostr.IDFromHex(line)
ch, err := db.QueryEvents(ctx, f)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "error querying for %s: %s\n", f, err) fmt.Fprintf(os.Stderr, "invalid id '%s': %s\n", line, err)
hasError = true hasError = true
} }
for evt := range ch {
if err := db.DeleteEvent(ctx, evt); err != nil { if err := db.DeleteEvent(id); err != nil {
fmt.Fprintf(os.Stderr, "error deleting %s: %s\n", evt, err) fmt.Fprintf(os.Stderr, "error deleting '%s': %s\n", id.Hex(), err)
hasError = true hasError = true
} }
} }
}
if hasError { if hasError {
os.Exit(123) os.Exit(123)
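The new flow parses the hex id up front and deletes by id instead of querying first. A minimal sketch of the same pattern outside the CLI, assuming `db` is an opened eventstore.Store (the id value is only an example):

	id, err := nostr.IDFromHex("645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58")
	if err != nil {
		return err // not a valid 32-byte hex id
	}
	if err := db.DeleteEvent(id); err != nil {
		return fmt.Errorf("error deleting '%s': %w", id.Hex(), err)
	}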

View File

@@ -96,7 +96,7 @@ var app = &cli.Command{
 			if err := json.Unmarshal(scanner.Bytes(), &evt); err != nil {
 				log.Printf("invalid event read at line %d: %s (`%s`)\n", i, err, scanner.Text())
 			}
-			db.SaveEvent(ctx, &evt)
+			db.SaveEvent(evt)
 			i++
 		}
 	}()

View File

@@ -6,8 +6,8 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/urfave/cli/v3"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"github.com/urfave/cli/v3"
) )
// this is the default command when no subcommands are given, we will just try everything // this is the default command when no subcommands are given, we will just try everything
@@ -21,16 +21,14 @@ var queryOrSave = &cli.Command{
re := &nostr.ReqEnvelope{} re := &nostr.ReqEnvelope{}
e := &nostr.Event{} e := &nostr.Event{}
f := &nostr.Filter{} f := &nostr.Filter{}
if json.Unmarshal([]byte(line), ee) == nil && ee.Event.ID != "" { if json.Unmarshal([]byte(line), ee) == nil && ee.Event.ID != nostr.ZeroID {
e = &ee.Event return doSave(ctx, line, ee.Event)
return doSave(ctx, line, e)
} }
if json.Unmarshal([]byte(line), e) == nil && e.ID != "" { if json.Unmarshal([]byte(line), e) == nil && e.ID != nostr.ZeroID {
return doSave(ctx, line, e) return doSave(ctx, line, *e)
} }
if json.Unmarshal([]byte(line), re) == nil && len(re.Filters) > 0 { if json.Unmarshal([]byte(line), re) == nil {
f = &re.Filters[0] return doQuery(ctx, &re.Filter)
return doQuery(ctx, f)
} }
if json.Unmarshal([]byte(line), f) == nil && len(f.String()) > 2 { if json.Unmarshal([]byte(line), f) == nil && len(f.String()) > 2 {
return doQuery(ctx, f) return doQuery(ctx, f)
@@ -40,21 +38,16 @@ var queryOrSave = &cli.Command{
}, },
} }
func doSave(ctx context.Context, line string, e *nostr.Event) error { func doSave(ctx context.Context, line string, evt nostr.Event) error {
if err := db.SaveEvent(ctx, e); err != nil { if err := db.SaveEvent(evt); err != nil {
return fmt.Errorf("failed to save event '%s': %s", line, err) return fmt.Errorf("failed to save event '%s': %s", line, err)
} }
fmt.Fprintf(os.Stderr, "saved %s", e.ID) fmt.Fprintf(os.Stderr, "saved %s", evt.ID)
return nil return nil
} }
func doQuery(ctx context.Context, f *nostr.Filter) error { func doQuery(ctx context.Context, f *nostr.Filter) error {
ch, err := db.QueryEvents(ctx, *f) for evt := range db.QueryEvents(*f) {
if err != nil {
return fmt.Errorf("error querying: %w", err)
}
for evt := range ch {
fmt.Println(evt) fmt.Println(evt)
} }
return nil return nil
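QueryEvents now returns an iter.Seq[nostr.Event] instead of (chan *nostr.Event, error), so there is no error to check and no channel to drain. A short consumption sketch (the filter values are illustrative):

	filter := nostr.Filter{Kinds: []uint16{1}, Limit: 10}
	for evt := range db.QueryEvents(filter) {
		fmt.Println(evt)
		break // range-over-func iterators support early exit like any loop
	}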

View File

@@ -13,8 +13,8 @@ import (
"golang.org/x/exp/slices" "golang.org/x/exp/slices"
) )
func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) { func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {
var count int64 = 0 var count uint32 = 0
queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter) queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)
if err != nil { if err != nil {
@@ -95,12 +95,12 @@ func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) {
// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results, // CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results,
// following NIP-45 // following NIP-45
func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) { func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error) {
if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache { if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache {
return b.countEventsHLLCached(filter) return b.countEventsHLLCached(filter)
} }
var count int64 = 0 var count uint32 = 0
// this is different than CountEvents because some of these extra checks are not applicable in HLL-valid filters // this is different than CountEvents because some of these extra checks are not applicable in HLL-valid filters
queries, _, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter) queries, _, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)
@@ -180,7 +180,7 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *h
} }
// countEventsHLLCached will just return a cached value from disk (and presumably we don't even have the events required to compute this anymore). // countEventsHLLCached will just return a cached value from disk (and presumably we don't even have the events required to compute this anymore).
func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperloglog.HyperLogLog, error) { func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (uint32, *hyperloglog.HyperLogLog, error) {
cacheKey := make([]byte, 2+8) cacheKey := make([]byte, 2+8)
binary.BigEndian.PutUint16(cacheKey[0:2], uint16(filter.Kinds[0])) binary.BigEndian.PutUint16(cacheKey[0:2], uint16(filter.Kinds[0]))
switch filter.Kinds[0] { switch filter.Kinds[0] {
@@ -192,7 +192,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["E"][0][0:8*2])) hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["E"][0][0:8*2]))
} }
var count int64 var count uint32
var hll *hyperloglog.HyperLogLog var hll *hyperloglog.HyperLogLog
err := b.lmdbEnv.View(func(txn *lmdb.Txn) error { err := b.lmdbEnv.View(func(txn *lmdb.Txn) error {
@@ -204,7 +204,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
return err return err
} }
hll = hyperloglog.NewWithRegisters(val, 0) // offset doesn't matter here hll = hyperloglog.NewWithRegisters(val, 0) // offset doesn't matter here
count = int64(hll.Count()) count = uint32(hll.Count())
return nil return nil
}) })
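A sketch of how a NIP-45 caller might use the new signature, assuming `b` is an initialized *LMDBBackend and the filter is HLL-valid (the kind and offset values are illustrative):

	count, hll, err := b.CountEventsHLL(nostr.Filter{Kinds: []uint16{7}}, 16)
	if err != nil {
		return err
	}
	fmt.Println("count:", count)
	if hll != nil {
		fmt.Println("hll estimate:", hll.Count())
	}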

View File

@@ -5,9 +5,9 @@ import (
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/internal"
"github.com/PowerDNS/lmdb-go/lmdb"
) )
type query struct { type query struct {
@@ -143,7 +143,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
if filter.Authors != nil { if filter.Authors != nil {
extraAuthors = make([][32]byte, len(filter.Authors)) extraAuthors = make([][32]byte, len(filter.Authors))
for i, pk := range filter.Authors { for i, pk := range filter.Authors {
hex.Decode(extraAuthors[i][:], []byte(pk)) extraAuthors[i] = pk
} }
} }

View File

@@ -31,7 +31,7 @@ func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
 	shouldStore := true
 	for _, previous := range results {
 		if internal.IsOlder(previous.Event, evt) {
-			if err := b.delete(txn, previous.Event); err != nil {
+			if err := b.delete(txn, previous.Event.ID); err != nil {
 				return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err)
 			}
 		} else {

View File

@@ -10,8 +10,8 @@ import (
"github.com/PowerDNS/lmdb-go/lmdb" "github.com/PowerDNS/lmdb-go/lmdb"
) )
func (il *IndexingLayer) CountEvents(filter nostr.Filter) (int64, error) { func (il *IndexingLayer) CountEvents(filter nostr.Filter) (uint32, error) {
var count int64 = 0 var count uint32 = 0
queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter) queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter)
if err != nil { if err != nil {

View File

@@ -1,10 +1,10 @@
 package nullstore
 import (
-	"context"
+	"iter"
-	"fiatjaf.com/nostr/eventstore"
 	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore"
 )
 var _ eventstore.Store = NullStore{}
@@ -17,20 +17,22 @@ func (b NullStore) Init() error {
 func (b NullStore) Close() {}
-func (b NullStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
+func (b NullStore) DeleteEvent(id nostr.ID) error {
 	return nil
 }
-func (b NullStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
-	ch := make(chan *nostr.Event)
-	close(ch)
-	return ch, nil
+func (b NullStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
+	return func(yield func(nostr.Event) bool) {}
 }
-func (b NullStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+func (b NullStore) SaveEvent(evt nostr.Event) error {
 	return nil
 }
-func (b NullStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+func (b NullStore) ReplaceEvent(evt nostr.Event) error {
 	return nil
 }
+func (b NullStore) CountEvents(filter nostr.Filter) (uint32, error) {
+	return 0, nil
+}

View File

@@ -69,8 +69,8 @@ func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 	}
 }
-func (b *SliceStore) CountEvents(filter nostr.Filter) (int64, error) {
-	var val int64
+func (b *SliceStore) CountEvents(filter nostr.Filter) (uint32, error) {
+	var val uint32
 	for _, event := range b.internal {
 		if filter.Matches(event) {
 			val++

View File

@@ -29,5 +29,5 @@ type Store interface {
 	ReplaceEvent(nostr.Event) error
 	// CountEvents counts all events that match a given filter
-	CountEvents(nostr.Filter) (int64, error)
+	CountEvents(nostr.Filter) (uint32, error)
 }

View File

@@ -1,34 +0,0 @@
package count
import (
"context"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)
type Wrapper struct {
eventstore.Store
}
var _ eventstore.Store = (*Wrapper)(nil)
func (w Wrapper) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
if counter, ok := w.Store.(eventstore.Counter); ok {
return counter.CountEvents(ctx, filter)
}
ch, err := w.Store.QueryEvents(ctx, filter)
if err != nil {
return 0, err
}
if ch == nil {
return 0, nil
}
var count int64
for range ch {
count++
}
return count, nil
}
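This wrapper existed to emulate counting on stores that only knew how to query. With CountEvents now part of the Store interface itself, and QueryEvents returning an iterator, the same fallback becomes a few lines wherever it is still wanted; a sketch, not part of this commit:

	func countByIterating(s eventstore.Store, f nostr.Filter) uint32 {
		var count uint32
		for range s.QueryEvents(f) {
			count++
		}
		return count
	}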

View File

@@ -1,21 +0,0 @@
package disablesearch
import (
"context"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)
type Wrapper struct {
eventstore.Store
}
var _ eventstore.Store = (*Wrapper)(nil)
func (w Wrapper) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
if filter.Search != "" {
return nil, nil
}
return w.Store.QueryEvents(ctx, filter)
}

View File

@@ -1,17 +1,20 @@
-package eventstore
+package wrappers
 import (
 	"context"
 	"fmt"
 	"fiatjaf.com/nostr"
+	"fiatjaf.com/nostr/eventstore"
 )
-type RelayWrapper struct {
-	Store
+var _ nostr.Publisher = StorePublisher{}
+type StorePublisher struct {
+	eventstore.Store
 }
-func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
+func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
 	if nostr.IsEphemeralKind(evt.Kind) {
 		// do not store ephemeral events
 		return nil
@@ -22,7 +25,7 @@ func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
 	if nostr.IsRegularKind(evt.Kind) {
 		// regular events are just saved directly
-		if err := w.SaveEvent(evt); err != nil && err != ErrDupEvent {
+		if err := w.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
 			return fmt.Errorf("failed to save: %w", err)
 		}
 		return nil

View File

@@ -1,4 +1,4 @@
-package test
+package wrappers
 import (
 	"context"
@@ -7,7 +7,6 @@ import (
 	"time"
 	"fiatjaf.com/nostr"
-	"fiatjaf.com/nostr/eventstore"
 	"fiatjaf.com/nostr/eventstore/slicestore"
 	"github.com/stretchr/testify/require"
 )
@@ -21,7 +20,7 @@ func TestRelayWrapper(t *testing.T) {
 	s.Init()
 	defer s.Close()
-	w := eventstore.RelayWrapper{Store: s}
+	w := StorePublisher{Store: s}
 	evt1 := nostr.Event{
 		Kind: 3,

View File

@@ -0,0 +1,26 @@
package wrappers
import (
"context"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)
var _ nostr.Querier = StoreQuerier{}
type StoreQuerier struct {
eventstore.Store
}
func (w StoreQuerier) QueryEvents(ctx context.Context, filter nostr.Filter) (chan nostr.Event, error) {
ch := make(chan nostr.Event)
go func() {
for evt := range w.Store.QueryEvents(filter) {
ch <- evt
}
}()
return ch, nil
}

View File

@@ -1,24 +0,0 @@
package skipevent
import (
"context"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)
type Wrapper struct {
eventstore.Store
Skip func(ctx context.Context, evt *nostr.Event) bool
}
var _ eventstore.Store = (*Wrapper)(nil)
func (w Wrapper) SaveEvent(ctx context.Context, evt *nostr.Event) error {
if w.Skip(ctx, evt) {
return nil
}
return w.Store.SaveEvent(ctx, evt)
}

View File

@@ -1,129 +0,0 @@
package main
import (
"context"
"fmt"
"io"
"os"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip19"
)
func main() {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
// connect to relay
url := "wss://relay.stoner.com"
relay, err := nostr.RelayConnect(ctx, url)
if err != nil {
panic(err)
}
reader := os.Stdin
var npub string
var b [64]byte
fmt.Fprintf(os.Stderr, "using %s\n----\nexample subscription for three most recent notes mentioning user\npaste npub key: ", url)
if n, err := reader.Read(b[:]); err == nil {
npub = strings.TrimSpace(fmt.Sprintf("%s", b[:n]))
} else {
panic(err)
}
// create filters
var filters nostr.Filters
if _, v, err := nip19.Decode(npub); err == nil {
t := make(map[string][]string)
// making a "p" tag for the above public key.
// this filters for messages tagged with the user, mainly replies.
t["p"] = []string{v.(string)}
filters = []nostr.Filter{{
Kinds: []int{nostr.KindTextNote},
Tags: t,
// limit = 3, get the three most recent notes
Limit: 3,
}}
} else {
panic("not a valid npub!")
}
// create a subscription and submit to relay
// results will be returned on the sub.Events channel
sub, _ := relay.Subscribe(ctx, filters)
// we will append the returned events to this slice
evs := make([]nostr.Event, 0)
go func() {
<-sub.EndOfStoredEvents
cancel()
}()
for ev := range sub.Events {
evs = append(evs, *ev)
}
filename := "example_output.json"
if f, err := os.Create(filename); err == nil {
fmt.Fprintf(os.Stderr, "returned events saved to %s\n", filename)
// encode the returned events in a file
enc := jsoniter.NewEncoder(f)
enc.SetIndent("", " ")
enc.Encode(evs)
f.Close()
} else {
panic(err)
}
fmt.Fprintf(os.Stderr, "----\nexample publication of note.\npaste nsec key (leave empty to autogenerate): ")
var nsec string
if n, err := reader.Read(b[:]); err == nil {
nsec = strings.TrimSpace(fmt.Sprintf("%s", b[:n]))
} else {
panic(err)
}
var sk string
ev := nostr.Event{}
if _, s, e := nip19.Decode(nsec); e == nil {
sk = s.(string)
} else {
sk = nostr.GeneratePrivateKey()
}
if pub, e := nostr.GetPublicKey(sk); e == nil {
ev.PubKey = pub
if npub, e := nip19.EncodePublicKey(pub); e == nil {
fmt.Fprintln(os.Stderr, "using:", npub)
}
} else {
panic(e)
}
ev.CreatedAt = nostr.Now()
ev.Kind = nostr.KindTextNote
var content string
fmt.Fprintln(os.Stderr, "enter content of note, ending with an empty newline (ctrl+d):")
for {
if n, err := reader.Read(b[:]); err == nil {
content = fmt.Sprintf("%s%s", content, fmt.Sprintf("%s", b[:n]))
} else if err == io.EOF {
break
} else {
panic(err)
}
}
ev.Content = strings.TrimSpace(content)
ev.Sign(sk)
for _, url := range []string{"wss://relay.stoner.com"} {
ctx := context.WithValue(context.Background(), "url", url)
relay, e := nostr.RelayConnect(ctx, url)
if e != nil {
fmt.Println(e)
continue
}
fmt.Println("posting to: ", url)
relay.Publish(ctx, ev)
}
}

interfaces.go (new file, +18 lines)
View File

@@ -0,0 +1,18 @@
package nostr
import (
"context"
)
type Publisher interface {
Publish(context.Context, Event) error
}
type Querier interface {
QueryEvents(context.Context, Filter) (chan Event, error)
}
type QuerierPublisher interface {
Querier
Publisher
}
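These interfaces let callers depend on query/publish capabilities without caring whether the backing object is a relay, a pool, or a wrapped eventstore. A minimal consumer sketch (the helper name is made up for illustration):

	// fetchOne returns the first event a Querier yields for the filter, or an error on timeout.
	func fetchOne(ctx context.Context, q nostr.Querier, f nostr.Filter) (*nostr.Event, error) {
		ch, err := q.QueryEvents(ctx, f)
		if err != nil {
			return nil, err
		}
		select {
		case evt := <-ch:
			return &evt, nil
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}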

View File

@@ -25,7 +25,7 @@ var (
 // SignerOptions contains configuration options for creating a new signer.
 type SignerOptions struct {
 	// BunkerClientSecretKey is the secret key used for the bunker client
-	BunkerClientSecretKey string
+	BunkerClientSecretKey nostr.SecretKey
 	// BunkerSignTimeout is the timeout duration for bunker signing operations
 	BunkerSignTimeout time.Duration
@@ -60,7 +60,7 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption
 	if strings.HasPrefix(input, "ncryptsec") {
 		if opts.PasswordHandler != nil {
-			return &EncryptedKeySigner{input, "", opts.PasswordHandler}, nil
+			return &EncryptedKeySigner{input, nostr.ZeroPK, opts.PasswordHandler}, nil
 		}
 		sec, err := nip49.Decrypt(input, opts.Password)
 		if err != nil {
@@ -70,12 +70,12 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption
 			return nil, fmt.Errorf("failed to decrypt with given password: %w", err)
 		}
 		pk := nostr.GetPublicKey(sec)
-		return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
+		return KeySigner{sec, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
 	} else if nip46.IsValidBunkerURL(input) || nip05.IsValidIdentifier(input) {
-		bcsk := nostr.GeneratePrivateKey()
+		bcsk := nostr.Generate()
 		oa := func(url string) { println("auth_url received but not handled") }
-		if opts.BunkerClientSecretKey != "" {
+		if opts.BunkerClientSecretKey != [32]byte{} {
 			bcsk = opts.BunkerClientSecretKey
 		}
 		if opts.BunkerAuthHandler != nil {
@@ -88,13 +88,15 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption
 		}
 		return BunkerSigner{bunker}, nil
 	} else if prefix, parsed, err := nip19.Decode(input); err == nil && prefix == "nsec" {
-		sec := parsed.(string)
-		pk, _ := nostr.GetPublicKey(sec)
-		return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
+		sec := parsed.(nostr.SecretKey)
+		pk := nostr.GetPublicKey(sec)
+		return KeySigner{sec, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
 	} else if _, err := hex.DecodeString(input); err == nil && len(input) <= 64 {
-		input = strings.Repeat("0", 64-len(input)) + input // if the key is like '01', fill all the left zeroes
-		pk, _ := nostr.GetPublicKey(input)
-		return KeySigner{input, pk, xsync.NewMapOf[string, [32]byte]()}, nil
+		input := nostr.MustSecretKeyFromHex(
+			strings.Repeat("0", 64-len(input)) + input, // if the key is like '01', fill all the left zeroes
+		)
+		pk := nostr.GetPublicKey(input)
+		return KeySigner{input, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
 	}
 	return nil, fmt.Errorf("unsupported input '%s'", input)
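Secret keys are now fixed-size values rather than hex strings, which is why the zero-value comparisons above work. A short sketch of the typed flow, assuming the constructors that appear in this diff (Generate, GetPublicKey, and the Hex methods):

	sk := nostr.Generate()       // a nostr.SecretKey, underlying type [32]byte
	pk := nostr.GetPublicKey(sk) // no error return anymore
	fmt.Println(pk.Hex())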

View File

@@ -14,24 +14,24 @@ var (
 // ReadOnlyUser is a nostr.User that has this public key
 type ReadOnlyUser struct {
-	pk string
+	pk nostr.PubKey
 }
-func NewReadOnlyUser(pk string) ReadOnlyUser {
+func NewReadOnlyUser(pk nostr.PubKey) ReadOnlyUser {
 	return ReadOnlyUser{pk}
 }
 // GetPublicKey returns the public key associated with this signer.
-func (ros ReadOnlyUser) GetPublicKey(context.Context) (string, error) {
+func (ros ReadOnlyUser) GetPublicKey(context.Context) (nostr.PubKey, error) {
 	return ros.pk, nil
 }
 // ReadOnlySigner is like a ReadOnlyUser, but has a fake GetPublicKey method that doesn't work.
 type ReadOnlySigner struct {
-	pk string
+	pk nostr.PubKey
 }
-func NewReadOnlySigner(pk string) ReadOnlySigner {
+func NewReadOnlySigner(pk nostr.PubKey) ReadOnlySigner {
 	return ReadOnlySigner{pk}
 }
@@ -41,6 +41,6 @@ func (ros ReadOnlySigner) SignEvent(context.Context, *nostr.Event) error {
 }
 // GetPublicKey returns the public key associated with this signer.
-func (ros ReadOnlySigner) GetPublicKey(context.Context) (string, error) {
+func (ros ReadOnlySigner) GetPublicKey(context.Context) (nostr.PubKey, error) {
 	return ros.pk, nil
 }

View File

@@ -2,6 +2,7 @@ package khatru
 import (
 	"context"
+	"iter"
 	"log"
 	"net/http"
 	"os"
@@ -58,16 +59,16 @@ type Relay struct {
 	// hooks that will be called at various times
 	RejectEvent func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
 	OverwriteDeletionOutcome func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
-	StoreEvent func(ctx context.Context, event *nostr.Event) error
-	ReplaceEvent func(ctx context.Context, event *nostr.Event) error
-	DeleteEvent func(ctx context.Context, event *nostr.Event) error
-	OnEventSaved func(ctx context.Context, event *nostr.Event)
-	OnEphemeralEvent func(ctx context.Context, event *nostr.Event)
+	StoreEvent func(ctx context.Context, event nostr.Event) error
+	ReplaceEvent func(ctx context.Context, event nostr.Event) error
+	DeleteEvent func(ctx context.Context, id nostr.ID) error
+	OnEventSaved func(ctx context.Context, event nostr.Event)
+	OnEphemeralEvent func(ctx context.Context, event nostr.Event)
 	RejectFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
 	RejectCountFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
-	QueryEvents func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error)
-	CountEvents func(ctx context.Context, filter nostr.Filter) (int64, error)
-	CountEventsHLL func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error)
+	QueryEvents func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event]
+	CountEvents func(ctx context.Context, filter nostr.Filter) (uint32, error)
+	CountEventsHLL func(ctx context.Context, filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error)
 	RejectConnection func(r *http.Request) bool
 	OnConnect func(ctx context.Context)
 	OnDisconnect func(ctx context.Context)
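With these signatures the relay hooks line up directly with any eventstore.Store. A wiring sketch, assuming the hook fields are plain functions as shown in this hunk and a constructor along the lines of khatru.NewRelay (illustrative, not taken from this commit):

	relay := khatru.NewRelay()
	store := &slicestore.SliceStore{}
	store.Init()

	relay.StoreEvent = func(ctx context.Context, evt nostr.Event) error { return store.SaveEvent(evt) }
	relay.ReplaceEvent = func(ctx context.Context, evt nostr.Event) error { return store.ReplaceEvent(evt) }
	relay.DeleteEvent = func(ctx context.Context, id nostr.ID) error { return store.DeleteEvent(id) }
	relay.QueryEvents = func(ctx context.Context, f nostr.Filter) iter.Seq[nostr.Event] { return store.QueryEvents(f) }
	relay.CountEvents = func(ctx context.Context, f nostr.Filter) (uint32, error) { return store.CountEvents(f) }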

View File

@@ -12,9 +12,8 @@ func GetThreadRoot(tags nostr.Tags) *nostr.EventPointer {
 	firstE := tags.Find("e")
 	if firstE != nil {
-		return &nostr.EventPointer{
-			ID: firstE[1],
-		}
+		p, _ := nostr.EventPointerFromTag(firstE)
+		return &p
 	}
 	return nil
@@ -63,9 +62,8 @@ func GetImmediateParent(tags nostr.Tags) *nostr.EventPointer {
 	if lastE != nil {
 		// if we reached this point and we have at least one "e" we'll use that (the last)
 		// (we don't bother looking for relay or author hints because these clients don't add these anyway)
-		return &nostr.EventPointer{
-			ID: lastE[1],
-		}
+		p, _ := nostr.EventPointerFromTag(lastE)
+		return &p
 	}
 	return nil

View File

@@ -9,11 +9,11 @@ import (
"fiatjaf.com/nostr/nip59" "fiatjaf.com/nostr/nip59"
) )
func GetDMRelays(ctx context.Context, pubkey string, pool *nostr.Pool, relaysToQuery []string) []string { func GetDMRelays(ctx context.Context, pubkey nostr.PubKey, pool *nostr.Pool, relaysToQuery []string) []string {
ie := pool.QuerySingle(ctx, relaysToQuery, nostr.Filter{ ie := pool.QuerySingle(ctx, relaysToQuery, nostr.Filter{
Authors: []string{pubkey}, Authors: []nostr.PubKey{pubkey},
Kinds: []int{nostr.KindDMRelayList}, Kinds: []uint16{nostr.KindDMRelayList},
}) }, nostr.SubscriptionOptions{Label: "dm-relays"})
if ie == nil { if ie == nil {
return nil return nil
} }
@@ -39,7 +39,7 @@ func PublishMessage(
ourRelays []string, ourRelays []string,
theirRelays []string, theirRelays []string,
kr nostr.Keyer, kr nostr.Keyer,
recipientPubKey string, recipientPubKey nostr.PubKey,
modify func(*nostr.Event), modify func(*nostr.Event),
) error { ) error {
toUs, toThem, err := PrepareMessage(ctx, content, tags, kr, recipientPubKey, modify) toUs, toThem, err := PrepareMessage(ctx, content, tags, kr, recipientPubKey, modify)
@@ -56,7 +56,7 @@ func PublishMessage(
err = r.Publish(ctx, event) err = r.Publish(ctx, event)
if err != nil && strings.HasPrefix(err.Error(), "auth-required:") { if err != nil && strings.HasPrefix(err.Error(), "auth-required:") {
authErr := r.Auth(ctx, func(ae *nostr.Event) error { return kr.SignEvent(ctx, ae) }) authErr := r.Auth(ctx, kr.SignEvent)
if authErr == nil { if authErr == nil {
err = r.Publish(ctx, event) err = r.Publish(ctx, event)
} }
@@ -92,7 +92,7 @@ func PrepareMessage(
content string, content string,
tags nostr.Tags, tags nostr.Tags,
kr nostr.Keyer, kr nostr.Keyer,
recipientPubKey string, recipientPubKey nostr.PubKey,
modify func(*nostr.Event), modify func(*nostr.Event),
) (toUs nostr.Event, toThem nostr.Event, err error) { ) (toUs nostr.Event, toThem nostr.Event, err error) {
ourPubkey, err := kr.GetPublicKey(ctx) ourPubkey, err := kr.GetPublicKey(ctx)
@@ -103,7 +103,7 @@ func PrepareMessage(
rumor := nostr.Event{ rumor := nostr.Event{
Kind: nostr.KindDirectMessage, Kind: nostr.KindDirectMessage,
Content: content, Content: content,
Tags: append(tags, nostr.Tag{"p", recipientPubKey}), Tags: append(tags, nostr.Tag{"p", recipientPubKey.Hex()}),
CreatedAt: nostr.Now(), CreatedAt: nostr.Now(),
PubKey: ourPubkey, PubKey: ourPubkey,
} }
@@ -154,13 +154,15 @@ func ListenForMessages(
} }
for ie := range pool.SubscribeMany(ctx, ourRelays, nostr.Filter{ for ie := range pool.SubscribeMany(ctx, ourRelays, nostr.Filter{
Kinds: []int{nostr.KindGiftWrap}, Kinds: []uint16{nostr.KindGiftWrap},
Tags: nostr.TagMap{"p": []string{pk}}, Tags: nostr.TagMap{"p": []string{pk.Hex()}},
Since: &since, Since: &since,
}) { }, nostr.SubscriptionOptions{Label: "mydms"}) {
rumor, err := nip59.GiftUnwrap( rumor, err := nip59.GiftUnwrap(
*ie.Event, ie.Event,
func(otherpubkey, ciphertext string) (string, error) { return kr.Decrypt(ctx, ciphertext, otherpubkey) }, func(otherpubkey nostr.PubKey, ciphertext string) (string, error) {
return kr.Decrypt(ctx, ciphertext, otherpubkey)
},
) )
if err != nil { if err != nil {
nostr.InfoLogger.Printf("[nip17] failed to unwrap received message '%s' from %s: %s\n", ie.Event, ie.Relay.URL, err) nostr.InfoLogger.Printf("[nip17] failed to unwrap received message '%s' from %s: %s\n", ie.Event, ie.Relay.URL, err)

View File

@@ -25,7 +25,7 @@ func Decode(bech32string string) (prefix string, value any, err error) {
 		if len(data) != 32 {
 			return prefix, nil, fmt.Errorf("nsec should be 32 bytes (%d)", len(data))
 		}
-		return prefix, [32]byte(data[0:32]), nil
+		return prefix, nostr.SecretKey(data[0:32]), nil
 	case "note":
 		if len(data) != 32 {
 			return prefix, nil, fmt.Errorf("note should be 32 bytes (%d)", len(data))

View File

@@ -10,32 +10,24 @@ func EncodePointer(pointer nostr.Pointer) string {
switch v := pointer.(type) { switch v := pointer.(type) {
case nostr.ProfilePointer: case nostr.ProfilePointer:
if v.Relays == nil { if v.Relays == nil {
res, _ := EncodePublicKey(v.PublicKey) return EncodeNpub(v.PublicKey)
return res
} else { } else {
res, _ := EncodeProfile(v.PublicKey, v.Relays) return EncodeNprofile(v.PublicKey, v.Relays)
return res
} }
case *nostr.ProfilePointer: case *nostr.ProfilePointer:
if v.Relays == nil { if v.Relays == nil {
res, _ := EncodePublicKey(v.PublicKey) return EncodeNpub(v.PublicKey)
return res
} else { } else {
res, _ := EncodeProfile(v.PublicKey, v.Relays) return EncodeNprofile(v.PublicKey, v.Relays)
return res
} }
case nostr.EventPointer: case nostr.EventPointer:
res, _ := EncodeEvent(v.ID, v.Relays, v.Author) return EncodeNevent(v.ID, v.Relays, v.Author)
return res
case *nostr.EventPointer: case *nostr.EventPointer:
res, _ := EncodeEvent(v.ID, v.Relays, v.Author) return EncodeNevent(v.ID, v.Relays, v.Author)
return res
case nostr.EntityPointer: case nostr.EntityPointer:
res, _ := EncodeEntity(v.PublicKey, v.Kind, v.Identifier, v.Relays) return EncodeNaddr(v.PublicKey, v.Kind, v.Identifier, v.Relays)
return res
case *nostr.EntityPointer: case *nostr.EntityPointer:
res, _ := EncodeEntity(v.PublicKey, v.Kind, v.Identifier, v.Relays) return EncodeNaddr(v.PublicKey, v.Kind, v.Identifier, v.Relays)
return res
} }
return "" return ""
} }
@@ -48,13 +40,13 @@ func ToPointer(code string) (nostr.Pointer, error) {
switch prefix { switch prefix {
case "npub": case "npub":
return nostr.ProfilePointer{PublicKey: data.(string)}, nil return nostr.ProfilePointer{PublicKey: data.([32]byte)}, nil
case "nprofile": case "nprofile":
return data.(nostr.ProfilePointer), nil return data.(nostr.ProfilePointer), nil
case "nevent": case "nevent":
return data.(nostr.EventPointer), nil return data.(nostr.EventPointer), nil
case "note": case "note":
return nostr.EventPointer{ID: data.(string)}, nil return nostr.EventPointer{ID: data.([32]byte)}, nil
case "naddr": case "naddr":
return data.(nostr.EntityPointer), nil return data.(nostr.EntityPointer), nil
default: default:
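The Encode* helpers used above no longer return an error alongside the string, and ToPointer hands back typed values instead of hex strings. A round-trip sketch (the public key hex is only an example):

	pk, _ := nostr.PubKeyFromHex("79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
	code := nip19.EncodeNpub(pk)
	ptr, err := nip19.ToPointer(code)
	if err != nil {
		return err
	}
	profile := ptr.(nostr.ProfilePointer)
	fmt.Println(profile.PublicKey == pk) // true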

View File

@@ -224,20 +224,20 @@ func (group *Group) MergeInMetadataEvent(evt *nostr.Event) error {
 	group.LastMetadataUpdate = evt.CreatedAt
 	group.Name = group.Address.ID
-	if tag := evt.Tags.GetFirst([]string{"name", ""}); tag != nil {
-		group.Name = (*tag)[1]
+	if tag := evt.Tags.Find("name"); tag != nil {
+		group.Name = tag[1]
 	}
-	if tag := evt.Tags.GetFirst([]string{"about", ""}); tag != nil {
-		group.About = (*tag)[1]
+	if tag := evt.Tags.Find("about"); tag != nil {
+		group.About = tag[1]
 	}
-	if tag := evt.Tags.GetFirst([]string{"picture", ""}); tag != nil {
-		group.Picture = (*tag)[1]
+	if tag := evt.Tags.Find("picture"); tag != nil {
+		group.Picture = tag[1]
 	}
-	if tag := evt.Tags.GetFirst([]string{"private"}); tag != nil {
+	if tag := evt.Tags.Find("private"); tag != nil {
 		group.Private = true
 	}
-	if tag := evt.Tags.GetFirst([]string{"closed"}); tag != nil {
+	if tag := evt.Tags.Find("closed"); tag != nil {
 		group.Closed = true
 	}

View File

@@ -11,7 +11,7 @@ type Role struct {
 	Description string
 }
-type KindRange []int
+type KindRange []uint16
 var ModerationEventKinds = KindRange{
 	nostr.KindSimpleGroupPutUser,
@@ -30,7 +30,7 @@ var MetadataEventKinds = KindRange{
 	nostr.KindSimpleGroupRoles,
 }
-func (kr KindRange) Includes(kind int) bool {
+func (kr KindRange) Includes(kind uint16) bool {
 	_, ok := slices.BinarySearch(kr, kind)
 	return ok
 }
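Since event kinds are uint16 across the library now, the lookup works directly against event and filter kinds; note that Includes relies on slices.BinarySearch, so the KindRange literals must stay sorted. Usage is a one-liner:

	ok := ModerationEventKinds.Includes(nostr.KindSimpleGroupPutUser) // true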

View File

@@ -20,8 +20,8 @@ func TestGroupEventBackAndForth(t *testing.T) {
meta1 := group1.ToMetadataEvent() meta1 := group1.ToMetadataEvent()
require.Equal(t, "xyz", meta1.Tags.GetD(), "translation of group1 to metadata event failed: %s", meta1) require.Equal(t, "xyz", meta1.Tags.GetD(), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.GetFirst([]string{"name", "banana"}), "translation of group1 to metadata event failed: %s", meta1) require.NotNil(t, meta1.Tags.FindWithValue("name", "banana"), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.GetFirst([]string{"private"}), "translation of group1 to metadata event failed: %s", meta1) require.NotNil(t, meta1.Tags.Find("private"), "translation of group1 to metadata event failed: %s", meta1)
group2, _ := NewGroup("groups.com'abc") group2, _ := NewGroup("groups.com'abc")
group2.Members[ALICE] = []*Role{{Name: "nada"}} group2.Members[ALICE] = []*Role{{Name: "nada"}}
@@ -32,16 +32,16 @@ func TestGroupEventBackAndForth(t *testing.T) {
require.Equal(t, "abc", admins2.Tags.GetD(), "translation of group2 to admins event failed") require.Equal(t, "abc", admins2.Tags.GetD(), "translation of group2 to admins event failed")
require.Equal(t, 3, len(admins2.Tags), "translation of group2 to admins event failed") require.Equal(t, 3, len(admins2.Tags), "translation of group2 to admins event failed")
require.NotNil(t, admins2.Tags.GetFirst([]string{"p", ALICE, "nada"}), "translation of group2 to admins event failed") require.True(t, admins2.Tags.FindWithValue("p", ALICE)[2] == "nada", "translation of group2 to admins event failed")
require.NotNil(t, admins2.Tags.GetFirst([]string{"p", BOB, "nada"}), "translation of group2 to admins event failed") require.True(t, admins2.Tags.FindWithValue("p", BOB)[2] == "nada", "translation of group2 to admins event failed")
members2 := group2.ToMembersEvent() members2 := group2.ToMembersEvent()
require.Equal(t, "abc", members2.Tags.GetD(), "translation of group2 to members2 event failed") require.Equal(t, "abc", members2.Tags.GetD(), "translation of group2 to members2 event failed")
require.Equal(t, 5, len(members2.Tags), "translation of group2 to members2 event failed") require.Equal(t, 5, len(members2.Tags), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", ALICE}), "translation of group2 to members2 event failed") require.NotNil(t, members2.Tags.FindWithValue("p", ALICE), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", BOB}), "translation of group2 to members2 event failed") require.NotNil(t, members2.Tags.FindWithValue("p", BOB), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", CAROL}), "translation of group2 to members2 event failed") require.NotNil(t, members2.Tags.FindWithValue("p", CAROL), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", DEREK}), "translation of group2 to members2 event failed") require.NotNil(t, members2.Tags.FindWithValue("p", DEREK), "translation of group2 to members2 event failed")
group1.MergeInMembersEvent(members2) group1.MergeInMembersEvent(members2)
require.Equal(t, 4, len(group1.Members), "merge of members2 into group1 failed") require.Equal(t, 4, len(group1.Members), "merge of members2 into group1 failed")

View File

@@ -3,8 +3,8 @@ package nip34
 import (
 	"strings"
-	"github.com/bluekeyes/go-gitdiff/gitdiff"
 	"fiatjaf.com/nostr"
+	"github.com/bluekeyes/go-gitdiff/gitdiff"
 )
 type Patch struct {
@@ -35,7 +35,7 @@ func ParsePatch(event nostr.Event) Patch {
 			continue
 		}
 		patch.Repository.Kind = nostr.KindRepositoryAnnouncement
-		patch.Repository.PublicKey = spl[1]
+		patch.Repository.PublicKey, _ = nostr.PubKeyFromHex(spl[1])
 		patch.Repository.Identifier = spl[2]
 		if len(tag) >= 3 {
 			patch.Repository.Relays = []string{tag[2]}

View File

@@ -1,9 +1,6 @@
package nip34 package nip34
import ( import (
"context"
"fmt"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
) )
@@ -97,33 +94,3 @@ func (r Repository) ToEvent() *nostr.Event {
CreatedAt: nostr.Now(), CreatedAt: nostr.Now(),
} }
} }
func (repo Repository) FetchState(ctx context.Context, s nostr.RelayStore) *RepositoryState {
res, _ := s.QuerySync(ctx, nostr.Filter{
Kinds: []int{nostr.KindRepositoryState},
Tags: nostr.TagMap{
"d": []string{repo.Tags.GetD()},
},
})
if len(res) == 0 {
return nil
}
rs := ParseRepositoryState(*res[0])
return &rs
}
func (repo Repository) GetPatchesSync(ctx context.Context, s nostr.RelayStore) []Patch {
res, _ := s.QuerySync(ctx, nostr.Filter{
Kinds: []int{nostr.KindPatch},
Tags: nostr.TagMap{
"a": []string{fmt.Sprintf("%d:%s:%s", nostr.KindRepositoryAnnouncement, repo.Event.PubKey, repo.ID)},
},
})
patches := make([]Patch, len(res))
for i, evt := range res {
patches[i] = ParsePatch(*evt)
}
return patches
}

View File

@@ -55,7 +55,7 @@ func (s Session) MakeResponse(
 	evt.Content = ciphertext
 	evt.CreatedAt = nostr.Now()
 	evt.Kind = nostr.KindNostrConnect
-	evt.Tags = nostr.Tags{nostr.Tag{"p", requester}}
+	evt.Tags = nostr.Tags{nostr.Tag{"p", requester.Hex()}}
 	return resp, evt, nil
 }

View File

@@ -39,7 +39,7 @@ type BunkerClient struct {
 // pool can be passed to reuse an existing pool, otherwise a new pool will be created.
 func ConnectBunker(
 	ctx context.Context,
-	clientSecretKey nostr.PubKey,
+	clientSecretKey nostr.SecretKey,
 	bunkerURLOrNIP05 string,
 	pool *nostr.Pool,
 	onAuth func(string),

View File

@@ -35,7 +35,7 @@ func GiftWrap(
 		return nostr.Event{}, err
 	}
-	nonceKey := nostr.GeneratePrivateKey()
+	nonceKey := nostr.Generate()
 	temporaryConversationKey, err := nip44.GenerateConversationKey(recipient, nonceKey)
 	if err != nil {
 		return nostr.Event{}, err

View File

@@ -20,7 +20,7 @@ type HistoryEntry struct {
} }
type TokenRef struct { type TokenRef struct {
EventID string EventID nostr.ID
Created bool Created bool
IsNutzap bool IsNutzap bool
} }
@@ -47,7 +47,7 @@ func (h HistoryEntry) toEvent(ctx context.Context, kr nostr.Keyer, evt *nostr.Ev
for _, tf := range h.TokenReferences { for _, tf := range h.TokenReferences {
if tf.IsNutzap { if tf.IsNutzap {
evt.Tags = append(evt.Tags, nostr.Tag{"e", tf.EventID, "", "redeemed"}) evt.Tags = append(evt.Tags, nostr.Tag{"e", tf.EventID.Hex(), "", "redeemed"})
continue continue
} }
@@ -56,7 +56,7 @@ func (h HistoryEntry) toEvent(ctx context.Context, kr nostr.Keyer, evt *nostr.Ev
marker = "created" marker = "created"
} }
encryptedTags = append(encryptedTags, nostr.Tag{"e", tf.EventID, "", marker}) encryptedTags = append(encryptedTags, nostr.Tag{"e", tf.EventID.Hex(), "", marker})
} }
jsonb, _ := json.Marshal(encryptedTags) jsonb, _ := json.Marshal(encryptedTags)
@@ -129,11 +129,12 @@ func (h *HistoryEntry) parse(ctx context.Context, kr nostr.Keyer, evt *nostr.Eve
if len(tag) < 4 { if len(tag) < 4 {
return fmt.Errorf("'e' tag must have at least 4 items") return fmt.Errorf("'e' tag must have at least 4 items")
} }
if !nostr.IsValid32ByteHex(tag[1]) { id, err := nostr.IDFromHex(tag[1])
return fmt.Errorf("'e' tag has invalid event id %s", tag[1]) if err != nil {
return fmt.Errorf("'e' tag has invalid event id %s: %w", tag[1])
} }
tf := TokenRef{EventID: tag[1]} tf := TokenRef{EventID: id}
switch tag[3] { switch tag[3] {
case "created": case "created":
tf.Created = true tf.Created = true

View File

@@ -5,10 +5,10 @@ import (
"fmt" "fmt"
"slices" "slices"
"github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut10"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client" "fiatjaf.com/nostr/nip60/client"
"github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut10"
) )
type receiveSettings struct { type receiveSettings struct {

View File

@@ -6,13 +6,13 @@ import (
"fmt" "fmt"
"slices" "slices"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client"
"github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu" "github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut02" "github.com/elnosh/gonuts/cashu/nuts/nut02"
"github.com/elnosh/gonuts/cashu/nuts/nut10" "github.com/elnosh/gonuts/cashu/nuts/nut10"
"github.com/elnosh/gonuts/cashu/nuts/nut11" "github.com/elnosh/gonuts/cashu/nuts/nut11"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client"
) )
type SendOption func(opts *sendSettings) type SendOption func(opts *sendSettings)
@@ -23,10 +23,9 @@ type sendSettings struct {
refundtimelock int64 refundtimelock int64
} }
func WithP2PK(pubkey string) SendOption { func WithP2PK(pubkey nostr.PubKey) SendOption {
return func(opts *sendSettings) { return func(opts *sendSettings) {
pkb, _ := hex.DecodeString(pubkey) opts.p2pk, _ = btcec.ParsePubKey(append([]byte{2}, pubkey[:]...))
opts.p2pk, _ = btcec.ParsePubKey(pkb)
} }
} }
@@ -132,7 +131,7 @@ func (w *Wallet) saveChangeAndDeleteUsedTokens(
mintedAt: nostr.Now(), mintedAt: nostr.Now(),
Mint: mintURL, Mint: mintURL,
Proofs: changeProofs, Proofs: changeProofs,
Deleted: make([]string, 0, len(usedTokenIndexes)), Deleted: make([]nostr.ID, 0, len(usedTokenIndexes)),
event: &nostr.Event{}, event: &nostr.Event{},
} }
@@ -144,7 +143,7 @@ func (w *Wallet) saveChangeAndDeleteUsedTokens(
deleteEvent := nostr.Event{ deleteEvent := nostr.Event{
CreatedAt: nostr.Now(), CreatedAt: nostr.Now(),
Kind: 5, Kind: 5,
Tags: nostr.Tags{{"e", token.event.ID}, {"k", "7375"}}, Tags: nostr.Tags{{"e", token.event.ID.Hex()}, {"k", "7375"}},
} }
w.kr.SignEvent(ctx, &deleteEvent) w.kr.SignEvent(ctx, &deleteEvent)

View File

@@ -5,14 +5,14 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/elnosh/gonuts/cashu"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"github.com/elnosh/gonuts/cashu"
) )
type Token struct { type Token struct {
Mint string `json:"mint"` Mint string `json:"mint"`
Proofs cashu.Proofs `json:"proofs"` Proofs cashu.Proofs `json:"proofs"`
Deleted []string `json:"del,omitempty"` Deleted []nostr.ID `json:"del,omitempty"`
mintedAt nostr.Timestamp mintedAt nostr.Timestamp
event *nostr.Event event *nostr.Event
@@ -20,7 +20,7 @@ type Token struct {
func (t Token) ID() string { func (t Token) ID() string {
if t.event != nil { if t.event != nil {
return t.event.ID return t.event.ID.Hex()
} }
return "<not-published>" return "<not-published>"

View File

@@ -9,9 +9,9 @@ import (
"sync" "sync"
"time" "time"
"fiatjaf.com/nostr"
"github.com/btcsuite/btcd/btcec/v2" "github.com/btcsuite/btcd/btcec/v2"
"github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/decred/dcrd/dcrec/secp256k1/v4"
"fiatjaf.com/nostr"
) )
type Wallet struct { type Wallet struct {
@@ -19,7 +19,7 @@ type Wallet struct {
tokensMu sync.Mutex tokensMu sync.Mutex
event *nostr.Event event *nostr.Event
pendingDeletions []string // token events that should be deleted pendingDeletions []nostr.ID // token events that should be deleted
kr nostr.Keyer kr nostr.Keyer
@@ -34,7 +34,7 @@ type Wallet struct {
) )
// Processed, if not nil, is called every time a received event is processed // Processed, if not nil, is called every time a received event is processed
Processed func(*nostr.Event, error) Processed func(nostr.Event, error)
// Stable is closed when we have gotten an EOSE from all relays // Stable is closed when we have gotten an EOSE from all relays
Stable chan struct{} Stable chan struct{}
@@ -77,7 +77,7 @@ func loadWalletFromPool(
return nil return nil
} }
kinds := []int{17375, 7375} kinds := []uint16{17375, 7375}
if withHistory { if withHistory {
kinds = append(kinds, 7376) kinds = append(kinds, 7376)
} }
@@ -86,16 +86,18 @@ func loadWalletFromPool(
events := pool.SubscribeManyNotifyEOSE( events := pool.SubscribeManyNotifyEOSE(
ctx, ctx,
relays, relays,
nostr.Filter{Kinds: kinds, Authors: []string{pk}}, nostr.Filter{Kinds: kinds, Authors: []nostr.PubKey{pk}},
eoseChanE, eoseChanE,
nostr.SubscriptionOptions{},
) )
eoseChanD := make(chan struct{}) eoseChanD := make(chan struct{})
deletions := pool.SubscribeManyNotifyEOSE( deletions := pool.SubscribeManyNotifyEOSE(
ctx, ctx,
relays, relays,
nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"k": []string{"7375"}}, Authors: []string{pk}}, nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"k": []string{"7375"}}, Authors: []nostr.PubKey{pk}},
eoseChanD, eoseChanD,
nostr.SubscriptionOptions{},
) )
eoseChan := make(chan struct{}) eoseChan := make(chan struct{})
@@ -116,7 +118,7 @@ func loadWallet(
eoseChan chan struct{}, eoseChan chan struct{},
) *Wallet { ) *Wallet {
w := &Wallet{ w := &Wallet{
pendingDeletions: make([]string, 0, 128), pendingDeletions: make([]nostr.ID, 0, 128),
kr: kr, kr: kr,
Stable: make(chan struct{}), Stable: make(chan struct{}),
Tokens: make([]Token, 0, 128), Tokens: make([]Token, 0, 128),
@@ -143,11 +145,15 @@ func loadWallet(
w.Lock() w.Lock()
if !eosed { if !eosed {
for tag := range ie.Event.Tags.FindAll("e") { for tag := range ie.Event.Tags.FindAll("e") {
w.pendingDeletions = append(w.pendingDeletions, tag[1]) if id, err := nostr.IDFromHex(tag[1]); err == nil {
w.pendingDeletions = append(w.pendingDeletions, id)
}
} }
} else { } else {
for tag := range ie.Event.Tags.FindAll("e") { for tag := range ie.Event.Tags.FindAll("e") {
w.removeDeletedToken(tag[1]) if id, err := nostr.IDFromHex(tag[1]); err == nil {
w.removeDeletedToken(id)
}
} }
} }
w.Unlock() w.Unlock()
@@ -159,7 +165,7 @@ func loadWallet(
w.Lock() w.Lock()
switch ie.Event.Kind { switch ie.Event.Kind {
case 17375: case 17375:
if err := w.parse(ctx, kr, ie.Event); err != nil { if err := w.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil { if w.Processed != nil {
w.Processed(ie.Event, err) w.Processed(ie.Event, err)
} }
@@ -169,11 +175,11 @@ func loadWallet(
// if this metadata is newer than what we had, update // if this metadata is newer than what we had, update
if w.event == nil || ie.Event.CreatedAt > w.event.CreatedAt { if w.event == nil || ie.Event.CreatedAt > w.event.CreatedAt {
w.parse(ctx, kr, ie.Event) // this will either fail or set the new metadata w.parse(ctx, kr, &ie.Event) // this will either fail or set the new metadata
} }
case 7375: // token case 7375: // token
token := Token{} token := Token{}
if err := token.parse(ctx, kr, ie.Event); err != nil { if err := token.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil { if w.Processed != nil {
w.Processed(ie.Event, err) w.Processed(ie.Event, err)
} }
@@ -200,7 +206,7 @@ func loadWallet(
case 7376: // history case 7376: // history
he := HistoryEntry{} he := HistoryEntry{}
if err := he.parse(ctx, kr, ie.Event); err != nil { if err := he.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil { if w.Processed != nil {
w.Processed(ie.Event, err) w.Processed(ie.Event, err)
} }
@@ -230,7 +236,7 @@ func (w *Wallet) Close() error {
return nil return nil
} }
func (w *Wallet) removeDeletedToken(eventId string) { func (w *Wallet) removeDeletedToken(eventId nostr.ID) {
for t := len(w.Tokens) - 1; t >= 0; t-- { for t := len(w.Tokens) - 1; t >= 0; t-- {
token := w.Tokens[t] token := w.Tokens[t]
if token.event != nil && token.event.ID == eventId { if token.event != nil && token.event.ID == eventId {

View File

@@ -1,6 +1,7 @@
package nip60 package nip60
import ( import (
"bytes"
"cmp" "cmp"
"context" "context"
"fmt" "fmt"
@@ -8,17 +9,17 @@ import (
"testing" "testing"
"time" "time"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/keyer" "fiatjaf.com/nostr/keyer"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"golang.org/x/exp/rand" "golang.org/x/exp/rand"
) )
func TestWallet(t *testing.T) { func TestWallet(t *testing.T) {
ctx := context.Background() ctx := context.Background()
kr, err := keyer.NewPlainKeySigner("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537") kr, err := keyer.NewPlainKeySigner(nostr.MustSecretKeyFromHex("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537"))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -53,7 +54,7 @@ func TestWallet(t *testing.T) {
Amount: 100, Amount: 100,
createdAt: nostr.Timestamp(time.Now().Add(-3 * time.Hour).Unix()), createdAt: nostr.Timestamp(time.Now().Add(-3 * time.Hour).Unix()),
TokenReferences: []TokenRef{ TokenReferences: []TokenRef{
{Created: true, EventID: "645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58"}, {Created: true, EventID: nostr.MustIDFromHex("645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58")},
}, },
}, },
{ {
@@ -61,8 +62,8 @@ func TestWallet(t *testing.T) {
Amount: 200, Amount: 200,
createdAt: nostr.Timestamp(time.Now().Add(-2 * time.Hour).Unix()), createdAt: nostr.Timestamp(time.Now().Add(-2 * time.Hour).Unix()),
TokenReferences: []TokenRef{ TokenReferences: []TokenRef{
{Created: false, EventID: "add072ae7d7a027748e03024267a1c073f3fbc26cca468ba8630d039a7f5df72"}, {Created: false, EventID: nostr.MustIDFromHex("add072ae7d7a027748e03024267a1c073f3fbc26cca468ba8630d039a7f5df72")},
{Created: true, EventID: "b8460b5589b68a0d9a017ac3784d17a0729046206aa631f7f4b763b738e36cf8"}, {Created: true, EventID: nostr.MustIDFromHex("b8460b5589b68a0d9a017ac3784d17a0729046206aa631f7f4b763b738e36cf8")},
}, },
}, },
{ {
@@ -70,52 +71,52 @@ func TestWallet(t *testing.T) {
Amount: 300, Amount: 300,
createdAt: nostr.Timestamp(time.Now().Add(-1 * time.Hour).Unix()), createdAt: nostr.Timestamp(time.Now().Add(-1 * time.Hour).Unix()),
TokenReferences: []TokenRef{ TokenReferences: []TokenRef{
{Created: false, EventID: "61f86031d0ab95e9134a3ab955e96104cb1f4d610172838d28aa7ae9dc1cc924"}, {Created: false, EventID: nostr.MustIDFromHex("61f86031d0ab95e9134a3ab955e96104cb1f4d610172838d28aa7ae9dc1cc924")},
{Created: true, EventID: "588b78e4af06e960434239e7367a0bedf84747d4c52ff943f5e8b7daa3e1b601", IsNutzap: true}, {Created: true, EventID: nostr.MustIDFromHex("588b78e4af06e960434239e7367a0bedf84747d4c52ff943f5e8b7daa3e1b601"), IsNutzap: true},
{Created: false, EventID: "8f14c0a4ff1bf85ccc26bf0125b9a289552f9b59bbb310b163d6a88a7bbd4ebc"}, {Created: false, EventID: nostr.MustIDFromHex("8f14c0a4ff1bf85ccc26bf0125b9a289552f9b59bbb310b163d6a88a7bbd4ebc")},
{Created: true, EventID: "41a6f442b7c3c9e2f1e8c4835c00f17c56b3e3be4c9f7cf7bc4cdd705b1b61db", IsNutzap: true}, {Created: true, EventID: nostr.MustIDFromHex("41a6f442b7c3c9e2f1e8c4835c00f17c56b3e3be4c9f7cf7bc4cdd705b1b61db"), IsNutzap: true},
}, },
}, },
}, },
} }
// turn everything into events // turn everything into events
events := make([]*nostr.Event, 0, 7) events := make([]nostr.Event, 0, 7)
// wallet metadata event // wallet metadata event
metaEvent := &nostr.Event{} metaEvent := nostr.Event{}
err = w.toEvent(ctx, kr, metaEvent) err = w.toEvent(ctx, kr, &metaEvent)
require.NoError(t, err) require.NoError(t, err)
events = append(events, metaEvent) events = append(events, metaEvent)
// token events // token events
for i := range w.Tokens { for i := range w.Tokens {
evt := &nostr.Event{} evt := nostr.Event{}
evt.Tags = nostr.Tags{} evt.Tags = nostr.Tags{}
err := w.Tokens[i].toEvent(ctx, kr, evt) err := w.Tokens[i].toEvent(ctx, kr, &evt)
require.NoError(t, err) require.NoError(t, err)
w.Tokens[i].event = evt w.Tokens[i].event = &evt
events = append(events, evt) events = append(events, evt)
} }
// history events // history events
for i := range w.History { for i := range w.History {
evt := &nostr.Event{} evt := nostr.Event{}
evt.Tags = nostr.Tags{} evt.Tags = nostr.Tags{}
err := w.History[i].toEvent(ctx, kr, evt) err := w.History[i].toEvent(ctx, kr, &evt)
require.NoError(t, err) require.NoError(t, err)
w.History[i].event = evt w.History[i].event = &evt
events = append(events, evt) events = append(events, evt)
} }
// test different orderings // test different orderings
testCases := []struct { testCases := []struct {
name string name string
sort func([]*nostr.Event) sort func([]nostr.Event)
}{ }{
{ {
name: "random order", name: "random order",
sort: func(evts []*nostr.Event) { sort: func(evts []nostr.Event) {
r := rand.New(rand.NewSource(42)) // deterministic r := rand.New(rand.NewSource(42)) // deterministic
r.Shuffle(len(evts), func(i, j int) { r.Shuffle(len(evts), func(i, j int) {
evts[i], evts[j] = evts[j], evts[i] evts[i], evts[j] = evts[j], evts[i]
@@ -124,16 +125,16 @@ func TestWallet(t *testing.T) {
}, },
{ {
name: "most recent first", name: "most recent first",
sort: func(evts []*nostr.Event) { sort: func(evts []nostr.Event) {
slices.SortFunc(evts, func(a, b *nostr.Event) int { slices.SortFunc(evts, func(a, b nostr.Event) int {
return int(b.CreatedAt - a.CreatedAt) return int(b.CreatedAt - a.CreatedAt)
}) })
}, },
}, },
{ {
name: "least recent first", name: "least recent first",
sort: func(evts []*nostr.Event) { sort: func(evts []nostr.Event) {
slices.SortFunc(evts, func(a, b *nostr.Event) int { slices.SortFunc(evts, func(a, b nostr.Event) int {
return int(a.CreatedAt - b.CreatedAt) return int(a.CreatedAt - b.CreatedAt)
}) })
}, },
@@ -143,7 +144,7 @@ func TestWallet(t *testing.T) {
for _, tc := range testCases { for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
// make a copy and sort it // make a copy and sort it
eventsCopy := make([]*nostr.Event, len(events)) eventsCopy := make([]nostr.Event, len(events))
copy(eventsCopy, events) copy(eventsCopy, events)
tc.sort(eventsCopy) tc.sort(eventsCopy)
@@ -162,7 +163,7 @@ func TestWallet(t *testing.T) {
// load wallet from events // load wallet from events
loaded := loadWallet(ctx, kr, evtChan, make(chan nostr.RelayEvent), eoseChan) loaded := loadWallet(ctx, kr, evtChan, make(chan nostr.RelayEvent), eoseChan)
loaded.Processed = func(evt *nostr.Event, err error) { loaded.Processed = func(evt nostr.Event, err error) {
fmt.Println("processed", evt.Kind, err) fmt.Println("processed", evt.Kind, err)
} }
@@ -174,8 +175,8 @@ func TestWallet(t *testing.T) {
slices.SortFunc(loaded.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) }) slices.SortFunc(loaded.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) })
slices.SortFunc(w.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) }) slices.SortFunc(w.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) })
for i := range w.History { for i := range w.History {
slices.SortFunc(loaded.History[i].TokenReferences, func(a, b TokenRef) int { return cmp.Compare(a.EventID, b.EventID) }) slices.SortFunc(loaded.History[i].TokenReferences, func(a, b TokenRef) int { return bytes.Compare(a.EventID[:], b.EventID[:]) })
slices.SortFunc(w.History[i].TokenReferences, func(a, b TokenRef) int { return cmp.Compare(a.EventID, b.EventID) }) slices.SortFunc(w.History[i].TokenReferences, func(a, b TokenRef) int { return bytes.Compare(a.EventID[:], b.EventID[:]) })
require.Equal(t, loaded.History[i], w.History[i]) require.Equal(t, loaded.History[i], w.History[i])
} }
require.ElementsMatch(t, loaded.Mints, w.Mints) require.ElementsMatch(t, loaded.Mints, w.Mints)
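Event and token IDs are value types here (a 32-byte nostr.ID) rather than hex strings, which is why the sort in this test switches from cmp.Compare to bytes.Compare. A self-contained sketch, using a local stand-in type instead of the real nostr.ID, showing that comparing the raw bytes preserves the same ordering the lowercase-hex comparison gave:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// ID is a local stand-in for the 32-byte value type used in the diff (nostr.ID).
type ID [32]byte

func mustIDFromHex(s string) ID {
	var id ID
	if _, err := hex.Decode(id[:], []byte(s)); err != nil {
		panic(err)
	}
	return id
}

func main() {
	a := mustIDFromHex("61f86031d0ab95e9134a3ab955e96104cb1f4d610172838d28aa7ae9dc1cc924")
	b := mustIDFromHex("8f14c0a4ff1bf85ccc26bf0125b9a289552f9b59bbb310b163d6a88a7bbd4ebc")
	// bytes.Compare over the raw 32 bytes orders IDs exactly like the old
	// cmp.Compare over lowercase hex strings, so the sorted test data stays stable.
	fmt.Println(bytes.Compare(a[:], b[:])) // -1
}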

View File

@@ -20,24 +20,24 @@ func SendNutzap(
kr nostr.Keyer, kr nostr.Keyer,
w *nip60.Wallet, w *nip60.Wallet,
pool *nostr.Pool, pool *nostr.Pool,
targetUserPublickey string, targetUserPublickey nostr.PubKey,
getUserReadRelays func(context.Context, string, int) []string, getUserReadRelays func(context.Context, nostr.PubKey, int) []string,
relays []string, relays []string,
eventId string, // can be "" if not targeting a specific event eventId string, // can be "" if not targeting a specific event
amount uint64, amount uint64,
message string, message string,
) (chan nostr.PublishResult, error) { ) (chan nostr.PublishResult, error) {
ie := pool.QuerySingle(ctx, relays, nostr.Filter{Kinds: []int{10019}, Authors: []string{targetUserPublickey}}) ie := pool.QuerySingle(ctx, relays, nostr.Filter{Kinds: []uint16{10019}, Authors: []nostr.PubKey{targetUserPublickey}}, nostr.SubscriptionOptions{})
if ie == nil { if ie == nil {
return nil, NutzapsNotAccepted return nil, NutzapsNotAccepted
} }
info := Info{} info := Info{}
if err := info.ParseEvent(ie.Event); err != nil { if err := info.ParseEvent(&ie.Event); err != nil {
return nil, err return nil, err
} }
if len(info.Mints) == 0 || info.PublicKey == "" { if len(info.Mints) == 0 || info.PublicKey == nostr.ZeroPK {
return nil, NutzapsNotAccepted return nil, NutzapsNotAccepted
} }
@@ -55,7 +55,7 @@ func SendNutzap(
Tags: make(nostr.Tags, 0, 8), Tags: make(nostr.Tags, 0, 8),
} }
nutzap.Tags = append(nutzap.Tags, nostr.Tag{"p", targetUserPublickey}) nutzap.Tags = append(nutzap.Tags, nostr.Tag{"p", targetUserPublickey.Hex()})
if eventId != "" { if eventId != "" {
nutzap.Tags = append(nutzap.Tags, nostr.Tag{"e", eventId}) nutzap.Tags = append(nutzap.Tags, nostr.Tag{"e", eventId})
} }
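The target pubkey is now a typed nostr.PubKey, with nostr.ZeroPK as the "not set" sentinel and Hex() used whenever a tag needs the string form. A minimal sketch of that pattern with local stand-in definitions (PubKey, ZeroPK and Hex are mirrored here only for illustration; the real ones live in the nostr package):

package main

import (
	"encoding/hex"
	"fmt"
)

// PubKey and ZeroPK are stand-ins for the typed pubkey used above.
type PubKey [32]byte

var ZeroPK PubKey

func (pk PubKey) Hex() string { return hex.EncodeToString(pk[:]) }

func main() {
	var recipient PubKey // stays ZeroPK until parsed, replacing the old "" sentinel
	fmt.Println(recipient == ZeroPK) // true: nutzaps would be rejected at this point

	hex.Decode(recipient[:], []byte("0000000000000000000000000000000000000000000000000000000000000002"))
	// tags are still plain strings, so the typed key is hex-encoded explicitly
	fmt.Println([]string{"p", recipient.Hex()})
}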

View File

@@ -0,0 +1,35 @@
package vector
import (
"bytes"
"cmp"
"fiatjaf.com/nostr/nip77/negentropy"
)
// itemCompare orders items by timestamp, breaking ties by comparing ID bytes.
func itemCompare(a, b negentropy.Item) int {
if a.Timestamp == b.Timestamp {
return bytes.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.Timestamp, b.Timestamp)
}
// searchItemWithBound is a lower-bound binary search (the sort.Search pattern written out):
// it returns the index of the first item that is not below the given bound, comparing by
// timestamp and then by ID bytes against the bound's IDPrefix.
func searchItemWithBound(items []negentropy.Item, bound negentropy.Bound) int {
n := len(items)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if items[h].Timestamp < bound.Timestamp ||
(items[h].Timestamp == bound.Timestamp && bytes.Compare(items[h].ID[:], bound.IDPrefix) == -1) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i
}
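searchItemWithBound is needed because the bound's IDPrefix may be shorter than 32 bytes, so the generic slices.BinarySearchFunc call used before no longer fits. A self-contained sketch of the same invariant, with hypothetical local types standing in for negentropy.Item and negentropy.Bound:

package main

import (
	"bytes"
	"fmt"
)

// item and bound are illustrative stand-ins, not the real negentropy types.
type item struct {
	Timestamp int64
	ID        [32]byte
}

type bound struct {
	Timestamp int64
	IDPrefix  []byte
}

// search returns the index of the first item that is not below the bound,
// using the same comparison as searchItemWithBound above.
func search(items []item, b bound) int {
	i, j := 0, len(items)
	for i < j {
		h := int(uint(i+j) >> 1)
		if items[h].Timestamp < b.Timestamp ||
			(items[h].Timestamp == b.Timestamp && bytes.Compare(items[h].ID[:], b.IDPrefix) == -1) {
			i = h + 1
		} else {
			j = h
		}
	}
	return i
}

func main() {
	items := []item{{Timestamp: 10}, {Timestamp: 20}, {Timestamp: 30}}
	fmt.Println(search(items, bound{Timestamp: 20})) // 1: first item not below the bound
}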

View File

@@ -1,7 +1,6 @@
package vector package vector
import ( import (
"fmt"
"iter" "iter"
"slices" "slices"
@@ -24,10 +23,6 @@ func New() *Vector {
} }
func (v *Vector) Insert(createdAt nostr.Timestamp, id nostr.ID) { func (v *Vector) Insert(createdAt nostr.Timestamp, id nostr.ID) {
if len(id) != 64 {
panic(fmt.Errorf("bad id size for added item: expected %d bytes, got %d", 32, len(id)/2))
}
item := negentropy.Item{Timestamp: createdAt, ID: id} item := negentropy.Item{Timestamp: createdAt, ID: id}
v.items = append(v.items, item) v.items = append(v.items, item)
} }
@@ -39,12 +34,12 @@ func (v *Vector) Seal() {
panic("trying to seal an already sealed vector") panic("trying to seal an already sealed vector")
} }
v.sealed = true v.sealed = true
slices.SortFunc(v.items, negentropy.ItemCompare) slices.SortFunc(v.items, itemCompare)
} }
func (v *Vector) GetBound(idx int) negentropy.Bound { func (v *Vector) GetBound(idx int) negentropy.Bound {
if idx < len(v.items) { if idx < len(v.items) {
return negentropy.Bound{Item: v.items[idx]} return negentropy.Bound{Timestamp: v.items[idx].Timestamp, IDPrefix: v.items[idx].ID[:]}
} }
return negentropy.InfiniteBound return negentropy.InfiniteBound
} }
@@ -60,7 +55,7 @@ func (v *Vector) Range(begin, end int) iter.Seq2[int, negentropy.Item] {
} }
func (v *Vector) FindLowerBound(begin, end int, bound negentropy.Bound) int { func (v *Vector) FindLowerBound(begin, end int, bound negentropy.Bound) int {
idx, _ := slices.BinarySearchFunc(v.items[begin:end], bound.Item, negentropy.ItemCompare) idx := searchItemWithBound(v.items[begin:end], bound)
return begin + idx return begin + idx
} }
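Putting the two vector changes together, a rough usage sketch; the import path for the vector package is assumed from the package layout above, and the timestamps and IDs are made up:

package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip77/negentropy/vector" // path assumed from the layout above
)

func main() {
	vec := vector.New()
	// Insert no longer length-checks the ID: nostr.ID is a fixed-size value type now
	vec.Insert(nostr.Timestamp(1700000000), nostr.MustIDFromHex("645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58"))
	vec.Insert(nostr.Timestamp(1700000100), nostr.MustIDFromHex("add072ae7d7a027748e03024267a1c073f3fbc26cca468ba8630d039a7f5df72"))
	vec.Seal() // sorts by (timestamp, id); GetBound and FindLowerBound assume that order

	// the lower bound for an item's own bound is the item itself
	fmt.Println(vec.FindLowerBound(0, 2, vec.GetBound(1))) // 1
}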

View File

@@ -1,8 +1,6 @@
package negentropy package negentropy
import ( import (
"bytes"
"cmp"
"fmt" "fmt"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
@@ -36,13 +34,6 @@ type Item struct {
ID nostr.ID ID nostr.ID
} }
func ItemCompare(a, b Item) int {
if a.Timestamp == b.Timestamp {
return bytes.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.Timestamp, b.Timestamp)
}
func (i Item) String() string { return fmt.Sprintf("Item<%d:%x>", i.Timestamp, i.ID[:]) } func (i Item) String() string { return fmt.Sprintf("Item<%d:%x>", i.Timestamp, i.ID[:]) }
type Bound struct { type Bound struct {

View File

@@ -13,8 +13,8 @@ import (
type direction struct { type direction struct {
label string label string
items chan nostr.ID items chan nostr.ID
source nostr.RelayStore source nostr.QuerierPublisher
target nostr.RelayStore target nostr.QuerierPublisher
} }
type Direction int type Direction int
@@ -27,21 +27,21 @@ const (
func NegentropySync( func NegentropySync(
ctx context.Context, ctx context.Context,
store nostr.RelayStore, store nostr.QuerierPublisher,
url string, url string,
filter nostr.Filter, filter nostr.Filter,
dir Direction, dir Direction,
) error { ) error {
id := "go-nostr-tmp" // for now we can't have more than one subscription in the same connection id := "nl-tmp" // for now we can't have more than one subscription in the same connection
data, err := store.QuerySync(ctx, filter)
if err != nil {
return fmt.Errorf("failed to query our local store: %w", err)
}
vec := vector.New() vec := vector.New()
neg := negentropy.New(vec, 1024*1024) neg := negentropy.New(vec, 1024*1024)
for _, evt := range data { ch, err := store.QueryEvents(ctx, filter)
if err != nil {
return err
}
for evt := range ch {
vec.Insert(evt.CreatedAt, evt.ID) vec.Insert(evt.CreatedAt, evt.ID)
} }
vec.Seal() vec.Seal()
@@ -49,7 +49,8 @@ func NegentropySync(
result := make(chan error) result := make(chan error)
var r *nostr.Relay var r *nostr.Relay
r, err = nostr.RelayConnect(ctx, url, nostr.WithCustomHandler(func(data string) { r, err = nostr.RelayConnect(ctx, url, nostr.RelayOptions{
CustomHandler: func(data string) {
envelope := ParseNegMessage(data) envelope := ParseNegMessage(data)
if envelope == nil { if envelope == nil {
return return
@@ -73,7 +74,8 @@ func NegentropySync(
r.Write(msgb) r.Write(msgb)
} }
} }
})) },
})
if err != nil { if err != nil {
return err return err
} }
@@ -122,7 +124,7 @@ func NegentropySync(
return return
} }
for evt := range evtch { for evt := range evtch {
dir.target.Publish(ctx, *evt) dir.target.Publish(ctx, evt)
} }
} }
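NegentropySync now leans on the narrower nostr.QuerierPublisher instead of the old RelayStore. Read off the calls in this hunk, the behaviour it needs looks roughly like the sketch below; this is an inference, not the package's actual declaration, and the Publish return value is not visible here, so error is assumed:

package sketch // illustrative only, not part of the library

import (
	"context"

	"fiatjaf.com/nostr"
)

// querierPublisher mirrors what NegentropySync asks of its store and target:
// stream events matching a filter, and accept events to be written.
type querierPublisher interface {
	QueryEvents(ctx context.Context, filter nostr.Filter) (chan nostr.Event, error)
	Publish(ctx context.Context, event nostr.Event) error // return type assumed
}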

View File

@@ -433,12 +433,39 @@ func (r *Relay) PrepareSubscription(ctx context.Context, filter Filter, opts Sub
return sub return sub
} }
// QueryEvents implements the Querier interface: it opens a subscription with the given filter
// and streams matching events over the returned channel until EOSE, the subscription being
// closed, or ctx being canceled.
func (r *Relay) QueryEvents(ctx context.Context, filter Filter) (chan Event, error) {
sub, err := r.Subscribe(ctx, filter, SubscriptionOptions{Label: "queryevents"})
if err != nil {
return nil, err
}
ch := make(chan Event)
go func() {
defer close(ch) // close the channel once we stop forwarding so range-based callers can finish
for {
select {
case evt := <-sub.Events:
ch <- evt
case <-sub.EndOfStoredEvents:
return
case <-sub.ClosedReason:
return
case <-ctx.Done():
return
}
}
}()
return ch, nil
}
// Count sends a "COUNT" command to the relay and returns the count of events matching the filters. // Count sends a "COUNT" command to the relay and returns the count of events matching the filters.
func (r *Relay) Count( func (r *Relay) Count(
ctx context.Context, ctx context.Context,
filter Filter, filter Filter,
opts SubscriptionOptions, opts SubscriptionOptions,
) (int64, []byte, error) { ) (uint32, []byte, error) {
v, err := r.countInternal(ctx, filter, opts) v, err := r.countInternal(ctx, filter, opts)
if err != nil { if err != nil {
return 0, nil, err return 0, nil, err
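A hedged usage sketch of the two relay methods touched here, with a placeholder relay URL; it assumes nothing beyond the signatures shown in this hunk (RelayConnect with RelayOptions, QueryEvents returning a channel that ends at EOSE, and Count returning a uint32 plus optional hyperloglog registers):

package main

import (
	"context"
	"fmt"
	"time"

	"fiatjaf.com/nostr"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	r, err := nostr.RelayConnect(ctx, "wss://relay.example.com", nostr.RelayOptions{})
	if err != nil {
		panic(err)
	}

	// stream stored events matching the filter; the channel ends at EOSE
	ch, err := r.QueryEvents(ctx, nostr.Filter{Kinds: []uint16{1}})
	if err != nil {
		panic(err)
	}
	for evt := range ch {
		fmt.Println(evt.ID)
	}

	// Count with the new uint32 return; hll holds raw NIP-45 hyperloglog registers, if any
	count, hll, err := r.Count(ctx, nostr.Filter{Kinds: []uint16{3}}, nostr.SubscriptionOptions{})
	fmt.Println(count, len(hll), err)
}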

View File

@@ -2,8 +2,6 @@ package sdk
import ( import (
"context" "context"
"encoding/hex"
"fmt"
"slices" "slices"
"sync" "sync"
"sync/atomic" "sync/atomic"
@@ -16,10 +14,10 @@ const (
pubkeyStreamOldestPrefix = byte('O') pubkeyStreamOldestPrefix = byte('O')
) )
func makePubkeyStreamKey(prefix byte, pubkey string) []byte { func makePubkeyStreamKey(prefix byte, pubkey nostr.PubKey) []byte {
key := make([]byte, 1+8) key := make([]byte, 1+8)
key[0] = prefix key[0] = prefix
hex.Decode(key[1:], []byte(pubkey[0:16])) copy(key[1:], pubkey[0:8])
return key return key
} }
@@ -30,9 +28,9 @@ func makePubkeyStreamKey(prefix byte, pubkey string) []byte {
func (sys *System) StreamLiveFeed( func (sys *System) StreamLiveFeed(
ctx context.Context, ctx context.Context,
pubkeys []nostr.PubKey, pubkeys []nostr.PubKey,
kinds []int, kinds []uint16,
) (<-chan *nostr.Event, error) { ) (<-chan nostr.Event, error) {
events := make(chan *nostr.Event) events := make(chan nostr.Event)
active := atomic.Int32{} active := atomic.Int32{}
active.Add(int32(len(pubkeys))) active.Add(int32(len(pubkeys)))
@@ -61,15 +59,17 @@ func (sys *System) StreamLiveFeed(
} }
filter := nostr.Filter{ filter := nostr.Filter{
Authors: []string{pubkey}, Authors: []nostr.PubKey{pubkey},
Since: since, Since: since,
Kinds: kinds, Kinds: kinds,
} }
go func() { go func() {
sub := sys.Pool.SubscribeMany(ctx, relays, filter, nostr.WithLabel("livefeed")) sub := sys.Pool.SubscribeMany(ctx, relays, filter, nostr.SubscriptionOptions{
Label: "livefeed",
})
for evt := range sub { for evt := range sub {
sys.StoreRelay.Publish(ctx, *evt.Event) sys.Publisher.Publish(ctx, evt.Event)
if latest < evt.CreatedAt { if latest < evt.CreatedAt {
latest = evt.CreatedAt latest = evt.CreatedAt
serial++ serial++
@@ -101,8 +101,8 @@ func (sys *System) StreamLiveFeed(
// for events or if we should just return what we have stored locally. // for events or if we should just return what we have stored locally.
func (sys *System) FetchFeedPage( func (sys *System) FetchFeedPage(
ctx context.Context, ctx context.Context,
pubkeys []string, pubkeys []nostr.PubKey,
kinds []int, kinds []uint16,
until nostr.Timestamp, until nostr.Timestamp,
totalLimit int, totalLimit int,
) ([]*nostr.Event, error) { ) ([]*nostr.Event, error) {
@@ -123,23 +123,23 @@ func (sys *System) FetchFeedPage(
} }
} }
filter := nostr.Filter{Authors: []string{pubkey}, Kinds: kinds} filter := nostr.Filter{Authors: []nostr.PubKey{pubkey}, Kinds: kinds}
if until > oldestTimestamp { if until > oldestTimestamp {
// we can use our local database // we can use our local database
filter.Until = &until filter.Until = &until
res, err := sys.StoreRelay.QuerySync(ctx, filter)
if err != nil {
return nil, fmt.Errorf("query failure at '%s': %w", pubkey, err)
}
if len(res) >= limitPerKey { count := 0
for evt := range sys.Store.QueryEvents(filter) {
events = append(events, evt)
count++
if count >= limitPerKey {
// we got enough from the local store // we got enough from the local store
events = append(events, res...)
wg.Done() wg.Done()
continue continue
} }
} }
}
// if we didn't get enough events from local database // if we didn't get enough events from local database
// OR if we are requesting for very old stuff // OR if we are requesting for very old stuff
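The stream-key change is easy to miss: the old code hex-decoded the first 16 hex characters of the pubkey string, the new code copies the first 8 raw bytes of the typed pubkey, and both produce the same key. A small self-contained check of that equivalence:

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

func main() {
	const pubkeyHex = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d"

	// old path: decode the first 16 hex characters of the string into 8 bytes
	oldKey := make([]byte, 1+8)
	oldKey[0] = byte('O')
	hex.Decode(oldKey[1:], []byte(pubkeyHex[0:16]))

	// new path: the pubkey is already raw bytes, so copy the first 8 directly
	var pk [32]byte
	hex.Decode(pk[:], []byte(pubkeyHex))
	newKey := make([]byte, 1+8)
	newKey[0] = byte('O')
	copy(newKey[1:], pk[0:8])

	fmt.Println(bytes.Equal(oldKey, newKey)) // true
}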

View File

@@ -10,10 +10,10 @@ import (
) )
func runTestWith(t *testing.T, hdb hints.HintsDB) { func runTestWith(t *testing.T, hdb hints.HintsDB) {
const key1 = "0000000000000000000000000000000000000000000000000000000000000001" key1 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")
const key2 = "0000000000000000000000000000000000000000000000000000000000000002" key2 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000002")
const key3 = "0000000000000000000000000000000000000000000000000000000000000003" key3 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000003")
const key4 = "0000000000000000000000000000000000000000000000000000000000000004" key4 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000004")
const relayA = "wss://aaa.com" const relayA = "wss://aaa.com"
const relayB = "wss://bbb.net" const relayB = "wss://bbb.net"
const relayC = "wss://ccc.org" const relayC = "wss://ccc.org"

View File

@@ -14,7 +14,7 @@ import (
// ProfileMetadata represents user profile information from kind 0 events. // ProfileMetadata represents user profile information from kind 0 events.
// It contains both the raw event and parsed metadata fields. // It contains both the raw event and parsed metadata fields.
type ProfileMetadata struct { type ProfileMetadata struct {
PubKey string `json:"-"` // must always be set otherwise things will break PubKey nostr.PubKey `json:"-"` // must always be set otherwise things will break
Event *nostr.Event `json:"-"` // may be empty if a profile metadata event wasn't found Event *nostr.Event `json:"-"` // may be empty if a profile metadata event wasn't found
// every one of these may be empty // every one of these may be empty
@@ -33,8 +33,7 @@ type ProfileMetadata struct {
// Npub returns the NIP-19 npub encoding of the profile's public key. // Npub returns the NIP-19 npub encoding of the profile's public key.
func (p ProfileMetadata) Npub() string { func (p ProfileMetadata) Npub() string {
v, _ := nip19.EncodePublicKey(p.PubKey) return nip19.EncodeNpub(p.PubKey)
return v
} }
// NpubShort returns a shortened version of the NIP-19 npub encoding, // NpubShort returns a shortened version of the NIP-19 npub encoding,
@@ -47,8 +46,7 @@ func (p ProfileMetadata) NpubShort() string {
// Nprofile returns the NIP-19 nprofile encoding of the profile, // Nprofile returns the NIP-19 nprofile encoding of the profile,
// including relay hints from the user's outbox. // including relay hints from the user's outbox.
func (p ProfileMetadata) Nprofile(ctx context.Context, sys *System, nrelays int) string { func (p ProfileMetadata) Nprofile(ctx context.Context, sys *System, nrelays int) string {
v, _ := nip19.EncodeProfile(p.PubKey, sys.FetchOutboxRelays(ctx, p.PubKey, 2)) return nip19.EncodeNprofile(p.PubKey, sys.FetchOutboxRelays(ctx, p.PubKey, 2))
return v
} }
// ShortName returns the best available name for display purposes. // ShortName returns the best available name for display purposes.
@@ -105,7 +103,7 @@ func (sys System) FetchProfileFromInput(ctx context.Context, nip19OrNip05Code st
// FetchProfileMetadata fetches metadata for a given user from the local cache, or from the local store, // FetchProfileMetadata fetches metadata for a given user from the local cache, or from the local store,
// or, failing these, from the target user's defined outbox relays -- then caches the result. // or, failing these, from the target user's defined outbox relays -- then caches the result.
// It always returns a ProfileMetadata, even if no metadata was found (in which case only the PubKey field is set). // It always returns a ProfileMetadata, even if no metadata was found (in which case only the PubKey field is set).
func (sys *System) FetchProfileMetadata(ctx context.Context, pubkey string) (pm ProfileMetadata) { func (sys *System) FetchProfileMetadata(ctx context.Context, pubkey nostr.PubKey) (pm ProfileMetadata) {
if v, ok := sys.MetadataCache.Get(pubkey); ok { if v, ok := sys.MetadataCache.Get(pubkey); ok {
return v return v
} }
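Npub and Nprofile now call encoders that take the typed pubkey and return the bech32 string directly, with no error to discard. A tiny sketch; the fiatjaf.com/nostr/nip19 import path is assumed from the calls above:

package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/nip19" // path assumed, see note above
)

func main() {
	pk := nostr.MustPubKeyFromHex("3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
	// EncodeNpub takes the typed key and returns the npub string with no error,
	// which is what lets ProfileMetadata.Npub become a one-liner above.
	fmt.Println(nip19.EncodeNpub(pk))
}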

View File

@@ -5,6 +5,7 @@ import (
"math/rand/v2" "math/rand/v2"
"fiatjaf.com/nostr" "fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/wrappers"
"fiatjaf.com/nostr/sdk/cache" "fiatjaf.com/nostr/sdk/cache"
cache_memory "fiatjaf.com/nostr/sdk/cache/memory" cache_memory "fiatjaf.com/nostr/sdk/cache/memory"
"fiatjaf.com/nostr/sdk/dataloader" "fiatjaf.com/nostr/sdk/dataloader"
@@ -51,7 +52,7 @@ type System struct {
NoteSearchRelays *RelayStream NoteSearchRelays *RelayStream
Store eventstore.Store Store eventstore.Store
StoreRelay nostr.RelayStore Publisher wrappers.StorePublisher
replaceableLoaders []*dataloader.Loader[nostr.PubKey, *nostr.Event] replaceableLoaders []*dataloader.Loader[nostr.PubKey, *nostr.Event]
addressableLoaders []*dataloader.Loader[nostr.PubKey, []*nostr.Event] addressableLoaders []*dataloader.Loader[nostr.PubKey, []*nostr.Event]

tags.go
View File

@@ -4,53 +4,10 @@ import (
"errors" "errors"
"iter" "iter"
"slices" "slices"
"strings"
) )
type Tag []string type Tag []string
// Deprecated: this is too cumbersome for no reason when what we actually want is
// the simpler logic present in Find and FindWithValue.
func (tag Tag) StartsWith(prefix []string) bool {
prefixLen := len(prefix)
if prefixLen > len(tag) {
return false
}
// check initial elements for equality
for i := 0; i < prefixLen-1; i++ {
if prefix[i] != tag[i] {
return false
}
}
// check last element just for a prefix
return strings.HasPrefix(tag[prefixLen-1], prefix[prefixLen-1])
}
// Deprecated: write these inline instead
func (tag Tag) Key() string {
if len(tag) > 0 {
return tag[0]
}
return ""
}
// Deprecated: write these inline instead
func (tag Tag) Value() string {
if len(tag) > 1 {
return tag[1]
}
return ""
}
// Deprecated: write these inline instead
func (tag Tag) Relay() string {
if len(tag) > 2 && (tag[0] == "e" || tag[0] == "p") {
return NormalizeURL(tag[2])
}
return ""
}
type Tags []Tag type Tags []Tag
// GetD gets the first "d" tag (for parameterized replaceable events) value or "" // GetD gets the first "d" tag (for parameterized replaceable events) value or ""
@@ -63,89 +20,6 @@ func (tags Tags) GetD() string {
return "" return ""
} }
// Deprecated: use Find or FindWithValue instead
func (tags Tags) GetFirst(tagPrefix []string) *Tag {
for _, v := range tags {
if v.StartsWith(tagPrefix) {
return &v
}
}
return nil
}
// Deprecated: use FindLast or FindLastWithValue instead
func (tags Tags) GetLast(tagPrefix []string) *Tag {
for i := len(tags) - 1; i >= 0; i-- {
v := tags[i]
if v.StartsWith(tagPrefix) {
return &v
}
}
return nil
}
// Deprecated: use FindAll instead
func (tags Tags) GetAll(tagPrefix []string) Tags {
result := make(Tags, 0, len(tags))
for _, v := range tags {
if v.StartsWith(tagPrefix) {
result = append(result, v)
}
}
return result
}
// Deprecated: use FindAll instead
func (tags Tags) All(tagPrefix []string) iter.Seq2[int, Tag] {
return func(yield func(int, Tag) bool) {
for i, v := range tags {
if v.StartsWith(tagPrefix) {
if !yield(i, v) {
break
}
}
}
}
}
// Deprecated: this is useless, write your own
func (tags Tags) FilterOut(tagPrefix []string) Tags {
filtered := make(Tags, 0, len(tags))
for _, v := range tags {
if !v.StartsWith(tagPrefix) {
filtered = append(filtered, v)
}
}
return filtered
}
// Deprecated: this is useless, write your own
func (tags *Tags) FilterOutInPlace(tagPrefix []string) {
for i := 0; i < len(*tags); i++ {
tag := (*tags)[i]
if tag.StartsWith(tagPrefix) {
// remove this by swapping the last tag into this place
last := len(*tags) - 1
(*tags)[i] = (*tags)[last]
*tags = (*tags)[0:last]
i-- // this is so we can match this just swapped item in the next iteration
}
}
}
// Deprecated: write your own instead with Find() and append()
func (tags Tags) AppendUnique(tag Tag) Tags {
n := len(tag)
if n > 2 {
n = 2
}
if tags.GetFirst(tag[:n]) == nil {
return append(tags, tag)
}
return tags
}
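The helpers removed above all collapse into the Find family kept below. A rough migration sketch; FindWithValue is only named in the deprecation notes here, so its (key, value) parameters and Tag return type are assumptions:

package main

import (
	"fmt"

	"fiatjaf.com/nostr"
)

func main() {
	tags := nostr.Tags{{"p", "def"}, {"e", "abc", "wss://relay.example.com"}}

	// tags.GetFirst([]string{"p"})        ->  tags.Find("p")
	// tags.GetFirst([]string{"e", "abc"}) ->  tags.FindWithValue("e", "abc")
	// tag.Key(), tag.Value()              ->  tag[0], tag[1] (after checking len)
	p := tags.Find("p")
	e := tags.FindWithValue("e", "abc")

	if len(p) > 1 {
		fmt.Println("p ->", p[1])
	}
	if len(e) > 2 {
		fmt.Println("e relay hint ->", e[2])
	}
}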
// Find returns the first tag with the given key/tagName that also has one value (i.e. at least 2 items) // Find returns the first tag with the given key/tagName that also has one value (i.e. at least 2 items)
func (tags Tags) Find(key string) Tag { func (tags Tags) Find(key string) Tag {
for _, v := range tags { for _, v := range tags {