it never ends.
@@ -157,7 +157,7 @@ func (v ReqEnvelope) MarshalJSON() ([]byte, error) {
type CountEnvelope struct {
SubscriptionID string
Filter
Count *int64
Count *uint32
HyperLogLog []byte
}

@@ -176,8 +176,8 @@ func (v *CountEnvelope) FromJSON(data string) error {
v.SubscriptionID = string(unsafe.Slice(unsafe.StringData(arr[1].Str), len(arr[1].Str)))

var countResult struct {
Count *int64 `json:"count"`
HLL string `json:"hll"`
Count *uint32
HLL string
}
if err := json.Unmarshal(unsafe.Slice(unsafe.StringData(arr[2].Raw), len(arr[2].Raw)), &countResult); err == nil && countResult.Count != nil {
v.Count = countResult.Count

@@ -205,7 +205,7 @@ func (v CountEnvelope) MarshalJSON() ([]byte, error) {
w.RawString(`"`)
if v.Count != nil {
w.RawString(`{"count":`)
w.RawString(strconv.FormatInt(*v.Count, 10))
w.RawString(strconv.FormatUint(uint64(*v.Count), 10))
if v.HyperLogLog != nil {
w.RawString(`,"hll":"`)
hllHex := make([]byte, 512)
@@ -10,8 +10,8 @@ import (
"github.com/dgraph-io/badger/v4"
)

func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0
func (b *BadgerBackend) CountEvents(filter nostr.Filter) (uint32, error) {
var count uint32 = 0

queries, extraFilter, since, err := prepareQueries(filter)
if err != nil {

@@ -86,8 +86,8 @@ func (b *BadgerBackend) CountEvents(filter nostr.Filter) (int64, error) {
return count, err
}

func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
var count int64 = 0
func (b *BadgerBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error) {
var count uint32 = 0

queries, extraFilter, since, err := prepareQueries(filter)
if err != nil {

@@ -53,6 +53,6 @@ func (b *BlugeBackend) Init() error {
return nil
}

func (b *BlugeBackend) CountEvents(nostr.Filter) (int64, error) {
func (b *BlugeBackend) CountEvents(nostr.Filter) (uint32, error) {
return 0, errors.New("not supported")
}
@@ -5,8 +5,8 @@ import (
"fmt"
"os"

"github.com/urfave/cli/v3"
"fiatjaf.com/nostr"
"github.com/urfave/cli/v3"
)

var delete_ = &cli.Command{

@@ -17,17 +17,15 @@ var delete_ = &cli.Command{
Action: func(ctx context.Context, c *cli.Command) error {
hasError := false
for line := range getStdinLinesOrFirstArgument(c) {
f := nostr.Filter{IDs: []string{line}}
ch, err := db.QueryEvents(ctx, f)
id, err := nostr.IDFromHex(line)
if err != nil {
fmt.Fprintf(os.Stderr, "error querying for %s: %s\n", f, err)
fmt.Fprintf(os.Stderr, "invalid id '%s': %s\n", line, err)
hasError = true
}
for evt := range ch {
if err := db.DeleteEvent(ctx, evt); err != nil {
fmt.Fprintf(os.Stderr, "error deleting %s: %s\n", evt, err)
hasError = true
}

if err := db.DeleteEvent(id); err != nil {
fmt.Fprintf(os.Stderr, "error deleting '%s': %s\n", id.Hex(), err)
hasError = true
}
}

@@ -96,7 +96,7 @@ var app = &cli.Command{
if err := json.Unmarshal(scanner.Bytes(), &evt); err != nil {
log.Printf("invalid event read at line %d: %s (`%s`)\n", i, err, scanner.Text())
}
db.SaveEvent(ctx, &evt)
db.SaveEvent(evt)
i++
}
}()
@@ -6,8 +6,8 @@ import (
"fmt"
"os"

"github.com/urfave/cli/v3"
"fiatjaf.com/nostr"
"github.com/urfave/cli/v3"
)

// this is the default command when no subcommands are given, we will just try everything

@@ -21,16 +21,14 @@ var queryOrSave = &cli.Command{
re := &nostr.ReqEnvelope{}
e := &nostr.Event{}
f := &nostr.Filter{}
if json.Unmarshal([]byte(line), ee) == nil && ee.Event.ID != "" {
e = &ee.Event
return doSave(ctx, line, e)
if json.Unmarshal([]byte(line), ee) == nil && ee.Event.ID != nostr.ZeroID {
return doSave(ctx, line, ee.Event)
}
if json.Unmarshal([]byte(line), e) == nil && e.ID != "" {
return doSave(ctx, line, e)
if json.Unmarshal([]byte(line), e) == nil && e.ID != nostr.ZeroID {
return doSave(ctx, line, *e)
}
if json.Unmarshal([]byte(line), re) == nil && len(re.Filters) > 0 {
f = &re.Filters[0]
return doQuery(ctx, f)
if json.Unmarshal([]byte(line), re) == nil {
return doQuery(ctx, &re.Filter)
}
if json.Unmarshal([]byte(line), f) == nil && len(f.String()) > 2 {
return doQuery(ctx, f)

@@ -40,21 +38,16 @@ var queryOrSave = &cli.Command{
},
}

func doSave(ctx context.Context, line string, e *nostr.Event) error {
if err := db.SaveEvent(ctx, e); err != nil {
func doSave(ctx context.Context, line string, evt nostr.Event) error {
if err := db.SaveEvent(evt); err != nil {
return fmt.Errorf("failed to save event '%s': %s", line, err)
}
fmt.Fprintf(os.Stderr, "saved %s", e.ID)
fmt.Fprintf(os.Stderr, "saved %s", evt.ID)
return nil
}

func doQuery(ctx context.Context, f *nostr.Filter) error {
ch, err := db.QueryEvents(ctx, *f)
if err != nil {
return fmt.Errorf("error querying: %w", err)
}

for evt := range ch {
for evt := range db.QueryEvents(*f) {
fmt.Println(evt)
}
return nil
@@ -13,8 +13,8 @@ import (
"golang.org/x/exp/slices"
)

func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0
func (b *LMDBBackend) CountEvents(filter nostr.Filter) (uint32, error) {
var count uint32 = 0

queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)
if err != nil {

@@ -95,12 +95,12 @@ func (b *LMDBBackend) CountEvents(filter nostr.Filter) (int64, error) {

// CountEventsHLL is like CountEvents, but it will build a hyperloglog value while iterating through results,
// following NIP-45
func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error) {
func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error) {
if useCache, _ := b.EnableHLLCacheFor(filter.Kinds[0]); useCache {
return b.countEventsHLLCached(filter)
}

var count int64 = 0
var count uint32 = 0

// this is different than CountEvents because some of these extra checks are not applicable in HLL-valid filters
queries, _, extraKinds, extraTagKey, extraTagValues, since, err := b.prepareQueries(filter)

@@ -180,7 +180,7 @@ func (b *LMDBBackend) CountEventsHLL(filter nostr.Filter, offset int) (int64, *h
}

// countEventsHLLCached will just return a cached value from disk (and presumably we don't even have the events required to compute this anymore).
func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperloglog.HyperLogLog, error) {
func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (uint32, *hyperloglog.HyperLogLog, error) {
cacheKey := make([]byte, 2+8)
binary.BigEndian.PutUint16(cacheKey[0:2], uint16(filter.Kinds[0]))
switch filter.Kinds[0] {

@@ -192,7 +192,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
hex.Decode(cacheKey[2:2+8], []byte(filter.Tags["E"][0][0:8*2]))
}

var count int64
var count uint32
var hll *hyperloglog.HyperLogLog

err := b.lmdbEnv.View(func(txn *lmdb.Txn) error {

@@ -204,7 +204,7 @@ func (b *LMDBBackend) countEventsHLLCached(filter nostr.Filter) (int64, *hyperlo
return err
}
hll = hyperloglog.NewWithRegisters(val, 0) // offset doesn't matter here
count = int64(hll.Count())
count = uint32(hll.Count())
return nil
})
@@ -5,9 +5,9 @@ import (
"encoding/hex"
"fmt"

"github.com/PowerDNS/lmdb-go/lmdb"
"fiatjaf.com/nostr/eventstore/internal"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/internal"
"github.com/PowerDNS/lmdb-go/lmdb"
)

type query struct {

@@ -143,7 +143,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
if filter.Authors != nil {
extraAuthors = make([][32]byte, len(filter.Authors))
for i, pk := range filter.Authors {
hex.Decode(extraAuthors[i][:], []byte(pk))
extraAuthors[i] = pk
}
}

@@ -31,7 +31,7 @@ func (b *LMDBBackend) ReplaceEvent(evt nostr.Event) error {
shouldStore := true
for _, previous := range results {
if internal.IsOlder(previous.Event, evt) {
if err := b.delete(txn, previous.Event); err != nil {
if err := b.delete(txn, previous.Event.ID); err != nil {
return fmt.Errorf("failed to delete event %s for replacing: %w", previous.Event.ID, err)
}
} else {

@@ -10,8 +10,8 @@ import (
"github.com/PowerDNS/lmdb-go/lmdb"
)

func (il *IndexingLayer) CountEvents(filter nostr.Filter) (int64, error) {
var count int64 = 0
func (il *IndexingLayer) CountEvents(filter nostr.Filter) (uint32, error) {
var count uint32 = 0

queries, extraAuthors, extraKinds, extraTagKey, extraTagValues, since, err := il.prepareQueries(filter)
if err != nil {
@@ -1,10 +1,10 @@
package nullstore

import (
"context"
"iter"

"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)

var _ eventstore.Store = NullStore{}

@@ -17,20 +17,22 @@ func (b NullStore) Init() error {

func (b NullStore) Close() {}

func (b NullStore) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
func (b NullStore) DeleteEvent(id nostr.ID) error {
return nil
}

func (b NullStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
ch := make(chan *nostr.Event)
close(ch)
return ch, nil
func (b NullStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
return func(yield func(nostr.Event) bool) {}
}

func (b NullStore) SaveEvent(ctx context.Context, evt *nostr.Event) error {
func (b NullStore) SaveEvent(evt nostr.Event) error {
return nil
}

func (b NullStore) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
func (b NullStore) ReplaceEvent(evt nostr.Event) error {
return nil
}

func (b NullStore) CountEvents(filter nostr.Filter) (uint32, error) {
return 0, nil
}
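Note on the QueryEvents change above: the store API moves from a context plus channel to an `iter.Seq[nostr.Event]`. A minimal caller sketch, assuming the `fiatjaf.com/nostr/eventstore/nullstore` import path (the package name is shown in this diff; the path is an assumption):

```go
package main

import (
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/nullstore" // assumed path for the nullstore package above
)

func main() {
	var store nullstore.NullStore
	// ranging over the iter.Seq stops as soon as the loop exits;
	// no channel draining or context plumbing is needed anymore.
	for evt := range store.QueryEvents(nostr.Filter{}) {
		fmt.Println(evt.ID)
	}
}
```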
@@ -69,8 +69,8 @@ func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
}
}

func (b *SliceStore) CountEvents(filter nostr.Filter) (int64, error) {
var val int64
func (b *SliceStore) CountEvents(filter nostr.Filter) (uint32, error) {
var val uint32
for _, event := range b.internal {
if filter.Matches(event) {
val++

@@ -29,5 +29,5 @@ type Store interface {
ReplaceEvent(nostr.Event) error

// CountEvents counts all events that match a given filter
CountEvents(nostr.Filter) (int64, error)
CountEvents(nostr.Filter) (uint32, error)
}
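With `CountEvents` now returning `uint32` and the count wrapper removed below, a backend without a native counter can still be counted by iterating its query results. A hedged sketch (the helper name and package are illustrative, not part of this commit):

```go
package wrappers

import (
	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
)

// countByIterating mirrors what the removed eventstore/wrappers count wrapper
// used to do, adapted to the iter.Seq + uint32 API shown in this diff.
func countByIterating(store eventstore.Store, filter nostr.Filter) (uint32, error) {
	var count uint32
	for range store.QueryEvents(filter) {
		count++
	}
	return count, nil
}
```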
@@ -1,34 +0,0 @@
package count

import (
"context"

"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)

type Wrapper struct {
eventstore.Store
}

var _ eventstore.Store = (*Wrapper)(nil)

func (w Wrapper) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
if counter, ok := w.Store.(eventstore.Counter); ok {
return counter.CountEvents(ctx, filter)
}

ch, err := w.Store.QueryEvents(ctx, filter)
if err != nil {
return 0, err
}
if ch == nil {
return 0, nil
}

var count int64
for range ch {
count++
}
return count, nil
}

@@ -1,21 +0,0 @@
package disablesearch

import (
"context"

"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)

type Wrapper struct {
eventstore.Store
}

var _ eventstore.Store = (*Wrapper)(nil)

func (w Wrapper) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
if filter.Search != "" {
return nil, nil
}
return w.Store.QueryEvents(ctx, filter)
}
@@ -1,17 +1,20 @@
package eventstore
package wrappers

import (
"context"
"fmt"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)

type RelayWrapper struct {
Store
var _ nostr.Publisher = StorePublisher{}

type StorePublisher struct {
eventstore.Store
}

func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
func (w StorePublisher) Publish(ctx context.Context, evt nostr.Event) error {
if nostr.IsEphemeralKind(evt.Kind) {
// do not store ephemeral events
return nil

@@ -22,7 +25,7 @@ func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {

if nostr.IsRegularKind(evt.Kind) {
// regular events are just saved directly
if err := w.SaveEvent(evt); err != nil && err != ErrDupEvent {
if err := w.SaveEvent(evt); err != nil && err != eventstore.ErrDupEvent {
return fmt.Errorf("failed to save: %w", err)
}
return nil

@@ -1,4 +1,4 @@
package test
package wrappers

import (
"context"

@@ -7,7 +7,6 @@ import (
"time"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr/eventstore/slicestore"
"github.com/stretchr/testify/require"
)

@@ -21,7 +20,7 @@ func TestRelayWrapper(t *testing.T) {
s.Init()
defer s.Close()

w := eventstore.RelayWrapper{Store: s}
w := StorePublisher{Store: s}

evt1 := nostr.Event{
Kind: 3,
eventstore/wrappers/querier.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package wrappers

import (
"context"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore"
)

var _ nostr.Querier = StoreQuerier{}

type StoreQuerier struct {
eventstore.Store
}

func (w StoreQuerier) QueryEvents(ctx context.Context, filter nostr.Filter) (chan nostr.Event, error) {
ch := make(chan nostr.Event)

go func() {
for evt := range w.Store.QueryEvents(filter) {
ch <- evt
}
}()

return ch, nil
}
@@ -1,24 +0,0 @@
package skipevent

import (
"context"

"fiatjaf.com/nostr/eventstore"
"fiatjaf.com/nostr"
)

type Wrapper struct {
eventstore.Store

Skip func(ctx context.Context, evt *nostr.Event) bool
}

var _ eventstore.Store = (*Wrapper)(nil)

func (w Wrapper) SaveEvent(ctx context.Context, evt *nostr.Event) error {
if w.Skip(ctx, evt) {
return nil
}

return w.Store.SaveEvent(ctx, evt)
}
@@ -1,129 +0,0 @@
package main

import (
"context"
"fmt"
"io"
"os"
"strings"
"time"

jsoniter "github.com/json-iterator/go"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip19"
)

func main() {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)

// connect to relay
url := "wss://relay.stoner.com"
relay, err := nostr.RelayConnect(ctx, url)
if err != nil {
panic(err)
}

reader := os.Stdin
var npub string
var b [64]byte
fmt.Fprintf(os.Stderr, "using %s\n----\nexample subscription for three most recent notes mentioning user\npaste npub key: ", url)
if n, err := reader.Read(b[:]); err == nil {
npub = strings.TrimSpace(fmt.Sprintf("%s", b[:n]))
} else {
panic(err)
}

// create filters
var filters nostr.Filters
if _, v, err := nip19.Decode(npub); err == nil {
t := make(map[string][]string)
// making a "p" tag for the above public key.
// this filters for messages tagged with the user, mainly replies.
t["p"] = []string{v.(string)}
filters = []nostr.Filter{{
Kinds: []int{nostr.KindTextNote},
Tags: t,
// limit = 3, get the three most recent notes
Limit: 3,
}}
} else {
panic("not a valid npub!")
}

// create a subscription and submit to relay
// results will be returned on the sub.Events channel
sub, _ := relay.Subscribe(ctx, filters)

// we will append the returned events to this slice
evs := make([]nostr.Event, 0)

go func() {
<-sub.EndOfStoredEvents
cancel()
}()
for ev := range sub.Events {
evs = append(evs, *ev)
}

filename := "example_output.json"
if f, err := os.Create(filename); err == nil {
fmt.Fprintf(os.Stderr, "returned events saved to %s\n", filename)
// encode the returned events in a file
enc := jsoniter.NewEncoder(f)
enc.SetIndent("", " ")
enc.Encode(evs)
f.Close()
} else {
panic(err)
}

fmt.Fprintf(os.Stderr, "----\nexample publication of note.\npaste nsec key (leave empty to autogenerate): ")
var nsec string
if n, err := reader.Read(b[:]); err == nil {
nsec = strings.TrimSpace(fmt.Sprintf("%s", b[:n]))
} else {
panic(err)
}

var sk string
ev := nostr.Event{}
if _, s, e := nip19.Decode(nsec); e == nil {
sk = s.(string)
} else {
sk = nostr.GeneratePrivateKey()
}
if pub, e := nostr.GetPublicKey(sk); e == nil {
ev.PubKey = pub
if npub, e := nip19.EncodePublicKey(pub); e == nil {
fmt.Fprintln(os.Stderr, "using:", npub)
}
} else {
panic(e)
}

ev.CreatedAt = nostr.Now()
ev.Kind = nostr.KindTextNote
var content string
fmt.Fprintln(os.Stderr, "enter content of note, ending with an empty newline (ctrl+d):")
for {
if n, err := reader.Read(b[:]); err == nil {
content = fmt.Sprintf("%s%s", content, fmt.Sprintf("%s", b[:n]))
} else if err == io.EOF {
break
} else {
panic(err)
}
}
ev.Content = strings.TrimSpace(content)
ev.Sign(sk)
for _, url := range []string{"wss://relay.stoner.com"} {
ctx := context.WithValue(context.Background(), "url", url)
relay, e := nostr.RelayConnect(ctx, url)
if e != nil {
fmt.Println(e)
continue
}
fmt.Println("posting to: ", url)
relay.Publish(ctx, ev)
}
}
interfaces.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package nostr

import (
"context"
)

type Publisher interface {
Publish(context.Context, Event) error
}

type Querier interface {
QueryEvents(context.Context, Filter) (chan Event, error)
}

type QuerierPublisher interface {
Querier
Publisher
}
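These new interfaces let callers depend on `nostr.Publisher`/`nostr.Querier` instead of a concrete store. A sketch of that usage, assuming the `eventstore/wrappers` import path and that `slicestore.SliceStore` is constructed as in the test earlier in this diff (both are assumptions):

```go
package main

import (
	"context"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore/slicestore" // assumed path
	"fiatjaf.com/nostr/eventstore/wrappers"   // assumed path for StorePublisher
)

// storeIncoming only needs the Publisher interface, not an eventstore.Store.
func storeIncoming(ctx context.Context, pub nostr.Publisher, evt nostr.Event) error {
	return pub.Publish(ctx, evt)
}

func main() {
	s := &slicestore.SliceStore{}
	s.Init()
	defer s.Close()
	_ = storeIncoming(context.Background(), wrappers.StorePublisher{Store: s}, nostr.Event{})
}
```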
keyer/lib.go (24 lines changed)
@@ -25,7 +25,7 @@ var (
// SignerOptions contains configuration options for creating a new signer.
type SignerOptions struct {
// BunkerClientSecretKey is the secret key used for the bunker client
BunkerClientSecretKey string
BunkerClientSecretKey nostr.SecretKey

// BunkerSignTimeout is the timeout duration for bunker signing operations
BunkerSignTimeout time.Duration

@@ -60,7 +60,7 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption

if strings.HasPrefix(input, "ncryptsec") {
if opts.PasswordHandler != nil {
return &EncryptedKeySigner{input, "", opts.PasswordHandler}, nil
return &EncryptedKeySigner{input, nostr.ZeroPK, opts.PasswordHandler}, nil
}
sec, err := nip49.Decrypt(input, opts.Password)
if err != nil {

@@ -70,12 +70,12 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption
return nil, fmt.Errorf("failed to decrypt with given password: %w", err)
}
pk := nostr.GetPublicKey(sec)
return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
return KeySigner{sec, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
} else if nip46.IsValidBunkerURL(input) || nip05.IsValidIdentifier(input) {
bcsk := nostr.GeneratePrivateKey()
bcsk := nostr.Generate()
oa := func(url string) { println("auth_url received but not handled") }

if opts.BunkerClientSecretKey != "" {
if opts.BunkerClientSecretKey != [32]byte{} {
bcsk = opts.BunkerClientSecretKey
}
if opts.BunkerAuthHandler != nil {

@@ -88,13 +88,15 @@ func New(ctx context.Context, pool *nostr.Pool, input string, opts *SignerOption
}
return BunkerSigner{bunker}, nil
} else if prefix, parsed, err := nip19.Decode(input); err == nil && prefix == "nsec" {
sec := parsed.(string)
pk, _ := nostr.GetPublicKey(sec)
return KeySigner{sec, pk, xsync.NewMapOf[string, [32]byte]()}, nil
sec := parsed.(nostr.SecretKey)
pk := nostr.GetPublicKey(sec)
return KeySigner{sec, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
} else if _, err := hex.DecodeString(input); err == nil && len(input) <= 64 {
input = strings.Repeat("0", 64-len(input)) + input // if the key is like '01', fill all the left zeroes
pk, _ := nostr.GetPublicKey(input)
return KeySigner{input, pk, xsync.NewMapOf[string, [32]byte]()}, nil
input := nostr.MustSecretKeyFromHex(
strings.Repeat("0", 64-len(input)) + input, // if the key is like '01', fill all the left zeroes
)
pk := nostr.GetPublicKey(input)
return KeySigner{input, pk, xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
}

return nil, fmt.Errorf("unsupported input '%s'", input)

@@ -14,24 +14,24 @@ var (

// ReadOnlyUser is a nostr.User that has this public key
type ReadOnlyUser struct {
pk string
pk nostr.PubKey
}

func NewReadOnlyUser(pk string) ReadOnlyUser {
func NewReadOnlyUser(pk nostr.PubKey) ReadOnlyUser {
return ReadOnlyUser{pk}
}

// GetPublicKey returns the public key associated with this signer.
func (ros ReadOnlyUser) GetPublicKey(context.Context) (string, error) {
func (ros ReadOnlyUser) GetPublicKey(context.Context) (nostr.PubKey, error) {
return ros.pk, nil
}

// ReadOnlySigner is like a ReadOnlyUser, but has a fake GetPublicKey method that doesn't work.
type ReadOnlySigner struct {
pk string
pk nostr.PubKey
}

func NewReadOnlySigner(pk string) ReadOnlySigner {
func NewReadOnlySigner(pk nostr.PubKey) ReadOnlySigner {
return ReadOnlySigner{pk}
}

@@ -41,6 +41,6 @@ func (ros ReadOnlySigner) SignEvent(context.Context, *nostr.Event) error {
}

// GetPublicKey returns the public key associated with this signer.
func (ros ReadOnlySigner) GetPublicKey(context.Context) (string, error) {
func (ros ReadOnlySigner) GetPublicKey(context.Context) (nostr.PubKey, error) {
return ros.pk, nil
}
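The keyer changes above replace hex strings with the typed `nostr.SecretKey`/`nostr.PubKey`. A minimal sketch of constructing a signer under the new types, assuming `keyer.NewPlainKeySigner` and `nostr.MustSecretKeyFromHex` behave as they are used in the nip60 test later in this diff:

```go
package main

import (
	"context"
	"fmt"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/keyer"
)

func main() {
	// secret key is the test vector used elsewhere in this commit
	sec := nostr.MustSecretKeyFromHex("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537")
	signer, err := keyer.NewPlainKeySigner(sec)
	if err != nil {
		panic(err)
	}
	pk, _ := signer.GetPublicKey(context.Background())
	fmt.Println(pk) // a nostr.PubKey value, no longer a hex string
}
```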
@@ -2,6 +2,7 @@ package khatru

import (
"context"
"iter"
"log"
"net/http"
"os"

@@ -58,16 +59,16 @@ type Relay struct {
// hooks that will be called at various times
RejectEvent func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
OverwriteDeletionOutcome func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
StoreEvent func(ctx context.Context, event *nostr.Event) error
ReplaceEvent func(ctx context.Context, event *nostr.Event) error
DeleteEvent func(ctx context.Context, event *nostr.Event) error
OnEventSaved func(ctx context.Context, event *nostr.Event)
OnEphemeralEvent func(ctx context.Context, event *nostr.Event)
StoreEvent func(ctx context.Context, event nostr.Event) error
ReplaceEvent func(ctx context.Context, event nostr.Event) error
DeleteEvent func(ctx context.Context, id nostr.ID) error
OnEventSaved func(ctx context.Context, event nostr.Event)
OnEphemeralEvent func(ctx context.Context, event nostr.Event)
RejectFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
RejectCountFilter func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
QueryEvents func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error)
CountEvents func(ctx context.Context, filter nostr.Filter) (int64, error)
CountEventsHLL func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error)
QueryEvents func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event]
CountEvents func(ctx context.Context, filter nostr.Filter) (uint32, error)
CountEventsHLL func(ctx context.Context, filter nostr.Filter, offset int) (uint32, *hyperloglog.HyperLogLog, error)
RejectConnection func(r *http.Request) bool
OnConnect func(ctx context.Context)
OnDisconnect func(ctx context.Context)
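With the hook signatures above (events by value, deletion by `nostr.ID`, queries as `iter.Seq`), wiring an eventstore into a relay becomes a set of thin adapters. A sketch under the assumption that `khatru.Relay` and `eventstore.Store` carry exactly the signatures shown in this diff, and that the import paths live under fiatjaf.com/nostr:

```go
package main

import (
	"context"
	"iter"

	"fiatjaf.com/nostr"
	"fiatjaf.com/nostr/eventstore"
	"fiatjaf.com/nostr/khatru" // assumed path for the khatru package shown above
)

// wire forwards each relay hook straight to the store; no pointer/channel adaptation needed.
func wire(relay *khatru.Relay, store eventstore.Store) {
	relay.StoreEvent = func(ctx context.Context, event nostr.Event) error { return store.SaveEvent(event) }
	relay.ReplaceEvent = func(ctx context.Context, event nostr.Event) error { return store.ReplaceEvent(event) }
	relay.DeleteEvent = func(ctx context.Context, id nostr.ID) error { return store.DeleteEvent(id) }
	relay.QueryEvents = func(ctx context.Context, filter nostr.Filter) iter.Seq[nostr.Event] { return store.QueryEvents(filter) }
	relay.CountEvents = func(ctx context.Context, filter nostr.Filter) (uint32, error) { return store.CountEvents(filter) }
}
```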
@@ -12,9 +12,8 @@ func GetThreadRoot(tags nostr.Tags) *nostr.EventPointer {

firstE := tags.Find("e")
if firstE != nil {
return &nostr.EventPointer{
ID: firstE[1],
}
p, _ := nostr.EventPointerFromTag(firstE)
return &p
}

return nil

@@ -63,9 +62,8 @@ func GetImmediateParent(tags nostr.Tags) *nostr.EventPointer {
if lastE != nil {
// if we reached this point and we have at least one "e" we'll use that (the last)
// (we don't bother looking for relay or author hints because these clients don't add these anyway)
return &nostr.EventPointer{
ID: lastE[1],
}
p, _ := nostr.EventPointerFromTag(lastE)
return &p
}

return nil
@@ -9,11 +9,11 @@ import (
"fiatjaf.com/nostr/nip59"
)

func GetDMRelays(ctx context.Context, pubkey string, pool *nostr.Pool, relaysToQuery []string) []string {
func GetDMRelays(ctx context.Context, pubkey nostr.PubKey, pool *nostr.Pool, relaysToQuery []string) []string {
ie := pool.QuerySingle(ctx, relaysToQuery, nostr.Filter{
Authors: []string{pubkey},
Kinds: []int{nostr.KindDMRelayList},
})
Authors: []nostr.PubKey{pubkey},
Kinds: []uint16{nostr.KindDMRelayList},
}, nostr.SubscriptionOptions{Label: "dm-relays"})
if ie == nil {
return nil
}

@@ -39,7 +39,7 @@ func PublishMessage(
ourRelays []string,
theirRelays []string,
kr nostr.Keyer,
recipientPubKey string,
recipientPubKey nostr.PubKey,
modify func(*nostr.Event),
) error {
toUs, toThem, err := PrepareMessage(ctx, content, tags, kr, recipientPubKey, modify)

@@ -56,7 +56,7 @@ func PublishMessage(

err = r.Publish(ctx, event)
if err != nil && strings.HasPrefix(err.Error(), "auth-required:") {
authErr := r.Auth(ctx, func(ae *nostr.Event) error { return kr.SignEvent(ctx, ae) })
authErr := r.Auth(ctx, kr.SignEvent)
if authErr == nil {
err = r.Publish(ctx, event)
}

@@ -92,7 +92,7 @@ func PrepareMessage(
content string,
tags nostr.Tags,
kr nostr.Keyer,
recipientPubKey string,
recipientPubKey nostr.PubKey,
modify func(*nostr.Event),
) (toUs nostr.Event, toThem nostr.Event, err error) {
ourPubkey, err := kr.GetPublicKey(ctx)

@@ -103,7 +103,7 @@ func PrepareMessage(
rumor := nostr.Event{
Kind: nostr.KindDirectMessage,
Content: content,
Tags: append(tags, nostr.Tag{"p", recipientPubKey}),
Tags: append(tags, nostr.Tag{"p", recipientPubKey.Hex()}),
CreatedAt: nostr.Now(),
PubKey: ourPubkey,
}

@@ -154,13 +154,15 @@ func ListenForMessages(
}

for ie := range pool.SubscribeMany(ctx, ourRelays, nostr.Filter{
Kinds: []int{nostr.KindGiftWrap},
Tags: nostr.TagMap{"p": []string{pk}},
Kinds: []uint16{nostr.KindGiftWrap},
Tags: nostr.TagMap{"p": []string{pk.Hex()}},
Since: &since,
}) {
}, nostr.SubscriptionOptions{Label: "mydms"}) {
rumor, err := nip59.GiftUnwrap(
*ie.Event,
func(otherpubkey, ciphertext string) (string, error) { return kr.Decrypt(ctx, ciphertext, otherpubkey) },
ie.Event,
func(otherpubkey nostr.PubKey, ciphertext string) (string, error) {
return kr.Decrypt(ctx, ciphertext, otherpubkey)
},
)
if err != nil {
nostr.InfoLogger.Printf("[nip17] failed to unwrap received message '%s' from %s: %s\n", ie.Event, ie.Relay.URL, err)
@@ -25,7 +25,7 @@ func Decode(bech32string string) (prefix string, value any, err error) {
if len(data) != 32 {
return prefix, nil, fmt.Errorf("nsec should be 32 bytes (%d)", len(data))
}
return prefix, [32]byte(data[0:32]), nil
return prefix, nostr.SecretKey(data[0:32]), nil
case "note":
if len(data) != 32 {
return prefix, nil, fmt.Errorf("note should be 32 bytes (%d)", len(data))

@@ -10,32 +10,24 @@ func EncodePointer(pointer nostr.Pointer) string {
switch v := pointer.(type) {
case nostr.ProfilePointer:
if v.Relays == nil {
res, _ := EncodePublicKey(v.PublicKey)
return res
return EncodeNpub(v.PublicKey)
} else {
res, _ := EncodeProfile(v.PublicKey, v.Relays)
return res
return EncodeNprofile(v.PublicKey, v.Relays)
}
case *nostr.ProfilePointer:
if v.Relays == nil {
res, _ := EncodePublicKey(v.PublicKey)
return res
return EncodeNpub(v.PublicKey)
} else {
res, _ := EncodeProfile(v.PublicKey, v.Relays)
return res
return EncodeNprofile(v.PublicKey, v.Relays)
}
case nostr.EventPointer:
res, _ := EncodeEvent(v.ID, v.Relays, v.Author)
return res
return EncodeNevent(v.ID, v.Relays, v.Author)
case *nostr.EventPointer:
res, _ := EncodeEvent(v.ID, v.Relays, v.Author)
return res
return EncodeNevent(v.ID, v.Relays, v.Author)
case nostr.EntityPointer:
res, _ := EncodeEntity(v.PublicKey, v.Kind, v.Identifier, v.Relays)
return res
return EncodeNaddr(v.PublicKey, v.Kind, v.Identifier, v.Relays)
case *nostr.EntityPointer:
res, _ := EncodeEntity(v.PublicKey, v.Kind, v.Identifier, v.Relays)
return res
return EncodeNaddr(v.PublicKey, v.Kind, v.Identifier, v.Relays)
}
return ""
}

@@ -48,13 +40,13 @@ func ToPointer(code string) (nostr.Pointer, error) {

switch prefix {
case "npub":
return nostr.ProfilePointer{PublicKey: data.(string)}, nil
return nostr.ProfilePointer{PublicKey: data.([32]byte)}, nil
case "nprofile":
return data.(nostr.ProfilePointer), nil
case "nevent":
return data.(nostr.EventPointer), nil
case "note":
return nostr.EventPointer{ID: data.(string)}, nil
return nostr.EventPointer{ID: data.([32]byte)}, nil
case "naddr":
return data.(nostr.EntityPointer), nil
default:
@@ -224,20 +224,20 @@ func (group *Group) MergeInMetadataEvent(evt *nostr.Event) error {
group.LastMetadataUpdate = evt.CreatedAt
group.Name = group.Address.ID

if tag := evt.Tags.GetFirst([]string{"name", ""}); tag != nil {
group.Name = (*tag)[1]
if tag := evt.Tags.Find("name"); tag != nil {
group.Name = tag[1]
}
if tag := evt.Tags.GetFirst([]string{"about", ""}); tag != nil {
group.About = (*tag)[1]
if tag := evt.Tags.Find("about"); tag != nil {
group.About = tag[1]
}
if tag := evt.Tags.GetFirst([]string{"picture", ""}); tag != nil {
group.Picture = (*tag)[1]
if tag := evt.Tags.Find("picture"); tag != nil {
group.Picture = tag[1]
}

if tag := evt.Tags.GetFirst([]string{"private"}); tag != nil {
if tag := evt.Tags.Find("private"); tag != nil {
group.Private = true
}
if tag := evt.Tags.GetFirst([]string{"closed"}); tag != nil {
if tag := evt.Tags.Find("closed"); tag != nil {
group.Closed = true
}

@@ -11,7 +11,7 @@ type Role struct {
Description string
}

type KindRange []int
type KindRange []uint16

var ModerationEventKinds = KindRange{
nostr.KindSimpleGroupPutUser,

@@ -30,7 +30,7 @@ var MetadataEventKinds = KindRange{
nostr.KindSimpleGroupRoles,
}

func (kr KindRange) Includes(kind int) bool {
func (kr KindRange) Includes(kind uint16) bool {
_, ok := slices.BinarySearch(kr, kind)
return ok
}

@@ -20,8 +20,8 @@ func TestGroupEventBackAndForth(t *testing.T) {
meta1 := group1.ToMetadataEvent()

require.Equal(t, "xyz", meta1.Tags.GetD(), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.GetFirst([]string{"name", "banana"}), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.GetFirst([]string{"private"}), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.FindWithValue("name", "banana"), "translation of group1 to metadata event failed: %s", meta1)
require.NotNil(t, meta1.Tags.Find("private"), "translation of group1 to metadata event failed: %s", meta1)

group2, _ := NewGroup("groups.com'abc")
group2.Members[ALICE] = []*Role{{Name: "nada"}}

@@ -32,16 +32,16 @@ func TestGroupEventBackAndForth(t *testing.T) {

require.Equal(t, "abc", admins2.Tags.GetD(), "translation of group2 to admins event failed")
require.Equal(t, 3, len(admins2.Tags), "translation of group2 to admins event failed")
require.NotNil(t, admins2.Tags.GetFirst([]string{"p", ALICE, "nada"}), "translation of group2 to admins event failed")
require.NotNil(t, admins2.Tags.GetFirst([]string{"p", BOB, "nada"}), "translation of group2 to admins event failed")
require.True(t, admins2.Tags.FindWithValue("p", ALICE)[2] == "nada", "translation of group2 to admins event failed")
require.True(t, admins2.Tags.FindWithValue("p", BOB)[2] == "nada", "translation of group2 to admins event failed")

members2 := group2.ToMembersEvent()
require.Equal(t, "abc", members2.Tags.GetD(), "translation of group2 to members2 event failed")
require.Equal(t, 5, len(members2.Tags), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", ALICE}), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", BOB}), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", CAROL}), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.GetFirst([]string{"p", DEREK}), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.FindWithValue("p", ALICE), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.FindWithValue("p", BOB), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.FindWithValue("p", CAROL), "translation of group2 to members2 event failed")
require.NotNil(t, members2.Tags.FindWithValue("p", DEREK), "translation of group2 to members2 event failed")

group1.MergeInMembersEvent(members2)
require.Equal(t, 4, len(group1.Members), "merge of members2 into group1 failed")
@@ -3,8 +3,8 @@ package nip34
import (
"strings"

"github.com/bluekeyes/go-gitdiff/gitdiff"
"fiatjaf.com/nostr"
"github.com/bluekeyes/go-gitdiff/gitdiff"
)

type Patch struct {

@@ -35,7 +35,7 @@ func ParsePatch(event nostr.Event) Patch {
continue
}
patch.Repository.Kind = nostr.KindRepositoryAnnouncement
patch.Repository.PublicKey = spl[1]
patch.Repository.PublicKey, _ = nostr.PubKeyFromHex(spl[1])
patch.Repository.Identifier = spl[2]
if len(tag) >= 3 {
patch.Repository.Relays = []string{tag[2]}

@@ -1,9 +1,6 @@
package nip34

import (
"context"
"fmt"

"fiatjaf.com/nostr"
)

@@ -97,33 +94,3 @@ func (r Repository) ToEvent() *nostr.Event {
CreatedAt: nostr.Now(),
}
}

func (repo Repository) FetchState(ctx context.Context, s nostr.RelayStore) *RepositoryState {
res, _ := s.QuerySync(ctx, nostr.Filter{
Kinds: []int{nostr.KindRepositoryState},
Tags: nostr.TagMap{
"d": []string{repo.Tags.GetD()},
},
})

if len(res) == 0 {
return nil
}

rs := ParseRepositoryState(*res[0])
return &rs
}

func (repo Repository) GetPatchesSync(ctx context.Context, s nostr.RelayStore) []Patch {
res, _ := s.QuerySync(ctx, nostr.Filter{
Kinds: []int{nostr.KindPatch},
Tags: nostr.TagMap{
"a": []string{fmt.Sprintf("%d:%s:%s", nostr.KindRepositoryAnnouncement, repo.Event.PubKey, repo.ID)},
},
})
patches := make([]Patch, len(res))
for i, evt := range res {
patches[i] = ParsePatch(*evt)
}
return patches
}
@@ -55,7 +55,7 @@ func (s Session) MakeResponse(
evt.Content = ciphertext
evt.CreatedAt = nostr.Now()
evt.Kind = nostr.KindNostrConnect
evt.Tags = nostr.Tags{nostr.Tag{"p", requester}}
evt.Tags = nostr.Tags{nostr.Tag{"p", requester.Hex()}}

return resp, evt, nil
}

@@ -39,7 +39,7 @@ type BunkerClient struct {
// pool can be passed to reuse an existing pool, otherwise a new pool will be created.
func ConnectBunker(
ctx context.Context,
clientSecretKey nostr.PubKey,
clientSecretKey nostr.SecretKey,
bunkerURLOrNIP05 string,
pool *nostr.Pool,
onAuth func(string),

@@ -35,7 +35,7 @@ func GiftWrap(
return nostr.Event{}, err
}

nonceKey := nostr.GeneratePrivateKey()
nonceKey := nostr.Generate()
temporaryConversationKey, err := nip44.GenerateConversationKey(recipient, nonceKey)
if err != nil {
return nostr.Event{}, err
@@ -20,7 +20,7 @@ type HistoryEntry struct {
}

type TokenRef struct {
EventID string
EventID nostr.ID
Created bool
IsNutzap bool
}

@@ -47,7 +47,7 @@ func (h HistoryEntry) toEvent(ctx context.Context, kr nostr.Keyer, evt *nostr.Ev

for _, tf := range h.TokenReferences {
if tf.IsNutzap {
evt.Tags = append(evt.Tags, nostr.Tag{"e", tf.EventID, "", "redeemed"})
evt.Tags = append(evt.Tags, nostr.Tag{"e", tf.EventID.Hex(), "", "redeemed"})
continue
}

@@ -56,7 +56,7 @@ func (h HistoryEntry) toEvent(ctx context.Context, kr nostr.Keyer, evt *nostr.Ev
marker = "created"
}

encryptedTags = append(encryptedTags, nostr.Tag{"e", tf.EventID, "", marker})
encryptedTags = append(encryptedTags, nostr.Tag{"e", tf.EventID.Hex(), "", marker})
}

jsonb, _ := json.Marshal(encryptedTags)

@@ -129,11 +129,12 @@ func (h *HistoryEntry) parse(ctx context.Context, kr nostr.Keyer, evt *nostr.Eve
if len(tag) < 4 {
return fmt.Errorf("'e' tag must have at least 4 items")
}
if !nostr.IsValid32ByteHex(tag[1]) {
return fmt.Errorf("'e' tag has invalid event id %s", tag[1])
id, err := nostr.IDFromHex(tag[1])
if err != nil {
return fmt.Errorf("'e' tag has invalid event id %s: %w", tag[1])
}

tf := TokenRef{EventID: tag[1]}
tf := TokenRef{EventID: id}
switch tag[3] {
case "created":
tf.Created = true
@@ -5,10 +5,10 @@ import (
"fmt"
"slices"

"github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut10"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client"
"github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut10"
)

type receiveSettings struct {

@@ -6,13 +6,13 @@ import (
"fmt"
"slices"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu"
"github.com/elnosh/gonuts/cashu/nuts/nut02"
"github.com/elnosh/gonuts/cashu/nuts/nut10"
"github.com/elnosh/gonuts/cashu/nuts/nut11"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/nip60/client"
)

type SendOption func(opts *sendSettings)

@@ -23,10 +23,9 @@ type sendSettings struct {
refundtimelock int64
}

func WithP2PK(pubkey string) SendOption {
func WithP2PK(pubkey nostr.PubKey) SendOption {
return func(opts *sendSettings) {
pkb, _ := hex.DecodeString(pubkey)
opts.p2pk, _ = btcec.ParsePubKey(pkb)
opts.p2pk, _ = btcec.ParsePubKey(append([]byte{2}, pubkey[:]...))
}
}

@@ -132,7 +131,7 @@ func (w *Wallet) saveChangeAndDeleteUsedTokens(
mintedAt: nostr.Now(),
Mint: mintURL,
Proofs: changeProofs,
Deleted: make([]string, 0, len(usedTokenIndexes)),
Deleted: make([]nostr.ID, 0, len(usedTokenIndexes)),
event: &nostr.Event{},
}

@@ -144,7 +143,7 @@ func (w *Wallet) saveChangeAndDeleteUsedTokens(
deleteEvent := nostr.Event{
CreatedAt: nostr.Now(),
Kind: 5,
Tags: nostr.Tags{{"e", token.event.ID}, {"k", "7375"}},
Tags: nostr.Tags{{"e", token.event.ID.Hex()}, {"k", "7375"}},
}
w.kr.SignEvent(ctx, &deleteEvent)

@@ -5,14 +5,14 @@ import (
"encoding/json"
"fmt"

"github.com/elnosh/gonuts/cashu"
"fiatjaf.com/nostr"
"github.com/elnosh/gonuts/cashu"
)

type Token struct {
Mint string `json:"mint"`
Proofs cashu.Proofs `json:"proofs"`
Deleted []string `json:"del,omitempty"`
Deleted []nostr.ID `json:"del,omitempty"`

mintedAt nostr.Timestamp
event *nostr.Event

@@ -20,7 +20,7 @@ type Token struct {

func (t Token) ID() string {
if t.event != nil {
return t.event.ID
return t.event.ID.Hex()
}

return "<not-published>"
@@ -9,9 +9,9 @@ import (
"sync"
"time"

"fiatjaf.com/nostr"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/decred/dcrd/dcrec/secp256k1/v4"
"fiatjaf.com/nostr"
)

type Wallet struct {

@@ -19,7 +19,7 @@ type Wallet struct {
tokensMu sync.Mutex
event *nostr.Event

pendingDeletions []string // token events that should be deleted
pendingDeletions []nostr.ID // token events that should be deleted

kr nostr.Keyer

@@ -34,7 +34,7 @@ type Wallet struct {
)

// Processed, if not nil, is called every time a received event is processed
Processed func(*nostr.Event, error)
Processed func(nostr.Event, error)

// Stable is closed when we have gotten an EOSE from all relays
Stable chan struct{}

@@ -77,7 +77,7 @@ func loadWalletFromPool(
return nil
}

kinds := []int{17375, 7375}
kinds := []uint16{17375, 7375}
if withHistory {
kinds = append(kinds, 7376)
}

@@ -86,16 +86,18 @@ func loadWalletFromPool(
events := pool.SubscribeManyNotifyEOSE(
ctx,
relays,
nostr.Filter{Kinds: kinds, Authors: []string{pk}},
nostr.Filter{Kinds: kinds, Authors: []nostr.PubKey{pk}},
eoseChanE,
nostr.SubscriptionOptions{},
)

eoseChanD := make(chan struct{})
deletions := pool.SubscribeManyNotifyEOSE(
ctx,
relays,
nostr.Filter{Kinds: []int{5}, Tags: nostr.TagMap{"k": []string{"7375"}}, Authors: []string{pk}},
nostr.Filter{Kinds: []uint16{5}, Tags: nostr.TagMap{"k": []string{"7375"}}, Authors: []nostr.PubKey{pk}},
eoseChanD,
nostr.SubscriptionOptions{},
)

eoseChan := make(chan struct{})

@@ -116,7 +118,7 @@ func loadWallet(
eoseChan chan struct{},
) *Wallet {
w := &Wallet{
pendingDeletions: make([]string, 0, 128),
pendingDeletions: make([]nostr.ID, 0, 128),
kr: kr,
Stable: make(chan struct{}),
Tokens: make([]Token, 0, 128),

@@ -143,11 +145,15 @@ func loadWallet(
w.Lock()
if !eosed {
for tag := range ie.Event.Tags.FindAll("e") {
w.pendingDeletions = append(w.pendingDeletions, tag[1])
if id, err := nostr.IDFromHex(tag[1]); err == nil {
w.pendingDeletions = append(w.pendingDeletions, id)
}
}
} else {
for tag := range ie.Event.Tags.FindAll("e") {
w.removeDeletedToken(tag[1])
if id, err := nostr.IDFromHex(tag[1]); err == nil {
w.removeDeletedToken(id)
}
}
}
w.Unlock()

@@ -159,7 +165,7 @@ func loadWallet(
w.Lock()
switch ie.Event.Kind {
case 17375:
if err := w.parse(ctx, kr, ie.Event); err != nil {
if err := w.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil {
w.Processed(ie.Event, err)
}

@@ -169,11 +175,11 @@ func loadWallet(

// if this metadata is newer than what we had, update
if w.event == nil || ie.Event.CreatedAt > w.event.CreatedAt {
w.parse(ctx, kr, ie.Event) // this will either fail or set the new metadata
w.parse(ctx, kr, &ie.Event) // this will either fail or set the new metadata
}
case 7375: // token
token := Token{}
if err := token.parse(ctx, kr, ie.Event); err != nil {
if err := token.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil {
w.Processed(ie.Event, err)
}

@@ -200,7 +206,7 @@ func loadWallet(

case 7376: // history
he := HistoryEntry{}
if err := he.parse(ctx, kr, ie.Event); err != nil {
if err := he.parse(ctx, kr, &ie.Event); err != nil {
if w.Processed != nil {
w.Processed(ie.Event, err)
}

@@ -230,7 +236,7 @@ func (w *Wallet) Close() error {
return nil
}

func (w *Wallet) removeDeletedToken(eventId string) {
func (w *Wallet) removeDeletedToken(eventId nostr.ID) {
for t := len(w.Tokens) - 1; t >= 0; t-- {
token := w.Tokens[t]
if token.event != nil && token.event.ID == eventId {
@@ -1,6 +1,7 @@
package nip60

import (
"bytes"
"cmp"
"context"
"fmt"

@@ -8,17 +9,17 @@ import (
"testing"
"time"

"github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu"
"fiatjaf.com/nostr"
"fiatjaf.com/nostr/keyer"
"github.com/btcsuite/btcd/btcec/v2"
"github.com/elnosh/gonuts/cashu"
"github.com/stretchr/testify/require"
"golang.org/x/exp/rand"
)

func TestWallet(t *testing.T) {
ctx := context.Background()
kr, err := keyer.NewPlainKeySigner("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537")
kr, err := keyer.NewPlainKeySigner(nostr.MustSecretKeyFromHex("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537"))
if err != nil {
t.Fatal(err)
}

@@ -53,7 +54,7 @@ func TestWallet(t *testing.T) {
Amount: 100,
createdAt: nostr.Timestamp(time.Now().Add(-3 * time.Hour).Unix()),
TokenReferences: []TokenRef{
{Created: true, EventID: "645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58"},
{Created: true, EventID: nostr.MustIDFromHex("645babb9051f46ddc97d960e68f82934e627f136dde7b860bf87c9213d937b58")},
},
},
{

@@ -61,8 +62,8 @@ func TestWallet(t *testing.T) {
Amount: 200,
createdAt: nostr.Timestamp(time.Now().Add(-2 * time.Hour).Unix()),
TokenReferences: []TokenRef{
{Created: false, EventID: "add072ae7d7a027748e03024267a1c073f3fbc26cca468ba8630d039a7f5df72"},
{Created: true, EventID: "b8460b5589b68a0d9a017ac3784d17a0729046206aa631f7f4b763b738e36cf8"},
{Created: false, EventID: nostr.MustIDFromHex("add072ae7d7a027748e03024267a1c073f3fbc26cca468ba8630d039a7f5df72")},
{Created: true, EventID: nostr.MustIDFromHex("b8460b5589b68a0d9a017ac3784d17a0729046206aa631f7f4b763b738e36cf8")},
},
},
{

@@ -70,52 +71,52 @@ func TestWallet(t *testing.T) {
Amount: 300,
createdAt: nostr.Timestamp(time.Now().Add(-1 * time.Hour).Unix()),
TokenReferences: []TokenRef{
{Created: false, EventID: "61f86031d0ab95e9134a3ab955e96104cb1f4d610172838d28aa7ae9dc1cc924"},
{Created: true, EventID: "588b78e4af06e960434239e7367a0bedf84747d4c52ff943f5e8b7daa3e1b601", IsNutzap: true},
{Created: false, EventID: "8f14c0a4ff1bf85ccc26bf0125b9a289552f9b59bbb310b163d6a88a7bbd4ebc"},
{Created: true, EventID: "41a6f442b7c3c9e2f1e8c4835c00f17c56b3e3be4c9f7cf7bc4cdd705b1b61db", IsNutzap: true},
{Created: false, EventID: nostr.MustIDFromHex("61f86031d0ab95e9134a3ab955e96104cb1f4d610172838d28aa7ae9dc1cc924")},
{Created: true, EventID: nostr.MustIDFromHex("588b78e4af06e960434239e7367a0bedf84747d4c52ff943f5e8b7daa3e1b601"), IsNutzap: true},
{Created: false, EventID: nostr.MustIDFromHex("8f14c0a4ff1bf85ccc26bf0125b9a289552f9b59bbb310b163d6a88a7bbd4ebc")},
{Created: true, EventID: nostr.MustIDFromHex("41a6f442b7c3c9e2f1e8c4835c00f17c56b3e3be4c9f7cf7bc4cdd705b1b61db"), IsNutzap: true},
},
},
},
}

// turn everything into events
events := make([]*nostr.Event, 0, 7)
events := make([]nostr.Event, 0, 7)

// wallet metadata event
metaEvent := &nostr.Event{}
err = w.toEvent(ctx, kr, metaEvent)
metaEvent := nostr.Event{}
err = w.toEvent(ctx, kr, &metaEvent)
require.NoError(t, err)
events = append(events, metaEvent)

// token events
for i := range w.Tokens {
evt := &nostr.Event{}
evt := nostr.Event{}
evt.Tags = nostr.Tags{}
err := w.Tokens[i].toEvent(ctx, kr, evt)
err := w.Tokens[i].toEvent(ctx, kr, &evt)
require.NoError(t, err)
w.Tokens[i].event = evt
w.Tokens[i].event = &evt
events = append(events, evt)
}

// history events
for i := range w.History {
evt := &nostr.Event{}
evt := nostr.Event{}
evt.Tags = nostr.Tags{}
err := w.History[i].toEvent(ctx, kr, evt)
err := w.History[i].toEvent(ctx, kr, &evt)
require.NoError(t, err)
w.History[i].event = evt
w.History[i].event = &evt
events = append(events, evt)
}

// test different orderings
testCases := []struct {
name string
sort func([]*nostr.Event)
sort func([]nostr.Event)
}{
{
name: "random order",
sort: func(evts []*nostr.Event) {
sort: func(evts []nostr.Event) {
r := rand.New(rand.NewSource(42)) // deterministic
r.Shuffle(len(evts), func(i, j int) {
evts[i], evts[j] = evts[j], evts[i]

@@ -124,16 +125,16 @@ func TestWallet(t *testing.T) {
},
{
name: "most recent first",
sort: func(evts []*nostr.Event) {
slices.SortFunc(evts, func(a, b *nostr.Event) int {
sort: func(evts []nostr.Event) {
slices.SortFunc(evts, func(a, b nostr.Event) int {
return int(b.CreatedAt - a.CreatedAt)
})
},
},
{
name: "least recent first",
sort: func(evts []*nostr.Event) {
slices.SortFunc(evts, func(a, b *nostr.Event) int {
sort: func(evts []nostr.Event) {
slices.SortFunc(evts, func(a, b nostr.Event) int {
return int(a.CreatedAt - b.CreatedAt)
})
},

@@ -143,7 +144,7 @@ func TestWallet(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// make a copy and sort it
eventsCopy := make([]*nostr.Event, len(events))
eventsCopy := make([]nostr.Event, len(events))
copy(eventsCopy, events)
tc.sort(eventsCopy)

@@ -162,7 +163,7 @@ func TestWallet(t *testing.T) {

// load wallet from events
loaded := loadWallet(ctx, kr, evtChan, make(chan nostr.RelayEvent), eoseChan)
loaded.Processed = func(evt *nostr.Event, err error) {
loaded.Processed = func(evt nostr.Event, err error) {
fmt.Println("processed", evt.Kind, err)
}

@@ -174,8 +175,8 @@ func TestWallet(t *testing.T) {
slices.SortFunc(loaded.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) })
slices.SortFunc(w.History, func(a, b HistoryEntry) int { return cmp.Compare(a.createdAt, b.createdAt) })
for i := range w.History {
slices.SortFunc(loaded.History[i].TokenReferences, func(a, b TokenRef) int { return cmp.Compare(a.EventID, b.EventID) })
slices.SortFunc(w.History[i].TokenReferences, func(a, b TokenRef) int { return cmp.Compare(a.EventID, b.EventID) })
slices.SortFunc(loaded.History[i].TokenReferences, func(a, b TokenRef) int { return bytes.Compare(a.EventID[:], b.EventID[:]) })
slices.SortFunc(w.History[i].TokenReferences, func(a, b TokenRef) int { return bytes.Compare(a.EventID[:], b.EventID[:]) })
require.Equal(t, loaded.History[i], w.History[i])
}
require.ElementsMatch(t, loaded.Mints, w.Mints)
@@ -20,24 +20,24 @@ func SendNutzap(
|
||||
kr nostr.Keyer,
|
||||
w *nip60.Wallet,
|
||||
pool *nostr.Pool,
|
||||
targetUserPublickey string,
|
||||
getUserReadRelays func(context.Context, string, int) []string,
|
||||
targetUserPublickey nostr.PubKey,
|
||||
getUserReadRelays func(context.Context, nostr.PubKey, int) []string,
|
||||
relays []string,
|
||||
eventId string, // can be "" if not targeting a specific event
|
||||
amount uint64,
|
||||
message string,
|
||||
) (chan nostr.PublishResult, error) {
|
||||
ie := pool.QuerySingle(ctx, relays, nostr.Filter{Kinds: []int{10019}, Authors: []string{targetUserPublickey}})
|
||||
ie := pool.QuerySingle(ctx, relays, nostr.Filter{Kinds: []uint16{10019}, Authors: []nostr.PubKey{targetUserPublickey}}, nostr.SubscriptionOptions{})
|
||||
if ie == nil {
|
||||
return nil, NutzapsNotAccepted
|
||||
}
|
||||
|
||||
info := Info{}
|
||||
if err := info.ParseEvent(ie.Event); err != nil {
|
||||
if err := info.ParseEvent(&ie.Event); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(info.Mints) == 0 || info.PublicKey == "" {
|
||||
if len(info.Mints) == 0 || info.PublicKey == nostr.ZeroPK {
|
||||
return nil, NutzapsNotAccepted
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func SendNutzap(
|
||||
Tags: make(nostr.Tags, 0, 8),
|
||||
}
|
||||
|
||||
nutzap.Tags = append(nutzap.Tags, nostr.Tag{"p", targetUserPublickey})
|
||||
nutzap.Tags = append(nutzap.Tags, nostr.Tag{"p", targetUserPublickey.Hex()})
|
||||
if eventId != "" {
|
||||
nutzap.Tags = append(nutzap.Tags, nostr.Tag{"e", eventId})
|
||||
}
|
||||
|
||||
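As a quick illustration of the new typed call shape above, a hedged sketch (not part of the commit; ctx, pool, relays and the key value are placeholders) of looking up a user's kind-10019 event:

// hedged sketch: querying for a kind-10019 event with the typed filter
pk := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001") // placeholder key
ie := pool.QuerySingle(ctx, relays, nostr.Filter{
	Kinds:   []uint16{10019},
	Authors: []nostr.PubKey{pk},
}, nostr.SubscriptionOptions{})
if ie == nil {
	// the target user has not published a nutzap info event
}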
35
nip77/negentropy/storage/vector/helpers.go
Normal file
@@ -0,0 +1,35 @@
package vector

import (
"bytes"
"cmp"

"fiatjaf.com/nostr/nip77/negentropy"
)

func itemCompare(a, b negentropy.Item) int {
if a.Timestamp == b.Timestamp {
return bytes.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.Timestamp, b.Timestamp)
}

// binary search with custom function
func searchItemWithBound(items []negentropy.Item, bound negentropy.Bound) int {
n := len(items)
// Define x[-1] < target and x[n] >= target.
// Invariant: x[i-1] < target, x[j] >= target.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
if items[h].Timestamp < bound.Timestamp ||
(items[h].Timestamp == bound.Timestamp && bytes.Compare(items[h].ID[:], bound.IDPrefix) == -1) {
i = h + 1 // preserves x[i-1] < target
} else {
j = h // preserves x[j] >= target
}
}
// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
return i
}
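A small behavioral sketch of the helper above (illustration only, assuming it runs inside the vector package): it returns the index of the first item whose (timestamp, id) pair is not below the bound.

// hedged sketch: lower-bound lookup over a slice already sorted by itemCompare
var idA, idB, idC nostr.ID // zero IDs are fine here since the timestamps differ
items := []negentropy.Item{
	{Timestamp: 10, ID: idA},
	{Timestamp: 20, ID: idB},
	{Timestamp: 30, ID: idC},
}
idx := searchItemWithBound(items, negentropy.Bound{Timestamp: 20})
// idx == 1: items[0] is strictly below the bound, items[1] is not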
@@ -1,7 +1,6 @@
package vector

import (
"fmt"
"iter"
"slices"

@@ -24,10 +23,6 @@ func New() *Vector {
}

func (v *Vector) Insert(createdAt nostr.Timestamp, id nostr.ID) {
if len(id) != 64 {
panic(fmt.Errorf("bad id size for added item: expected %d bytes, got %d", 32, len(id)/2))
}

item := negentropy.Item{Timestamp: createdAt, ID: id}
v.items = append(v.items, item)
}
@@ -39,12 +34,12 @@ func (v *Vector) Seal() {
panic("trying to seal an already sealed vector")
}
v.sealed = true
slices.SortFunc(v.items, negentropy.ItemCompare)
slices.SortFunc(v.items, itemCompare)
}

func (v *Vector) GetBound(idx int) negentropy.Bound {
if idx < len(v.items) {
return negentropy.Bound{Item: v.items[idx]}
return negentropy.Bound{Timestamp: v.items[idx].Timestamp, IDPrefix: v.items[idx].ID[:]}
}
return negentropy.InfiniteBound
}
@@ -60,7 +55,7 @@ func (v *Vector) Range(begin, end int) iter.Seq2[int, negentropy.Item] {
}

func (v *Vector) FindLowerBound(begin, end int, bound negentropy.Bound) int {
idx, _ := slices.BinarySearchFunc(v.items[begin:end], bound.Item, negentropy.ItemCompare)
idx := searchItemWithBound(v.items[begin:end], bound)
return begin + idx
}

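Putting the pieces of this file together, a rough usage sketch (the evts slice is a placeholder, not part of the commit): events are inserted unsorted, Seal sorts them once with itemCompare, and FindLowerBound then runs searchItemWithBound over the chosen subrange.

// hedged sketch of the Vector lifecycle after this change
vec := vector.New()
for _, evt := range evts { // evts: some []nostr.Event already fetched
	vec.Insert(evt.CreatedAt, evt.ID)
}
vec.Seal() // sorts once; sealing twice panics
low := vec.FindLowerBound(0, len(evts), negentropy.Bound{Timestamp: 1700000000})
_ = low // index of the first stored item at or after that timestamp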
@@ -1,8 +1,6 @@
package negentropy

import (
"bytes"
"cmp"
"fmt"

"fiatjaf.com/nostr"
@@ -36,13 +34,6 @@ type Item struct {
ID nostr.ID
}

func ItemCompare(a, b Item) int {
if a.Timestamp == b.Timestamp {
return bytes.Compare(a.ID[:], b.ID[:])
}
return cmp.Compare(a.Timestamp, b.Timestamp)
}

func (i Item) String() string { return fmt.Sprintf("Item<%d:%x>", i.Timestamp, i.ID[:]) }

type Bound struct {

@@ -13,8 +13,8 @@ import (
type direction struct {
label string
items chan nostr.ID
source nostr.RelayStore
target nostr.RelayStore
source nostr.QuerierPublisher
target nostr.QuerierPublisher
}

type Direction int
@@ -27,21 +27,21 @@ const (

func NegentropySync(
ctx context.Context,
store nostr.RelayStore,
store nostr.QuerierPublisher,
url string,
filter nostr.Filter,
dir Direction,
) error {
id := "go-nostr-tmp" // for now we can't have more than one subscription in the same connection

data, err := store.QuerySync(ctx, filter)
if err != nil {
return fmt.Errorf("failed to query our local store: %w", err)
}
id := "nl-tmp" // for now we can't have more than one subscription in the same connection

vec := vector.New()
neg := negentropy.New(vec, 1024*1024)
for _, evt := range data {
ch, err := store.QueryEvents(ctx, filter)
if err != nil {
return err
}

for evt := range ch {
vec.Insert(evt.CreatedAt, evt.ID)
}
vec.Seal()
@@ -49,31 +49,33 @@ func NegentropySync(
result := make(chan error)

var r *nostr.Relay
r, err = nostr.RelayConnect(ctx, url, nostr.WithCustomHandler(func(data string) {
envelope := ParseNegMessage(data)
if envelope == nil {
return
}
switch env := envelope.(type) {
case *OpenEnvelope, *CloseEnvelope:
result <- fmt.Errorf("unexpected %s received from relay", env.Label())
return
case *ErrorEnvelope:
result <- fmt.Errorf("relay returned a %s: %s", env.Label(), env.Reason)
return
case *MessageEnvelope:
nextmsg, err := neg.Reconcile(env.Message)
if err != nil {
result <- fmt.Errorf("failed to reconcile: %w", err)
r, err = nostr.RelayConnect(ctx, url, nostr.RelayOptions{
CustomHandler: func(data string) {
envelope := ParseNegMessage(data)
if envelope == nil {
return
}
switch env := envelope.(type) {
case *OpenEnvelope, *CloseEnvelope:
result <- fmt.Errorf("unexpected %s received from relay", env.Label())
return
case *ErrorEnvelope:
result <- fmt.Errorf("relay returned a %s: %s", env.Label(), env.Reason)
return
case *MessageEnvelope:
nextmsg, err := neg.Reconcile(env.Message)
if err != nil {
result <- fmt.Errorf("failed to reconcile: %w", err)
return
}

if nextmsg != "" {
msgb, _ := MessageEnvelope{id, nextmsg}.MarshalJSON()
r.Write(msgb)
if nextmsg != "" {
msgb, _ := MessageEnvelope{id, nextmsg}.MarshalJSON()
r.Write(msgb)
}
}
}
}))
},
})
if err != nil {
return err
}
@@ -122,7 +124,7 @@ func NegentropySync(
return
}
for evt := range evtch {
dir.target.Publish(ctx, *evt)
dir.target.Publish(ctx, evt)
}
}

29
relay.go
@@ -433,12 +433,39 @@ func (r *Relay) PrepareSubscription(ctx context.Context, filter Filter, opts Sub
return sub
}

// implement Querier interface
func (r *Relay) QueryEvents(ctx context.Context, filter Filter) (chan Event, error) {
sub, err := r.Subscribe(ctx, filter, SubscriptionOptions{Label: "queryevents"})
if err != nil {
return nil, err
}

ch := make(chan Event)

go func() {
for {
select {
case evt := <-sub.Events:
ch <- evt
case <-sub.EndOfStoredEvents:
return
case <-sub.ClosedReason:
return
case <-ctx.Done():
return
}
}
}()

return ch, nil
}

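A hedged consumer sketch for the new method (not part of the commit; assumes it lives in the same package, hence the unqualified Filter and Event types). Since the goroutine above exits without closing the channel, the read is bounded by the context rather than by ranging until channel close.

func drainStored(ctx context.Context, r *Relay) []Event {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	ch, err := r.QueryEvents(ctx, Filter{Kinds: []uint16{1}}) // placeholder filter
	if err != nil {
		return nil
	}

	var out []Event
	for {
		select {
		case evt := <-ch:
			out = append(out, evt)
		case <-ctx.Done():
			return out // the producer above never closes ch, so the context deadline bounds the read
		}
	}
}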
// Count sends a "COUNT" command to the relay and returns the count of events matching the filters.
func (r *Relay) Count(
ctx context.Context,
filter Filter,
opts SubscriptionOptions,
) (int64, []byte, error) {
) (uint32, []byte, error) {
v, err := r.countInternal(ctx, filter, opts)
if err != nil {
return 0, nil, err

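A short hedged call-site sketch for the unsigned return (placeholder filter; r and ctx are assumed to be in scope):

// hedged sketch: Count now returns a uint32 plus an optional HyperLogLog payload
count, hll, err := r.Count(ctx, Filter{Kinds: []uint16{7}}, SubscriptionOptions{})
if err == nil {
	fmt.Printf("%d matching events, %d bytes of HLL data\n", count, len(hll))
}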
44
sdk/feeds.go
@@ -2,8 +2,6 @@ package sdk

import (
"context"
"encoding/hex"
"fmt"
"slices"
"sync"
"sync/atomic"
@@ -16,10 +14,10 @@ const (
pubkeyStreamOldestPrefix = byte('O')
)

func makePubkeyStreamKey(prefix byte, pubkey string) []byte {
func makePubkeyStreamKey(prefix byte, pubkey nostr.PubKey) []byte {
key := make([]byte, 1+8)
key[0] = prefix
hex.Decode(key[1:], []byte(pubkey[0:16]))
copy(key[1:], pubkey[0:8])
return key
}

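Both the old and new bodies above produce the same 9-byte key, a prefix byte followed by the first 8 bytes of the pubkey; the new code just copies raw bytes instead of hex-decoding the first 16 hex characters. A hedged illustration (placeholder key, assuming the sdk package scope):

// hedged sketch: the stream key is the prefix byte plus pubkey[0:8]
pk := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")
key := makePubkeyStreamKey(pubkeyStreamOldestPrefix, pk)
fmt.Printf("%x\n", key) // 4f0000000000000000 for this all-zero-prefix key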
@@ -30,9 +28,9 @@ func makePubkeyStreamKey(prefix byte, pubkey string) []byte {
func (sys *System) StreamLiveFeed(
ctx context.Context,
pubkeys []nostr.PubKey,
kinds []int,
) (<-chan *nostr.Event, error) {
events := make(chan *nostr.Event)
kinds []uint16,
) (<-chan nostr.Event, error) {
events := make(chan nostr.Event)

active := atomic.Int32{}
active.Add(int32(len(pubkeys)))
@@ -61,15 +59,17 @@ func (sys *System) StreamLiveFeed(
}

filter := nostr.Filter{
Authors: []string{pubkey},
Authors: []nostr.PubKey{pubkey},
Since: since,
Kinds: kinds,
}

go func() {
sub := sys.Pool.SubscribeMany(ctx, relays, filter, nostr.WithLabel("livefeed"))
sub := sys.Pool.SubscribeMany(ctx, relays, filter, nostr.SubscriptionOptions{
Label: "livefeed",
})
for evt := range sub {
sys.StoreRelay.Publish(ctx, *evt.Event)
sys.Publisher.Publish(ctx, evt.Event)
if latest < evt.CreatedAt {
latest = evt.CreatedAt
serial++
@@ -101,8 +101,8 @@ func (sys *System) StreamLiveFeed(
// for events or if we should just return what we have stored locally.
func (sys *System) FetchFeedPage(
ctx context.Context,
pubkeys []string,
kinds []int,
pubkeys []nostr.PubKey,
kinds []uint16,
until nostr.Timestamp,
totalLimit int,
) ([]*nostr.Event, error) {
@@ -123,21 +123,21 @@ func (sys *System) FetchFeedPage(
}
}

filter := nostr.Filter{Authors: []string{pubkey}, Kinds: kinds}
filter := nostr.Filter{Authors: []nostr.PubKey{pubkey}, Kinds: kinds}

if until > oldestTimestamp {
// we can use our local database
filter.Until = &until
res, err := sys.StoreRelay.QuerySync(ctx, filter)
if err != nil {
return nil, fmt.Errorf("query failure at '%s': %w", pubkey, err)
}

if len(res) >= limitPerKey {
// we got enough from the local store
events = append(events, res...)
wg.Done()
continue
count := 0
for evt := range sys.Store.QueryEvents(filter) {
events = append(events, evt)
count++
if count >= limitPerKey {
// we got enough from the local store
wg.Done()
continue
}
}
}


@@ -10,10 +10,10 @@ import (
)

func runTestWith(t *testing.T, hdb hints.HintsDB) {
const key1 = "0000000000000000000000000000000000000000000000000000000000000001"
const key2 = "0000000000000000000000000000000000000000000000000000000000000002"
const key3 = "0000000000000000000000000000000000000000000000000000000000000003"
const key4 = "0000000000000000000000000000000000000000000000000000000000000004"
key1 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")
key2 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000002")
key3 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000003")
key4 := nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000004")
const relayA = "wss://aaa.com"
const relayB = "wss://bbb.net"
const relayC = "wss://ccc.org"

@@ -14,7 +14,7 @@ import (
// ProfileMetadata represents user profile information from kind 0 events.
// It contains both the raw event and parsed metadata fields.
type ProfileMetadata struct {
PubKey string `json:"-"` // must always be set otherwise things will break
PubKey nostr.PubKey `json:"-"` // must always be set otherwise things will break
Event *nostr.Event `json:"-"` // may be empty if a profile metadata event wasn't found

// every one of these may be empty
@@ -33,8 +33,7 @@ type ProfileMetadata struct {

// Npub returns the NIP-19 npub encoding of the profile's public key.
func (p ProfileMetadata) Npub() string {
v, _ := nip19.EncodePublicKey(p.PubKey)
return v
return nip19.EncodeNpub(p.PubKey)
}

// NpubShort returns a shortened version of the NIP-19 npub encoding,
@@ -47,8 +46,7 @@ func (p ProfileMetadata) NpubShort() string {
// Nprofile returns the NIP-19 nprofile encoding of the profile,
// including relay hints from the user's outbox.
func (p ProfileMetadata) Nprofile(ctx context.Context, sys *System, nrelays int) string {
v, _ := nip19.EncodeProfile(p.PubKey, sys.FetchOutboxRelays(ctx, p.PubKey, 2))
return v
return nip19.EncodeNprofile(p.PubKey, sys.FetchOutboxRelays(ctx, p.PubKey, 2))
}

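With the single-return encoders used above there is no error value left to discard; a hedged sketch (placeholder pubkey, assuming the sdk package scope):

// hedged sketch of the error-free nip19 encoders
pm := ProfileMetadata{PubKey: nostr.MustPubKeyFromHex("0000000000000000000000000000000000000000000000000000000000000001")}
npub := pm.Npub()                     // bech32 npub string, no error to check
direct := nip19.EncodeNpub(pm.PubKey) // same value, calling the encoder directly
fmt.Println(npub == direct)           // true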
// ShortName returns the best available name for display purposes.
@@ -105,7 +103,7 @@ func (sys System) FetchProfileFromInput(ctx context.Context, nip19OrNip05Code st
// FetchProfileMetadata fetches metadata for a given user from the local cache, or from the local store,
// or, failing these, from the target user's defined outbox relays -- then caches the result.
// It always returns a ProfileMetadata, even if no metadata was found (in which case only the PubKey field is set).
func (sys *System) FetchProfileMetadata(ctx context.Context, pubkey string) (pm ProfileMetadata) {
func (sys *System) FetchProfileMetadata(ctx context.Context, pubkey nostr.PubKey) (pm ProfileMetadata) {
if v, ok := sys.MetadataCache.Get(pubkey); ok {
return v
}

@@ -5,6 +5,7 @@ import (
"math/rand/v2"

"fiatjaf.com/nostr"
"fiatjaf.com/nostr/eventstore/wrappers"
"fiatjaf.com/nostr/sdk/cache"
cache_memory "fiatjaf.com/nostr/sdk/cache/memory"
"fiatjaf.com/nostr/sdk/dataloader"
@@ -51,7 +52,7 @@ type System struct {
NoteSearchRelays *RelayStream
Store eventstore.Store

StoreRelay nostr.RelayStore
Publisher wrappers.StorePublisher

replaceableLoaders []*dataloader.Loader[nostr.PubKey, *nostr.Event]
addressableLoaders []*dataloader.Loader[nostr.PubKey, []*nostr.Event]

126
tags.go
@@ -4,53 +4,10 @@ import (
"errors"
"iter"
"slices"
"strings"
)

type Tag []string

// Deprecated: this is too cumbersome for no reason when what we actually want is
// the simpler logic present in Find and FindWithValue.
func (tag Tag) StartsWith(prefix []string) bool {
prefixLen := len(prefix)

if prefixLen > len(tag) {
return false
}
// check initial elements for equality
for i := 0; i < prefixLen-1; i++ {
if prefix[i] != tag[i] {
return false
}
}
// check last element just for a prefix
return strings.HasPrefix(tag[prefixLen-1], prefix[prefixLen-1])
}

// Deprecated: write these inline instead
func (tag Tag) Key() string {
if len(tag) > 0 {
return tag[0]
}
return ""
}

// Deprecated: write these inline instead
func (tag Tag) Value() string {
if len(tag) > 1 {
return tag[1]
}
return ""
}

// Deprecated: write these inline instead
func (tag Tag) Relay() string {
if len(tag) > 2 && (tag[0] == "e" || tag[0] == "p") {
return NormalizeURL(tag[2])
}
return ""
}

type Tags []Tag

// GetD gets the first "d" tag (for parameterized replaceable events) value or ""
@@ -63,89 +20,6 @@ func (tags Tags) GetD() string {
return ""
}

// Deprecated: use Find or FindWithValue instead
func (tags Tags) GetFirst(tagPrefix []string) *Tag {
for _, v := range tags {
if v.StartsWith(tagPrefix) {
return &v
}
}
return nil
}

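For call sites migrating off GetFirst, a hedged before/after sketch using the Find method declared later in this file (evt and use are placeholders):

// before (deprecated):
// if tag := evt.Tags.GetFirst([]string{"e", ""}); tag != nil {
// 	use((*tag)[1])
// }

// after:
if tag := evt.Tags.Find("e"); tag != nil {
	use(tag[1]) // Find only returns tags with at least 2 items
}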
// Deprecated: use FindLast or FindLastWithValue instead
func (tags Tags) GetLast(tagPrefix []string) *Tag {
for i := len(tags) - 1; i >= 0; i-- {
v := tags[i]
if v.StartsWith(tagPrefix) {
return &v
}
}
return nil
}

// Deprecated: use FindAll instead
func (tags Tags) GetAll(tagPrefix []string) Tags {
result := make(Tags, 0, len(tags))
for _, v := range tags {
if v.StartsWith(tagPrefix) {
result = append(result, v)
}
}
return result
}

// Deprecated: use FindAll instead
func (tags Tags) All(tagPrefix []string) iter.Seq2[int, Tag] {
return func(yield func(int, Tag) bool) {
for i, v := range tags {
if v.StartsWith(tagPrefix) {
if !yield(i, v) {
break
}
}
}
}
}

// Deprecated: this is useless, write your own
func (tags Tags) FilterOut(tagPrefix []string) Tags {
filtered := make(Tags, 0, len(tags))
for _, v := range tags {
if !v.StartsWith(tagPrefix) {
filtered = append(filtered, v)
}
}
return filtered
}

// Deprecated: this is useless, write your own
func (tags *Tags) FilterOutInPlace(tagPrefix []string) {
for i := 0; i < len(*tags); i++ {
tag := (*tags)[i]
if tag.StartsWith(tagPrefix) {
// remove this by swapping the last tag into this place
last := len(*tags) - 1
(*tags)[i] = (*tags)[last]
*tags = (*tags)[0:last]
i-- // this is so we can match this just swapped item in the next iteration
}
}
}

// Deprecated: write your own instead with Find() and append()
func (tags Tags) AppendUnique(tag Tag) Tags {
n := len(tag)
if n > 2 {
n = 2
}

if tags.GetFirst(tag[:n]) == nil {
return append(tags, tag)
}
return tags
}

// Find returns the first tag with the given key/tagName that also has one value (i.e. at least 2 items)
func (tags Tags) Find(key string) Tag {
for _, v := range tags {