since and until are not pointers anymore because that is too annoying.

This commit is contained in:
fiatjaf
2025-05-08 09:32:54 -03:00
parent 0853405c03
commit c0934e0639
17 changed files with 55 additions and 85 deletions
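
The core of the change is the Filter struct: Since and Until go from *Timestamp to plain Timestamp values, with the zero value now meaning "unset". Below is a minimal sketch of the two conventions, assuming only that Timestamp is go-nostr's int64 unix-seconds type; everything else is simplified for illustration.

// Sketch only: go-nostr's Timestamp is an int64 of unix seconds; the
// real Filter has more fields than shown here.
type Timestamp int64

// Before this commit: optional bounds were pointers, nil = "no bound".
type filterBefore struct {
	Since *Timestamp
	Until *Timestamp
}

// After: plain values, 0 = "no bound". Callers no longer have to take
// the address of a temporary just to set a bound.
type filterAfter struct {
	Since Timestamp
	Until Timestamp
}

func matches(f filterAfter, createdAt Timestamp) bool {
	if f.Since != 0 && createdAt < f.Since {
		return false
	}
	if f.Until != 0 && createdAt > f.Until {
		return false
	}
	return true
}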

View File

@@ -27,8 +27,8 @@ func prepareQueries(filter nostr.Filter) (
 	}
 	var until uint32 = 4294967295
-	if filter.Until != nil {
-		if fu := uint32(*filter.Until); fu < until {
+	if filter.Until != 0 {
+		if fu := uint32(filter.Until); fu < until {
 			until = fu + 1
 		}
 	}
@@ -38,8 +38,8 @@ func prepareQueries(filter nostr.Filter) (
 	}
 	// this is where we'll end the iteration
-	if filter.Since != nil {
-		if fs := uint32(*filter.Since); fs > since {
+	if filter.Since != 0 {
+		if fs := uint32(filter.Since); fs > since {
 			since = fs
 		}
 	}
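
Two sentinels are at work in this hunk: until starts at 4294967295 (max uint32, i.e. "no upper bound"), and an explicit Until is stored as fu + 1, turning the filter's inclusive bound into an exclusive iteration bound. A standalone sketch of that clamp, with names invented for illustration:

// Hypothetical helper mirroring the clamp above; not the repo's code.
const noUpperBound uint32 = 4294967295 // max uint32

func iterationBounds(since, until uint32) (lo, hi uint32) {
	hi = noUpperBound
	if until != 0 {
		if fu := until; fu < hi {
			hi = fu + 1 // inclusive Until -> exclusive end; fu < max, so no overflow
		}
	}
	if since != 0 && since > lo {
		lo = since
	}
	return lo, hi
}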

View File

@@ -54,14 +54,14 @@ func (b *BlugeBackend) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 		q = complicatedQuery
 	}
-	if filter.Since != nil || filter.Until != nil {
+	if filter.Since != 0 || filter.Until != 0 {
 		min := 0.0
-		if filter.Since != nil {
-			min = float64(*filter.Since)
+		if filter.Since != 0 {
+			min = float64(filter.Since)
 		}
 		max := float64(nostr.Now())
-		if filter.Until != nil {
-			max = float64(*filter.Until)
+		if filter.Until != 0 {
+			max = float64(filter.Until)
 		}
 		dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
 		dateRangeQ.SetField(createdAtField)

View File

@@ -36,8 +36,8 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 	}
 	var until uint32 = 4294967295
-	if filter.Until != nil {
-		if fu := uint32(*filter.Until); fu < until {
+	if filter.Until != 0 {
+		if fu := uint32(filter.Until); fu < until {
 			until = fu + 1
 		}
 	}
@@ -62,8 +62,8 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 	}
 	// this is where we'll end the iteration
-	if filter.Since != nil {
-		if fs := uint32(*filter.Since); fs > since {
+	if filter.Since != 0 {
+		if fs := uint32(filter.Since); fs > since {
 			since = fs
 		}
 	}

View File

@@ -36,8 +36,8 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
 	}
 	var until uint32 = 4294967295
-	if filter.Until != nil {
-		if fu := uint32(*filter.Until); fu < until {
+	if filter.Until != 0 {
+		if fu := uint32(filter.Until); fu < until {
 			until = fu + 1
 		}
 	}
@@ -51,8 +51,8 @@ func (il *IndexingLayer) prepareQueries(filter nostr.Filter) (
 	}()
 	// this is where we'll end the iteration
-	if filter.Since != nil {
-		if fs := uint32(*filter.Since); fs > since {
+	if filter.Since != 0 {
+		if fs := uint32(filter.Since); fs > since {
 			since = fs
 		}
 	}

View File

@@ -41,11 +41,11 @@ func (b *SliceStore) QueryEvents(filter nostr.Filter) iter.Seq[nostr.Event] {
 	// efficiently determine where to start and end
 	start := 0
 	end := len(b.internal)
-	if filter.Until != nil {
-		start, _ = slices.BinarySearchFunc(b.internal, *filter.Until, eventTimestampComparator)
+	if filter.Until != 0 {
+		start, _ = slices.BinarySearchFunc(b.internal, filter.Until, eventTimestampComparator)
 	}
-	if filter.Since != nil {
-		end, _ = slices.BinarySearchFunc(b.internal, *filter.Since, eventTimestampComparator)
+	if filter.Since != 0 {
+		end, _ = slices.BinarySearchFunc(b.internal, filter.Since, eventTimestampComparator)
 	}
 	// ham
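
Because Until picks the start index and Since the end index, b.internal is evidently sorted newest-first, and slices.BinarySearchFunc turns both bounds into O(log n) lookups. The comparator's body is not part of this diff; a hypothetical equivalent for that descending created_at order:

// Hypothetical reconstruction, assuming events are ordered by CreatedAt
// descending (newest first); eventTimestampComparator's real body is
// not shown in this commit.
func eventTimestampComparator(evt nostr.Event, target nostr.Timestamp) int {
	// Comparison is reversed because the slice is in descending order:
	// an element "comes before" the target when its timestamp is larger.
	switch {
	case evt.CreatedAt > target:
		return -1
	case evt.CreatedAt < target:
		return 1
	default:
		return 0
	}
}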

View File

@@ -11,8 +11,8 @@ type Filter struct {
 	Kinds   []Kind
 	Authors []PubKey
 	Tags    TagMap
-	Since   *Timestamp
-	Until   *Timestamp
+	Since   Timestamp
+	Until   Timestamp
 	Limit   int
 	Search  string
@@ -32,11 +32,11 @@ func (ef Filter) Matches(event Event) bool {
 		return false
 	}
-	if ef.Since != nil && event.CreatedAt < *ef.Since {
+	if ef.Since != 0 && event.CreatedAt < ef.Since {
 		return false
 	}
-	if ef.Until != nil && event.CreatedAt > *ef.Until {
+	if ef.Until != 0 && event.CreatedAt > ef.Until {
 		return false
 	}
@@ -92,11 +92,11 @@ func FilterEqual(a Filter, b Filter) bool {
 		}
 	}
-	if !arePointerValuesEqual(a.Since, b.Since) {
+	if a.Since != b.Since {
 		return false
 	}
-	if !arePointerValuesEqual(a.Until, b.Until) {
+	if a.Until != b.Until {
 		return false
 	}
@@ -117,6 +117,8 @@ func (ef Filter) Clone() Filter {
 		Limit:     ef.Limit,
 		Search:    ef.Search,
 		LimitZero: ef.LimitZero,
+		Since:     ef.Since,
+		Until:     ef.Until,
 	}
 	if ef.IDs != nil {
@@ -140,16 +142,6 @@ func (ef Filter) Clone() Filter {
 		}
 	}
-	if ef.Since != nil {
-		since := *ef.Since
-		clone.Since = &since
-	}
-	if ef.Until != nil {
-		until := *ef.Until
-		clone.Until = &until
-	}
 	return clone
 }
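
With value fields, the struct-literal copy at the top of Clone already duplicates Since and Until, which is why the pointer-duplication block below it could be deleted outright. A tiny hypothetical illustration of why no deep copy is needed anymore:

a := nostr.Filter{Since: 1000, Until: 2000}
b := a.Clone()
b.Since = 0 // clears only the clone; a.Since is still 1000
// With the old *Timestamp fields, a naive shallow copy would have left
// a and b sharing one Timestamp allocation, so writing *b.Since would
// have mutated a too -- exactly what the removed code guarded against.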

View File

@@ -97,22 +97,16 @@ func easyjson4d398eaaDecodeGithubComNbdWtfGoNostr(in *jlexer.Lexer, out *Filter)
 		case "since":
 			if in.IsNull() {
 				in.Skip()
-				out.Since = nil
+				out.Since = 0
 			} else {
-				if out.Since == nil {
-					out.Since = new(Timestamp)
-				}
-				*out.Since = Timestamp(in.Int64())
+				out.Since = Timestamp(in.Int64())
 			}
 		case "until":
 			if in.IsNull() {
 				in.Skip()
-				out.Until = nil
+				out.Until = 0
 			} else {
-				if out.Until == nil {
-					out.Until = new(Timestamp)
-				}
-				*out.Until = Timestamp(in.Int64())
+				out.Until = Timestamp(in.Int64())
 			}
 		case "limit":
 			out.Limit = int(in.Int())
@@ -211,7 +205,7 @@ func easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(out *jwriter.Writer, in Filter
 			out.RawByte(']')
 		}
 	}
-	if in.Since != nil {
+	if in.Since != 0 {
 		const prefix string = ",\"since\":"
 		if first {
 			first = false
@@ -219,9 +213,9 @@ func easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(out *jwriter.Writer, in Filter
 		} else {
 			out.RawString(prefix)
 		}
-		out.Int64(int64(*in.Since))
+		out.Int64(int64(in.Since))
 	}
-	if in.Until != nil {
+	if in.Until != 0 {
 		const prefix string = ",\"until\":"
 		if first {
 			first = false
@@ -229,7 +223,7 @@ func easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(out *jwriter.Writer, in Filter
 		} else {
 			out.RawString(prefix)
 		}
-		out.Int64(int64(*in.Until))
+		out.Int64(int64(in.Until))
 	}
 	if in.Limit != 0 || in.LimitZero {
 		const prefix string = ",\"limit\":"
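
A behavioral consequence of the encoder change: since/until are written only when nonzero, and on decode both JSON null and an absent field collapse to 0, so "bound at the unix epoch" and "no bound" are now indistinguishable on the wire. The same omit-when-zero shape can be approximated with the standard library; a sketch using a hypothetical mirror struct rather than the generated easyjson code above:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical mirror of the relevant Filter fields; go-nostr itself
// uses the generated easyjson code shown above.
type filterJSON struct {
	Since int64 `json:"since,omitempty"`
	Until int64 `json:"until,omitempty"`
	Limit int   `json:"limit,omitempty"`
}

func main() {
	b, _ := json.Marshal(filterJSON{Since: 1000})
	fmt.Println(string(b)) // {"since":1000} -- zero Until and Limit are omitted
}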

View File

@@ -132,16 +132,6 @@ func escapeString(dst []byte, s string) []byte {
 	return dst
 }
-func arePointerValuesEqual[V comparable](a *V, b *V) bool {
-	if a == nil && b == nil {
-		return true
-	}
-	if a != nil && b != nil {
-		return *a == *b
-	}
-	return false
-}
 func subIdToSerial(subId string) int64 {
 	n := strings.Index(subId, ":")
 	if n < 0 || n > len(subId) {

View File

@@ -20,8 +20,8 @@ type KeySigner struct {
 // NewPlainKeySigner creates a new KeySigner from a private key.
 // Returns an error if the private key is invalid.
-func NewPlainKeySigner(sec [32]byte) (KeySigner, error) {
-	return KeySigner{sec, nostr.GetPublicKey(sec), xsync.NewMapOf[nostr.PubKey, [32]byte]()}, nil
+func NewPlainKeySigner(sec [32]byte) KeySigner {
+	return KeySigner{sec, nostr.GetPublicKey(sec), xsync.NewMapOf[nostr.PubKey, [32]byte]()}
 }
 // SignEvent signs the provided event with the signer's private key.
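
With a fixed-size [32]byte argument there is nothing left to validate, so construction becomes infallible (note the stale "Returns an error" doc comment surviving as an unchanged context line). A hedged usage sketch, assuming the keyer Signer interface's SignEvent(ctx, *Event) error shape; the hex key is the one from this commit's own test diff:

kr := keyer.NewPlainKeySigner(nostr.MustSecretKeyFromHex(
	"040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537"))
evt := nostr.Event{Kind: nostr.KindTextNote, Content: "hello", CreatedAt: nostr.Now()}
if err := kr.SignEvent(context.Background(), &evt); err != nil {
	// signing can still fail; only construction became infallible
	log.Fatal(err)
}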

View File

@@ -41,7 +41,7 @@ func (rl *Relay) handleDeleteRequest(ctx context.Context, evt nostr.Event) error
 				Kinds:   []nostr.Kind{nostr.Kind(kind)},
 				Authors: []nostr.PubKey{author},
 				Tags:    nostr.TagMap{"d": []string{identifier}},
-				Until:   &evt.CreatedAt,
+				Until:   evt.CreatedAt,
 			}
 		default:
 			continue

View File

@@ -156,7 +156,7 @@ func ListenForMessages(
 	for ie := range pool.SubscribeMany(ctx, ourRelays, nostr.Filter{
 		Kinds: []nostr.Kind{nostr.KindGiftWrap},
 		Tags:  nostr.TagMap{"p": []string{pk.Hex()}},
-		Since: &since,
+		Since: since,
 	}, nostr.SubscriptionOptions{Label: "mydms"}) {
 		rumor, err := nip59.GiftUnwrap(
 			ie.Event,

View File

@@ -11,7 +11,7 @@ import (
 //
 // It returns -1 when the filter is not eligible for hyperloglog calculation.
 func HyperLogLogEventPubkeyOffsetForFilter(filter nostr.Filter) int {
-	if filter.IDs != nil || filter.Since != nil || filter.Until != nil || filter.Authors != nil ||
+	if filter.IDs != nil || filter.Since != 0 || filter.Until != 0 || filter.Authors != nil ||
 		len(filter.Kinds) != 1 || filter.Search != "" || len(filter.Tags) != 1 {
 		// obvious cases in which we won't bother to do hyperloglog stuff
 		return -1

View File

@@ -109,11 +109,10 @@ func NewBunker(
 	}
 	go func() {
-		now := nostr.Now()
 		events := pool.SubscribeMany(ctx, relays, nostr.Filter{
 			Tags:      nostr.TagMap{"p": []string{clientPublicKey.Hex()}},
 			Kinds:     []nostr.Kind{nostr.KindNostrConnect},
-			Since:     &now,
+			Since:     nostr.Now(),
 			LimitZero: true,
 		}, nostr.SubscriptionOptions{
 			Label: "bunker46client",

View File

@@ -19,10 +19,7 @@ import (
func TestWallet(t *testing.T) { func TestWallet(t *testing.T) {
ctx := context.Background() ctx := context.Background()
kr, err := keyer.NewPlainKeySigner(nostr.MustSecretKeyFromHex("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537")) kr := keyer.NewPlainKeySigner(nostr.MustSecretKeyFromHex("040cbf11f24b080ad9d8669d7514d9f3b7b1f58e5a6dcb75549352b041656537"))
if err != nil {
t.Fatal(err)
}
privateKey, _ := btcec.NewPrivateKey() privateKey, _ := btcec.NewPrivateKey()
@@ -85,7 +82,7 @@ func TestWallet(t *testing.T) {
// wallet metadata event // wallet metadata event
metaEvent := nostr.Event{} metaEvent := nostr.Event{}
err = w.toEvent(ctx, kr, &metaEvent) err := w.toEvent(ctx, kr, &metaEvent)
require.NoError(t, err) require.NoError(t, err)
events = append(events, metaEvent) events = append(events, metaEvent)

View File

@@ -11,8 +11,8 @@ func (pool *Pool) PaginatorWithInterval(
 ) func(ctx context.Context, urls []string, filter Filter, opts SubscriptionOptions) chan RelayEvent {
 	return func(ctx context.Context, urls []string, filter Filter, opts SubscriptionOptions) chan RelayEvent {
 		nextUntil := Now()
-		if filter.Until != nil {
-			nextUntil = *filter.Until
+		if filter.Until != 0 {
+			nextUntil = filter.Until
 		}
 		globalLimit := uint64(filter.Limit)
@@ -29,7 +29,7 @@ func (pool *Pool) PaginatorWithInterval(
 			defer close(globalCh)
 			for {
-				filter.Until = &nextUntil
+				filter.Until = nextUntil
 				time.Sleep(interval)
 				keepGoing := false
@@ -48,7 +48,7 @@ func (pool *Pool) PaginatorWithInterval(
 					return
 				}
-				if evt.CreatedAt < *filter.Until {
+				if evt.CreatedAt < filter.Until {
 					nextUntil = evt.CreatedAt
 				}
 			}
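
Beyond the syntax, this hunk removes an aliasing hazard: the old code stored &nextUntil in the filter, so every later write to nextUntil silently moved the filter's bound as well; now each loop iteration copies the cursor explicitly. A self-contained toy (hypothetical types, not the repo's) showing the hazard the value field eliminates:

package main

import "fmt"

type filter struct{ Until *int64 } // the old, pointer-based shape

func main() {
	var f filter
	cursor := int64(100)
	f.Until = &cursor // the filter now aliases the cursor variable
	cursor = 50       // intending only to advance the pagination cursor...
	fmt.Println(*f.Until) // prints 50: the filter's bound moved too

	// With a value field (Until int64), f.Until = cursor copies the
	// number, and later cursor updates leave the filter untouched.
}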

View File

@@ -492,8 +492,7 @@ func (pool *Pool) subMany(
 					// this means the connection was closed for weird reasons, like the server shut down
 					// so we will update the filters here to include only events seem from now on
 					// and try to reconnect until we succeed
-					now := Now()
-					filter.Since = &now
+					filter.Since = Now()
 					debugLogf("%s reconnecting because sub.Events is broken\n", nm)
 					goto reconnect
 				}

View File

@@ -52,10 +52,10 @@ func (sys *System) StreamLiveFeed(
 	serial := 0
-	var since *nostr.Timestamp
+	var since nostr.Timestamp
 	if data, _ := sys.KVStore.Get(latestKey); data != nil {
 		latest = decodeTimestamp(data)
-		since = &latest
+		since = latest
 	}
 	filter := nostr.Filter{
@@ -127,7 +127,7 @@ func (sys *System) FetchFeedPage(
 		if until > oldestTimestamp {
 			// we can use our local database
-			filter.Until = &until
+			filter.Until = until
 			count := 0
 			for evt := range sys.Store.QueryEvents(filter) {
@@ -150,9 +150,8 @@ func (sys *System) FetchFeedPage(
 				wg.Done()
 				continue
 			}
-			fUntil := oldestTimestamp + 1
-			filter.Until = &fUntil
-			filter.Since = nil
+			filter.Until = oldestTimestamp + 1
+			filter.Since = 0
 			for ie := range sys.Pool.FetchMany(ctx, relays, filter, nostr.SubscriptionOptions{
 				Label: "feedpage",
 			}) {