negentropy.

- a way to handle custom messages from Relay (NEG-whatever etc)
- negentropy implementation (adapted from that other one)
- nip77 nostr negentropy extension
- QueryEvents method for RelayStore that returns a channel (makes negentropy syncing work more seamlessly)
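For reference, a minimal sketch (not part of the diff below) of what the new envelope types serialize to on the wire; the subscription id and payload strings are placeholders:

package main

import (
    "fmt"

    "github.com/nbd-wtf/go-nostr"
    "github.com/nbd-wtf/go-nostr/nip77"
)

func main() {
    // NEG-OPEN carries the subscription id, the filter and the initial
    // hex-encoded negentropy payload: ["NEG-OPEN","<subid>",<filter>,"<hex>"]
    open := nip77.OpenEnvelope{
        SubscriptionID: "sub1",                        // placeholder id
        Filter:         nostr.Filter{Kinds: []int{1}}, // placeholder filter
        Message:        "6100000200",                  // placeholder payload
    }
    b, _ := open.MarshalJSON()
    fmt.Println(string(b))

    // subsequent rounds travel as ["NEG-MSG","<subid>","<hex>"]
    msg := nip77.MessageEnvelope{SubscriptionID: "sub1", Message: "61"}
    b, _ = msg.MarshalJSON()
    fmt.Println(string(b))
}

These are plain NIP-01-style JSON arrays labeled NEG-OPEN, NEG-MSG, NEG-ERR and NEG-CLOSE, which is what the custom-message handler hook parses below.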
nip77/envelopes.go (Normal file, 181 lines added)
@@ -0,0 +1,181 @@
package nip77

import (
    "bytes"
    "fmt"

    "github.com/mailru/easyjson"
    jwriter "github.com/mailru/easyjson/jwriter"
    "github.com/nbd-wtf/go-nostr"
    "github.com/tidwall/gjson"
)

func ParseNegMessage(message []byte) nostr.Envelope {
    firstComma := bytes.Index(message, []byte{','})
    if firstComma == -1 {
        return nil
    }
    label := message[0:firstComma]

    var v nostr.Envelope
    switch {
    case bytes.Contains(label, []byte("NEG-MSG")):
        v = &MessageEnvelope{}
    case bytes.Contains(label, []byte("NEG-OPEN")):
        v = &OpenEnvelope{}
    case bytes.Contains(label, []byte("NEG-ERR")):
        v = &ErrorEnvelope{}
    case bytes.Contains(label, []byte("NEG-CLOSE")):
        v = &CloseEnvelope{}
    default:
        return nil
    }

    if err := v.UnmarshalJSON(message); err != nil {
        return nil
    }
    return v
}

var (
    _ nostr.Envelope = (*OpenEnvelope)(nil)
    _ nostr.Envelope = (*MessageEnvelope)(nil)
    _ nostr.Envelope = (*CloseEnvelope)(nil)
    _ nostr.Envelope = (*ErrorEnvelope)(nil)
)

type OpenEnvelope struct {
    SubscriptionID string
    Filter         nostr.Filter
    Message        string
}

func (_ OpenEnvelope) Label() string { return "NEG-OPEN" }
func (v OpenEnvelope) String() string {
    b, _ := v.MarshalJSON()
    return string(b)
}

func (v *OpenEnvelope) UnmarshalJSON(data []byte) error {
    r := gjson.ParseBytes(data)
    arr := r.Array()
    if len(arr) != 4 {
        return fmt.Errorf("failed to decode NEG-OPEN envelope")
    }

    v.SubscriptionID = arr[1].Str
    v.Message = arr[3].Str
    return easyjson.Unmarshal([]byte(arr[2].Raw), &v.Filter)
}

func (v OpenEnvelope) MarshalJSON() ([]byte, error) {
    res := bytes.NewBuffer(make([]byte, 0, 17+len(v.SubscriptionID)+len(v.Message)+500))

    res.WriteString(`["NEG-OPEN","`)
    res.WriteString(v.SubscriptionID)
    res.WriteString(`",`)

    w := jwriter.Writer{}
    v.Filter.MarshalEasyJSON(&w)
    w.Buffer.DumpTo(res)

    res.WriteString(`,"`)
    res.WriteString(v.Message)
    res.WriteString(`"]`)

    return res.Bytes(), nil
}

type MessageEnvelope struct {
    SubscriptionID string
    Message        string
}

func (_ MessageEnvelope) Label() string { return "NEG-MSG" }
func (v MessageEnvelope) String() string {
    b, _ := v.MarshalJSON()
    return string(b)
}

func (v *MessageEnvelope) UnmarshalJSON(data []byte) error {
    r := gjson.ParseBytes(data)
    arr := r.Array()
    if len(arr) < 3 {
        return fmt.Errorf("failed to decode NEG-MSG envelope")
    }
    v.SubscriptionID = arr[1].Str
    v.Message = arr[2].Str
    return nil
}

func (v MessageEnvelope) MarshalJSON() ([]byte, error) {
    res := bytes.NewBuffer(make([]byte, 0, 17+len(v.SubscriptionID)+len(v.Message)))

    res.WriteString(`["NEG-MSG","`)
    res.WriteString(v.SubscriptionID)
    res.WriteString(`","`)
    res.WriteString(v.Message)
    res.WriteString(`"]`)

    return res.Bytes(), nil
}

type CloseEnvelope struct {
    SubscriptionID string
}

func (_ CloseEnvelope) Label() string { return "NEG-CLOSE" }
func (v CloseEnvelope) String() string {
    b, _ := v.MarshalJSON()
    return string(b)
}

func (v *CloseEnvelope) UnmarshalJSON(data []byte) error {
    r := gjson.ParseBytes(data)
    arr := r.Array()
    if len(arr) < 2 {
        return fmt.Errorf("failed to decode NEG-CLOSE envelope")
    }
    v.SubscriptionID = arr[1].Str
    return nil
}

func (v CloseEnvelope) MarshalJSON() ([]byte, error) {
    res := bytes.NewBuffer(make([]byte, 0, 14+len(v.SubscriptionID)))
    res.WriteString(`["NEG-CLOSE","`)
    res.WriteString(v.SubscriptionID)
    res.WriteString(`"]`)
    return res.Bytes(), nil
}

type ErrorEnvelope struct {
    SubscriptionID string
    Reason         string
}

func (_ ErrorEnvelope) Label() string { return "NEG-ERROR" }
func (v ErrorEnvelope) String() string {
    b, _ := v.MarshalJSON()
    return string(b)
}

func (v *ErrorEnvelope) UnmarshalJSON(data []byte) error {
    r := gjson.ParseBytes(data)
    arr := r.Array()
    if len(arr) < 3 {
        return fmt.Errorf("failed to decode NEG-ERROR envelope")
    }
    v.SubscriptionID = arr[1].Str
    v.Reason = arr[2].Str
    return nil
}

func (v ErrorEnvelope) MarshalJSON() ([]byte, error) {
    res := bytes.NewBuffer(make([]byte, 0, 19+len(v.SubscriptionID)+len(v.Reason)))
    res.WriteString(`["NEG-ERROR","`)
    res.WriteString(v.SubscriptionID)
    res.WriteString(`","`)
    res.WriteString(v.Reason)
    res.WriteString(`"]`)
    return res.Bytes(), nil
}
nip77/example/example.go (Normal file, 61 lines added)
@@ -0,0 +1,61 @@
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/fiatjaf/eventstore"
    "github.com/fiatjaf/eventstore/slicestore"
    "github.com/nbd-wtf/go-nostr"
    "github.com/nbd-wtf/go-nostr/nip77"
)

func main() {
    ctx := context.Background()
    db := &slicestore.SliceStore{}
    db.Init()

    sk := nostr.GeneratePrivateKey()
    local := eventstore.RelayWrapper{Store: db}

    for {
        for i := 0; i < 20; i++ {
            {
                evt := nostr.Event{
                    Kind:      1,
                    Content:   fmt.Sprintf("same old hello %d", i),
                    CreatedAt: nostr.Timestamp(i),
                    Tags:      nostr.Tags{},
                }
                evt.Sign(sk)
                db.SaveEvent(ctx, &evt)
            }

            {
                evt := nostr.Event{
                    Kind:      1,
                    Content:   fmt.Sprintf("custom hello %d", i),
                    CreatedAt: nostr.Now(),
                    Tags:      nostr.Tags{},
                }
                evt.Sign(sk)
                db.SaveEvent(ctx, &evt)
            }
        }

        err := nip77.NegentropySync(ctx, local, "ws://localhost:7777", nostr.Filter{})
        if err != nil {
            panic(err)
        }

        data, err := local.QuerySync(ctx, nostr.Filter{})
        if err != nil {
            panic(err)
        }

        fmt.Println("total local events:", len(data))
        time.Sleep(time.Second * 10)
    }
}
nip77/idlistpool.go (Normal file, 41 lines added)
@@ -0,0 +1,41 @@
package nip77

import (
    "sync"
)

type idlistpool struct {
    initialsize int
    pool        [][]string
    sync.Mutex
}

func newidlistpool(initialsize int) *idlistpool {
    ilp := idlistpool{
        initialsize: initialsize,
        pool:        make([][]string, 1, 2),
    }

    ilp.pool[0] = make([]string, 0, initialsize)

    return &ilp
}

func (ilp *idlistpool) grab() []string {
    ilp.Lock()
    defer ilp.Unlock()

    l := len(ilp.pool)
    if l > 0 {
        idlist := ilp.pool[l-1]
        ilp.pool = ilp.pool[0 : l-1]
        return idlist
    }
    idlist := make([]string, 0, ilp.initialsize)
    return idlist
}

func (ilp *idlistpool) giveback(idlist []string) {
    // giveback can run concurrently with grab, so take the same lock
    ilp.Lock()
    defer ilp.Unlock()

    idlist = idlist[:0]
    ilp.pool = append(ilp.pool, idlist)
}
nip77/negentropy/encoding.go (Normal file, 126 lines added)
@@ -0,0 +1,126 @@
package negentropy

import (
    "bytes"
    "encoding/hex"

    "github.com/nbd-wtf/go-nostr"
)

func (n *Negentropy) DecodeTimestampIn(reader *bytes.Reader) (nostr.Timestamp, error) {
    t, err := decodeVarInt(reader)
    if err != nil {
        return 0, err
    }

    timestamp := nostr.Timestamp(t)
    if timestamp == 0 {
        timestamp = maxTimestamp
    } else {
        timestamp--
    }

    timestamp += n.lastTimestampIn
    if timestamp < n.lastTimestampIn { // Check for overflow
        timestamp = maxTimestamp
    }
    n.lastTimestampIn = timestamp
    return timestamp, nil
}

func (n *Negentropy) DecodeBound(reader *bytes.Reader) (Bound, error) {
    timestamp, err := n.DecodeTimestampIn(reader)
    if err != nil {
        return Bound{}, err
    }

    length, err := decodeVarInt(reader)
    if err != nil {
        return Bound{}, err
    }

    id := make([]byte, length)
    if _, err = reader.Read(id); err != nil {
        return Bound{}, err
    }

    return Bound{Item{timestamp, hex.EncodeToString(id)}}, nil
}

func (n *Negentropy) encodeTimestampOut(timestamp nostr.Timestamp) []byte {
    if timestamp == maxTimestamp {
        n.lastTimestampOut = maxTimestamp
        return encodeVarInt(0)
    }
    temp := timestamp
    timestamp -= n.lastTimestampOut
    n.lastTimestampOut = temp
    return encodeVarInt(int(timestamp + 1))
}

func (n *Negentropy) encodeBound(bound Bound) []byte {
    var output []byte

    t := n.encodeTimestampOut(bound.Timestamp)
    idlen := encodeVarInt(len(bound.ID) / 2)
    output = append(output, t...)
    output = append(output, idlen...)
    id, _ := hex.DecodeString(bound.Item.ID)

    output = append(output, id...)
    return output
}

func getMinimalBound(prev, curr Item) Bound {
    if curr.Timestamp != prev.Timestamp {
        return Bound{Item{curr.Timestamp, ""}}
    }

    sharedPrefixBytes := 0

    for i := 0; i < 32; i++ {
        // ids are hex strings, so compare one byte (two characters) at a time
        if curr.ID[i*2:i*2+2] != prev.ID[i*2:i*2+2] {
            break
        }
        sharedPrefixBytes++
    }

    // sharedPrefixBytes + 1 to include the first differing byte, or the entire ID if identical.
    return Bound{Item{curr.Timestamp, curr.ID[:(sharedPrefixBytes+1)*2]}}
}

func decodeVarInt(reader *bytes.Reader) (int, error) {
    var res int = 0

    for {
        b, err := reader.ReadByte()
        if err != nil {
            return 0, err
        }

        res = (res << 7) | (int(b) & 127)
        if (b & 128) == 0 {
            break
        }
    }

    return res, nil
}

func encodeVarInt(n int) []byte {
    if n == 0 {
        return []byte{0}
    }

    var o []byte
    for n != 0 {
        o = append([]byte{byte(n & 0x7F)}, o...)
        n >>= 7
    }

    for i := 0; i < len(o)-1; i++ {
        o[i] |= 0x80
    }

    return o
}
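A quick round-trip check of the varint scheme above (big-endian 7-bit groups, continuation bit set on every byte except the last). This is a hypothetical test, not part of the commit, assuming it lives in the same negentropy package:

package negentropy

import (
    "bytes"
    "testing"
)

// sanity check for the varint helpers in encoding.go (hypothetical addition)
func TestVarIntRoundTrip(t *testing.T) {
    for _, n := range []int{0, 1, 127, 128, 300, 1 << 20} {
        enc := encodeVarInt(n)
        dec, err := decodeVarInt(bytes.NewReader(enc))
        if err != nil {
            t.Fatal(err)
        }
        if dec != n {
            t.Fatalf("round-trip failed: %d -> %v -> %d", n, enc, dec)
        }
    }
    // e.g. 300 encodes to [0x82 0x2c]: 0x82 = continuation bit + 2, 0x2c = 44, and (2<<7)|44 = 300
}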
nip77/negentropy/negentropy.go (Normal file, 315 lines added)
@@ -0,0 +1,315 @@
package negentropy

import (
    "bytes"
    "encoding/hex"
    "fmt"
    "math"
    "os"
    "unsafe"

    "github.com/nbd-wtf/go-nostr"
)

const (
    protocolVersion byte = 0x61 // version 1
    maxTimestamp         = nostr.Timestamp(math.MaxInt64)
)

var infiniteBound = Bound{Item: Item{Timestamp: maxTimestamp}}

type Negentropy struct {
    storage          Storage
    sealed           bool
    frameSizeLimit   int
    isInitiator      bool
    lastTimestampIn  nostr.Timestamp
    lastTimestampOut nostr.Timestamp

    Haves    chan string
    HaveNots chan string
}

func NewNegentropy(storage Storage, frameSizeLimit int) *Negentropy {
    return &Negentropy{
        storage:        storage,
        frameSizeLimit: frameSizeLimit,
    }
}

func (n *Negentropy) Insert(evt *nostr.Event) {
    err := n.storage.Insert(evt.CreatedAt, evt.ID)
    if err != nil {
        panic(err)
    }
}

func (n *Negentropy) seal() {
    if !n.sealed {
        n.storage.Seal()
    }
    n.sealed = true
}

func (n *Negentropy) Initiate() []byte {
    n.seal()
    n.isInitiator = true

    n.Haves = make(chan string, n.storage.Size()/2)
    n.HaveNots = make(chan string, n.storage.Size()/2)

    output := bytes.NewBuffer(make([]byte, 0, 1+n.storage.Size()*32))
    output.WriteByte(protocolVersion)
    n.SplitRange(0, n.storage.Size(), infiniteBound, output)

    return output.Bytes()
}

func (n *Negentropy) Reconcile(msg []byte) (output []byte, err error) {
    n.seal()
    reader := bytes.NewReader(msg)

    output, err = n.reconcileAux(reader)
    if err != nil {
        return nil, err
    }

    if len(output) == 1 && n.isInitiator {
        close(n.Haves)
        close(n.HaveNots)
        return nil, nil
    }

    return output, nil
}

func (n *Negentropy) reconcileAux(reader *bytes.Reader) ([]byte, error) {
    n.lastTimestampIn, n.lastTimestampOut = 0, 0 // reset for each message

    fullOutput := bytes.NewBuffer(make([]byte, 0, 5000))
    fullOutput.WriteByte(protocolVersion)

    pv, err := reader.ReadByte()
    if err != nil {
        return nil, err
    }

    if pv < 0x60 || pv > 0x6f {
        return nil, fmt.Errorf("invalid protocol version byte")
    }
    if pv != protocolVersion {
        if n.isInitiator {
            return nil, fmt.Errorf("unsupported negentropy protocol version requested")
        }
        return fullOutput.Bytes(), nil
    }

    var prevBound Bound
    prevIndex := 0
    skip := false

    partialOutput := bytes.NewBuffer(make([]byte, 0, 100))
    for reader.Len() > 0 {
        partialOutput.Reset()

        doSkip := func() {
            if skip {
                skip = false
                encodedBound := n.encodeBound(prevBound)
                partialOutput.Write(encodedBound)
                partialOutput.WriteByte(SkipMode)
            }
        }

        currBound, err := n.DecodeBound(reader)
        if err != nil {
            return nil, err
        }
        modeVal, err := decodeVarInt(reader)
        if err != nil {
            return nil, err
        }
        mode := Mode(modeVal)

        lower := prevIndex
        upper := n.storage.FindLowerBound(prevIndex, n.storage.Size(), currBound)

        switch mode {
        case SkipMode:
            skip = true

        case FingerprintMode:
            var theirFingerprint [FingerprintSize]byte
            _, err := reader.Read(theirFingerprint[:])
            if err != nil {
                return nil, err
            }
            ourFingerprint, err := n.storage.Fingerprint(lower, upper)
            if err != nil {
                return nil, err
            }

            if theirFingerprint == ourFingerprint {
                skip = true
            } else {
                doSkip()
                n.SplitRange(lower, upper, currBound, partialOutput)
            }

        case IdListMode:
            numIds, err := decodeVarInt(reader)
            if err != nil {
                return nil, err
            }

            theirElems := make(map[string]struct{})
            var idb [32]byte

            for i := 0; i < numIds; i++ {
                _, err := reader.Read(idb[:])
                if err != nil {
                    return nil, err
                }
                id := hex.EncodeToString(idb[:])
                theirElems[id] = struct{}{}
            }

            n.storage.Iterate(lower, upper, func(item Item, _ int) bool {
                id := item.ID
                if _, exists := theirElems[id]; !exists {
                    if n.isInitiator {
                        n.Haves <- id
                    }
                } else {
                    delete(theirElems, id)
                }
                return true
            })

            if n.isInitiator {
                skip = true
                for id := range theirElems {
                    n.HaveNots <- id
                }
            } else {
                doSkip()

                responseIds := make([]byte, 0, 32*n.storage.Size())
                endBound := currBound

                n.storage.Iterate(lower, upper, func(item Item, index int) bool {
                    if n.frameSizeLimit-200 < fullOutput.Len()+len(responseIds) {
                        endBound = Bound{item}
                        upper = index
                        return false
                    }

                    id, _ := hex.DecodeString(item.ID)
                    responseIds = append(responseIds, id...)
                    return true
                })

                encodedBound := n.encodeBound(endBound)

                partialOutput.Write(encodedBound)
                partialOutput.WriteByte(IdListMode)
                partialOutput.Write(encodeVarInt(len(responseIds) / 32))
                partialOutput.Write(responseIds)

                partialOutput.WriteTo(fullOutput)
                partialOutput.Reset()
            }

        default:
            return nil, fmt.Errorf("unexpected mode %d", mode)
        }

        if n.frameSizeLimit-200 < fullOutput.Len()+partialOutput.Len() {
            // frame size limit exceeded, handle by encoding a boundary and fingerprint for the remaining range
            remainingFingerprint, err := n.storage.Fingerprint(upper, n.storage.Size())
            if err != nil {
                panic(err)
            }

            fullOutput.Write(n.encodeBound(infiniteBound))
            fullOutput.WriteByte(FingerprintMode)
            fullOutput.Write(remainingFingerprint[:])

            break // stop processing further
        } else {
            // append the constructed output for this iteration
            partialOutput.WriteTo(fullOutput)
        }

        prevIndex = upper
        prevBound = currBound
    }

    return fullOutput.Bytes(), nil
}

func (n *Negentropy) SplitRange(lower, upper int, upperBound Bound, output *bytes.Buffer) {
    numElems := upper - lower
    const buckets = 16

    if numElems < buckets*2 {
        // we just send the full ids here
        boundEncoded := n.encodeBound(upperBound)
        output.Write(boundEncoded)
        output.WriteByte(IdListMode)
        output.Write(encodeVarInt(numElems))

        n.storage.Iterate(lower, upper, func(item Item, _ int) bool {
            id, _ := hex.DecodeString(item.ID)
            output.Write(id)
            return true
        })
    } else {
        itemsPerBucket := numElems / buckets
        bucketsWithExtra := numElems % buckets
        curr := lower

        for i := 0; i < buckets; i++ {
            bucketSize := itemsPerBucket
            if i < bucketsWithExtra {
                bucketSize++
            }
            ourFingerprint, err := n.storage.Fingerprint(curr, curr+bucketSize)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                panic(err)
            }

            curr += bucketSize

            var nextBound Bound
            if curr == upper {
                nextBound = upperBound
            } else {
                var prevItem, currItem Item

                n.storage.Iterate(curr-1, curr+1, func(item Item, index int) bool {
                    if index == curr-1 {
                        prevItem = item
                    } else {
                        currItem = item
                    }
                    return true
                })

                minBound := getMinimalBound(prevItem, currItem)
                nextBound = minBound
            }

            boundEncoded := n.encodeBound(nextBound)
            output.Write(boundEncoded)
            output.WriteByte(FingerprintMode)
            output.Write(ourFingerprint[:])
        }
    }
}

func (n *Negentropy) Name() string {
    p := unsafe.Pointer(n)
    return fmt.Sprintf("%d", uintptr(p)&127)
}
nip77/negentropy/types.go (Normal file, 111 lines added)
@@ -0,0 +1,111 @@
package negentropy

import (
    "crypto/sha256"
    "encoding/binary"
    "encoding/hex"
    "fmt"
    "strings"

    "github.com/nbd-wtf/go-nostr"
)

const FingerprintSize = 16

type Mode int

const (
    SkipMode        = 0
    FingerprintMode = 1
    IdListMode      = 2
)

type Storage interface {
    Insert(nostr.Timestamp, string) error
    Seal()
    Size() int
    Iterate(begin, end int, cb func(item Item, i int) bool) error
    FindLowerBound(begin, end int, value Bound) int
    GetBound(idx int) Bound
    Fingerprint(begin, end int) ([FingerprintSize]byte, error)
}

type Item struct {
    Timestamp nostr.Timestamp
    ID        string
}

func itemCompare(a, b Item) int {
    if a.Timestamp != b.Timestamp {
        return int(a.Timestamp - b.Timestamp)
    }
    return strings.Compare(a.ID, b.ID)
}

func (i Item) String() string { return fmt.Sprintf("Item<%d:%s>", i.Timestamp, i.ID) }

type Bound struct{ Item }

func (b Bound) String() string {
    if b.Timestamp == infiniteBound.Timestamp {
        return "Bound<infinite>"
    }
    return fmt.Sprintf("Bound<%d:%s>", b.Timestamp, b.ID)
}

type Accumulator struct {
    Buf []byte
}

func (acc *Accumulator) SetToZero() {
    acc.Buf = make([]byte, 32)
}

func (acc *Accumulator) Add(id string) {
    b, _ := hex.DecodeString(id)
    acc.AddBytes(b)
}

func (acc *Accumulator) AddAccumulator(other Accumulator) {
    acc.AddBytes(other.Buf)
}

func (acc *Accumulator) AddBytes(other []byte) {
    var currCarry, nextCarry uint32

    if len(acc.Buf) < 32 {
        newBuf := make([]byte, 32)
        copy(newBuf, acc.Buf)
        acc.Buf = newBuf
    }

    for i := 0; i < 8; i++ {
        offset := i * 4
        orig := binary.LittleEndian.Uint32(acc.Buf[offset:])
        otherV := binary.LittleEndian.Uint32(other[offset:])

        next := orig + currCarry + otherV
        if next < orig || next < otherV {
            nextCarry = 1
        }

        binary.LittleEndian.PutUint32(acc.Buf[offset:], next&0xFFFFFFFF)
        currCarry = nextCarry
        nextCarry = 0
    }
}

func (acc *Accumulator) SV() []byte {
    return acc.Buf[:]
}

func (acc *Accumulator) GetFingerprint(n int) [FingerprintSize]byte {
    input := acc.SV()
    input = append(input, encodeVarInt(n)...)

    hash := sha256.Sum256(input)

    var fingerprint [FingerprintSize]byte
    copy(fingerprint[:], hash[:FingerprintSize])
    return fingerprint
}
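To illustrate the fingerprint construction above (ids are summed as 256-bit little-endian integers, the element count is appended as a varint, and the SHA-256 of that is truncated to 16 bytes), here is a hedged sketch with made-up ids; the import path follows the package location introduced by this commit:

package main

import (
    "fmt"
    "strings"

    "github.com/nbd-wtf/go-nostr/nip77/negentropy"
)

func main() {
    var acc negentropy.Accumulator
    acc.SetToZero()

    // two made-up 32-byte ids, hex-encoded (64 characters each)
    acc.Add(strings.Repeat("01", 32))
    acc.Add(strings.Repeat("02", 32))

    // 16-byte fingerprint of a range containing these 2 elements
    fp := acc.GetFingerprint(2)
    fmt.Printf("%x\n", fp)
}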
nip77/negentropy/vector.go (Normal file, 74 lines added)
@@ -0,0 +1,74 @@
package negentropy

import (
    "fmt"
    "slices"

    "github.com/nbd-wtf/go-nostr"
)

type Vector struct {
    items  []Item
    sealed bool
}

func NewVector() *Vector {
    return &Vector{
        items: make([]Item, 0, 30),
    }
}

func (v *Vector) Insert(createdAt nostr.Timestamp, id string) error {
    if len(id)/2 != 32 {
        return fmt.Errorf("bad id size for added item: expected %d, got %d", 32, len(id)/2)
    }

    item := Item{createdAt, id}
    v.items = append(v.items, item)
    return nil
}

func (v *Vector) Size() int { return len(v.items) }

func (v *Vector) Seal() {
    if v.sealed {
        panic("trying to seal an already sealed vector")
    }
    v.sealed = true
    slices.SortFunc(v.items, itemCompare)
}

func (v *Vector) GetBound(idx int) Bound {
    if idx < len(v.items) {
        return Bound{v.items[idx]}
    }
    return infiniteBound
}

func (v *Vector) Iterate(begin, end int, cb func(Item, int) bool) error {
    for i := begin; i < end; i++ {
        if !cb(v.items[i], i) {
            break
        }
    }
    return nil
}

func (v *Vector) FindLowerBound(begin, end int, bound Bound) int {
    idx, _ := slices.BinarySearchFunc(v.items[begin:end], bound.Item, itemCompare)
    return begin + idx
}

func (v *Vector) Fingerprint(begin, end int) ([FingerprintSize]byte, error) {
    var out Accumulator
    out.SetToZero()

    if err := v.Iterate(begin, end, func(item Item, _ int) bool {
        out.Add(item.ID)
        return true
    }); err != nil {
        return [FingerprintSize]byte{}, err
    }

    return out.GetFingerprint(end - begin), nil
}
nip77/negentropy/whatever_test.go (Normal file, 180 lines added)
@@ -0,0 +1,180 @@
package negentropy

import (
    "encoding/hex"
    "fmt"
    "slices"
    "strings"
    "sync"
    "testing"

    "github.com/nbd-wtf/go-nostr"
    "github.com/stretchr/testify/require"
)

func TestSuperSmall(t *testing.T) {
    runTestWith(t,
        4,
        [][]int{{0, 3}}, [][]int{{2, 4}},
        [][]int{{3, 4}}, [][]int{{0, 2}},
    )
}

func TestNoNeedToSync(t *testing.T) {
    runTestWith(t,
        50,
        [][]int{{0, 50}}, [][]int{{0, 50}},
        [][]int{}, [][]int{},
    )
}

func TestSmallNumbers(t *testing.T) {
    runTestWith(t,
        20,
        [][]int{{2, 15}}, [][]int{{0, 7}, {10, 20}},
        [][]int{{0, 2}, {15, 20}}, [][]int{{7, 10}},
    )
}

func TestBigNumbers(t *testing.T) {
    runTestWith(t,
        200,
        [][]int{{20, 150}}, [][]int{{0, 70}, {100, 200}},
        [][]int{{0, 20}, {150, 200}}, [][]int{{70, 100}},
    )
}

func TestMuchBiggerNumbersAndConfusion(t *testing.T) {
    runTestWith(t,
        20000,
        [][]int{{20, 150}, {1700, 3400}, {7000, 8100}, {13800, 13816}, {13817, 14950}, {19800, 20000}}, // n1
        [][]int{{0, 2000}, {3000, 3600}, {10000, 12200}, {13799, 13801}, {14800, 19900}},               // n2
        [][]int{{0, 20}, {150, 1700}, {3400, 3600}, {10000, 12200}, {13799, 13800}, {14950, 19800}},    // n1 need
        [][]int{{2000, 3000}, {7000, 8100}, {13801, 13816}, {13817, 14800}, {19900, 20000}},            // n1 have
    )
}

func runTestWith(t *testing.T,
    totalEvents int,
    n1Ranges [][]int, n2Ranges [][]int,
    expectedN1NeedRanges [][]int, expectedN1HaveRanges [][]int,
) {
    var err error
    var q []byte
    var n1 *Negentropy
    var n2 *Negentropy

    events := make([]*nostr.Event, totalEvents)
    for i := range events {
        evt := nostr.Event{}
        evt.Content = fmt.Sprintf("event %d", i)
        evt.Kind = 1
        evt.CreatedAt = nostr.Timestamp(i)
        evt.ID = fmt.Sprintf("%064d", i)
        events[i] = &evt
    }

    {
        n1 = NewNegentropy(NewVector(), 1<<16)
        for _, r := range n1Ranges {
            for i := r[0]; i < r[1]; i++ {
                n1.Insert(events[i])
            }
        }

        q = n1.Initiate()
    }

    {
        n2 = NewNegentropy(NewVector(), 1<<16)
        for _, r := range n2Ranges {
            for i := r[0]; i < r[1]; i++ {
                n2.Insert(events[i])
            }
        }

        q, err = n2.Reconcile(q)
        if err != nil {
            t.Fatal(err)
            return
        }
    }

    invert := map[*Negentropy]*Negentropy{
        n1: n2,
        n2: n1,
    }
    i := 1

    wg := sync.WaitGroup{}
    wg.Add(3)

    go func() {
        wg.Done()
        for n := n1; q != nil; n = invert[n] {
            i++

            q, err = n.Reconcile(q)
            if err != nil {
                t.Fatal(err)
                return
            }

            if q == nil {
                return
            }
        }
    }()

    go func() {
        defer wg.Done()
        expectedHave := make([]string, 0, 100)
        for _, r := range expectedN1HaveRanges {
            for i := r[0]; i < r[1]; i++ {
                expectedHave = append(expectedHave, events[i].ID)
            }
        }
        haves := make([]string, 0, 100)
        for item := range n1.Haves {
            if slices.Contains(haves, item) {
                continue
            }
            haves = append(haves, item)
        }
        require.ElementsMatch(t, expectedHave, haves, "wrong have")
    }()

    go func() {
        defer wg.Done()
        expectedNeed := make([]string, 0, 100)
        for _, r := range expectedN1NeedRanges {
            for i := r[0]; i < r[1]; i++ {
                expectedNeed = append(expectedNeed, events[i].ID)
            }
        }
        havenots := make([]string, 0, 100)
        for item := range n1.HaveNots {
            if slices.Contains(havenots, item) {
                continue
            }
            havenots = append(havenots, item)
        }
        require.ElementsMatch(t, expectedNeed, havenots, "wrong need")
    }()

    wg.Wait()
}

func hexedBytes(o []byte) string {
    s := strings.Builder{}
    s.Grow(2 + 1 + len(o)*5)
    s.WriteString("[ ")
    for _, b := range o {
        x := hex.EncodeToString([]byte{b})
        s.WriteString("0x")
        s.WriteString(x)
        s.WriteString(" ")
    }
    s.WriteString("]")
    return s.String()
}
nip77/nip77.go (Normal file, 148 lines added)
@@ -0,0 +1,148 @@
package nip77

import (
    "context"
    "encoding/hex"
    "fmt"
    "sync"

    "github.com/cespare/xxhash"
    "github.com/greatroar/blobloom"
    "github.com/nbd-wtf/go-nostr"
    "github.com/nbd-wtf/go-nostr/nip77/negentropy"
)

func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, filter nostr.Filter) error {
    id := "go-nostr-tmp" // for now we can't have more than one subscription in the same connection

    data, err := store.QuerySync(ctx, filter)
    if err != nil {
        return fmt.Errorf("failed to query our local store: %w", err)
    }

    neg := negentropy.NewNegentropy(negentropy.NewVector(), 1024*1024)
    for _, evt := range data {
        neg.Insert(evt)
    }

    result := make(chan error)

    var r *nostr.Relay
    r, err = nostr.RelayConnect(ctx, url, nostr.WithCustomHandler(func(data []byte) {
        envelope := ParseNegMessage(data)
        if envelope == nil {
            return
        }
        switch env := envelope.(type) {
        case *OpenEnvelope, *CloseEnvelope:
            result <- fmt.Errorf("unexpected %s received from relay", env.Label())
            return
        case *ErrorEnvelope:
            result <- fmt.Errorf("relay returned a %s: %s", env.Label(), env.Reason)
            return
        case *MessageEnvelope:
            msg, err := hex.DecodeString(env.Message)
            if err != nil {
                result <- fmt.Errorf("relay sent invalid message: %w", err)
                return
            }

            nextmsg, err := neg.Reconcile(msg)
            if err != nil {
                result <- fmt.Errorf("failed to reconcile: %w", err)
                return
            }

            if len(nextmsg) != 0 {
                msgb, _ := MessageEnvelope{id, hex.EncodeToString(nextmsg)}.MarshalJSON()
                r.Write(msgb)
            }
        }
    }))
    if err != nil {
        return err
    }

    msg := neg.Initiate()
    open, _ := OpenEnvelope{id, filter, hex.EncodeToString(msg)}.MarshalJSON()
    err = <-r.Write(open)
    if err != nil {
        return fmt.Errorf("failed to write to relay: %w", err)
    }

    defer func() {
        clse, _ := CloseEnvelope{id}.MarshalJSON()
        r.Write(clse)
    }()

    type direction struct {
        label  string
        items  chan string
        source nostr.RelayStore
        target nostr.RelayStore
    }

    wg := sync.WaitGroup{}
    pool := newidlistpool(50)
    for _, dir := range []direction{
        {"up", neg.Haves, store, r},
        {"down", neg.HaveNots, r, store},
    } {
        wg.Add(1)
        go func(dir direction) {
            defer wg.Done()

            seen := blobloom.NewOptimized(blobloom.Config{
                Capacity: 10000,
                FPRate:   0.01,
            })

            doSync := func(ids []string) {
                defer wg.Done()
                defer pool.giveback(ids)

                if len(ids) == 0 {
                    return
                }
                evtch, err := dir.source.QueryEvents(ctx, nostr.Filter{IDs: ids})
                if err != nil {
                    result <- fmt.Errorf("error querying source on %s: %w", dir.label, err)
                    return
                }
                for evt := range evtch {
                    dir.target.Publish(ctx, *evt)
                }
            }

            ids := pool.grab()
            for item := range dir.items {
                h := xxhash.Sum64([]byte(item))
                if seen.Has(h) {
                    continue
                }

                seen.Add(h)
                ids = append(ids, item)
                if len(ids) == 50 {
                    wg.Add(1)
                    go doSync(ids)
                    ids = pool.grab()
                }
            }
            wg.Add(1)
            doSync(ids)
        }(dir)
    }

    go func() {
        wg.Wait()
        result <- nil
    }()

    err = <-result
    if err != nil {
        return err
    }

    return nil
}