bring in khatru and eventstore.

khatru/.gitignore (vendored, new file, 3 lines)
*.env
.idea/
knowledge.md

khatru/README.md (new file, 141 lines)
# khatru, a relay framework [](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay)

[](https://github.com/fiatjaf/khatru/actions/workflows/test.yml)
[](https://pkg.go.dev/github.com/fiatjaf/khatru)
[](https://goreportcard.com/report/github.com/fiatjaf/khatru)

Khatru makes it easy to write very very custom relays:

- custom event or filter acceptance policies
- custom `AUTH` handlers
- custom storage and pluggable databases
- custom webpages and other HTTP handlers

Here's a sample:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/fiatjaf/khatru/policies"
	"github.com/nbd-wtf/go-nostr"
)

func main() {
	// create the relay instance
	relay := khatru.NewRelay()

	// set up some basic properties (will be returned on the NIP-11 endpoint)
	relay.Info.Name = "my relay"
	relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"
	relay.Info.Description = "this is my custom relay"
	relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images"

	// you must bring your own storage scheme -- if you want to have any
	store := make(map[string]*nostr.Event, 120)

	// set up the basic relay functions
	relay.StoreEvent = append(relay.StoreEvent,
		func(ctx context.Context, event *nostr.Event) error {
			store[event.ID] = event
			return nil
		},
	)
	relay.QueryEvents = append(relay.QueryEvents,
		func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
			ch := make(chan *nostr.Event)
			go func() {
				for _, evt := range store {
					if filter.Matches(evt) {
						ch <- evt
					}
				}
				close(ch)
			}()
			return ch, nil
		},
	)
	relay.DeleteEvent = append(relay.DeleteEvent,
		func(ctx context.Context, event *nostr.Event) error {
			delete(store, event.ID)
			return nil
		},
	)

	// there are many other configurable things you can set
	relay.RejectEvent = append(relay.RejectEvent,
		// built-in policies
		policies.ValidateKind,

		// define your own policies
		policies.PreventLargeTags(100),
		func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
			if event.PubKey == "fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52" {
				return true, "we don't allow this person to write here"
			}
			return false, "" // anyone else can
		},
	)

	// you can request auth by rejecting an event or a request with the prefix "auth-required: "
	relay.RejectFilter = append(relay.RejectFilter,
		// built-in policies
		policies.NoComplexFilters,

		// define your own policies
		func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
			if pubkey := khatru.GetAuthed(ctx); pubkey != "" {
				log.Printf("request from %s\n", pubkey)
				return false, ""
			}
			return true, "auth-required: only authenticated users can read from this relay"
			// (this will cause an AUTH message to be sent and then a CLOSED message such that clients can
			// authenticate and then request again)
		},
	)
	// check the docs for more goodies!

	mux := relay.Router()
	// set up other http handlers
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("content-type", "text/html")
		fmt.Fprintf(w, `<b>welcome</b> to my relay!`)
	})

	// start the server
	fmt.Println("running on :3334")
	http.ListenAndServe(":3334", relay)
}
```

### But I don't want to write my own database!

Fear no more. Using the https://github.com/fiatjaf/eventstore module you get a bunch of compatible databases out of the box that you can just plug into your relay. For example, [sqlite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3):

```go
db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
if err := db.Init(); err != nil {
	panic(err)
}

relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
```

### But I don't want to write a bunch of custom policies!

Fear no more. We have a bunch of common policies written in the `github.com/fiatjaf/khatru/policies` package and also a handpicked selection of sane base defaults, which you can apply with:

```go
policies.ApplySaneDefaults(relay)
```

Contributions to this are very much welcomed.

khatru/adding.go (new file, 113 lines)
package khatru

import (
	"context"
	"errors"
	"fmt"

	"github.com/fiatjaf/eventstore"
	"github.com/nbd-wtf/go-nostr"
)

// AddEvent sends an event through the normal add pipeline, as if it was received from a websocket.
func (rl *Relay) AddEvent(ctx context.Context, evt *nostr.Event) (skipBroadcast bool, writeError error) {
	if evt == nil {
		return false, errors.New("error: event is nil")
	}

	if nostr.IsEphemeralKind(evt.Kind) {
		return false, rl.handleEphemeral(ctx, evt)
	} else {
		return rl.handleNormal(ctx, evt)
	}
}

func (rl *Relay) handleNormal(ctx context.Context, evt *nostr.Event) (skipBroadcast bool, writeError error) {
	for _, reject := range rl.RejectEvent {
		if reject, msg := reject(ctx, evt); reject {
			if msg == "" {
				return true, errors.New("blocked: no reason")
			} else {
				return true, errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
			}
		}
	}

	// will store
	// regular kinds are just saved directly
	if nostr.IsRegularKind(evt.Kind) {
		for _, store := range rl.StoreEvent {
			if err := store(ctx, evt); err != nil {
				switch err {
				case eventstore.ErrDupEvent:
					return true, nil
				default:
					return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(err.Error(), "error"))
				}
			}
		}
	} else {
		// otherwise it's a replaceable -- so we'll use the replacer functions if we have any
		if len(rl.ReplaceEvent) > 0 {
			for _, repl := range rl.ReplaceEvent {
				if err := repl(ctx, evt); err != nil {
					switch err {
					case eventstore.ErrDupEvent:
						return true, nil
					default:
						return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(err.Error(), "error"))
					}
				}
			}
		} else {
			// otherwise do it the manual way
			filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
			if nostr.IsAddressableKind(evt.Kind) {
				// when addressable, add the "d" tag to the filter
				filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
			}

			// now we fetch old events and delete them
			shouldStore := true
			for _, query := range rl.QueryEvents {
				ch, err := query(ctx, filter)
				if err != nil {
					continue
				}
				for previous := range ch {
					if isOlder(previous, evt) {
						for _, del := range rl.DeleteEvent {
							del(ctx, previous)
						}
					} else {
						// we found a more recent event, so we won't delete it and also will not store this new one
						shouldStore = false
					}
				}
			}

			// store
			if shouldStore {
				for _, store := range rl.StoreEvent {
					if saveErr := store(ctx, evt); saveErr != nil {
						switch saveErr {
						case eventstore.ErrDupEvent:
							return true, nil
						default:
							return false, fmt.Errorf("%s", nostr.NormalizeOKMessage(saveErr.Error(), "error"))
						}
					}
				}
			}
		}
	}

	for _, ons := range rl.OnEventSaved {
		ons(ctx, evt)
	}

	// track event expiration if applicable
	rl.expirationManager.trackEvent(evt)

	return false, nil
}

khatru/blossom/authorization.go (new file, 45 lines)
package blossom

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/mailru/easyjson"
	"github.com/nbd-wtf/go-nostr"
)

func readAuthorization(r *http.Request) (*nostr.Event, error) {
	token := r.Header.Get("Authorization")
	if !strings.HasPrefix(token, "Nostr ") {
		return nil, nil
	}

	eventj, err := base64.StdEncoding.DecodeString(token[6:])
	if err != nil {
		return nil, fmt.Errorf("invalid base64 token")
	}
	var evt nostr.Event
	if err := easyjson.Unmarshal(eventj, &evt); err != nil {
		return nil, fmt.Errorf("broken event")
	}
	if evt.Kind != 24242 || !evt.CheckID() {
		return nil, fmt.Errorf("invalid event")
	}
	if ok, _ := evt.CheckSignature(); !ok {
		return nil, fmt.Errorf("invalid signature")
	}

	expirationTag := evt.Tags.Find("expiration")
	if expirationTag == nil {
		return nil, fmt.Errorf("missing \"expiration\" tag")
	}
	expiration, _ := strconv.ParseInt(expirationTag[1], 10, 64)
	if nostr.Timestamp(expiration) < nostr.Now() {
		return nil, fmt.Errorf("event expired")
	}

	return &evt, nil
}

khatru/blossom/blob.go (new file, 26 lines)
package blossom

import (
	"context"

	"github.com/nbd-wtf/go-nostr"
)

type BlobDescriptor struct {
	URL      string          `json:"url"`
	SHA256   string          `json:"sha256"`
	Size     int             `json:"size"`
	Type     string          `json:"type"`
	Uploaded nostr.Timestamp `json:"uploaded"`

	Owner string `json:"-"`
}

type BlobIndex interface {
	Keep(ctx context.Context, blob BlobDescriptor, pubkey string) error
	List(ctx context.Context, pubkey string) (chan BlobDescriptor, error)
	Get(ctx context.Context, sha256 string) (*BlobDescriptor, error)
	Delete(ctx context.Context, sha256 string, pubkey string) error
}

var _ BlobIndex = (*EventStoreBlobIndexWrapper)(nil)

khatru/blossom/eventstorewrapper.go (new file, 104 lines)
package blossom

import (
	"context"
	"strconv"

	"github.com/fiatjaf/eventstore"
	"github.com/nbd-wtf/go-nostr"
)

// EventStoreBlobIndexWrapper uses fake events to keep track of what blobs we have stored and who owns them
type EventStoreBlobIndexWrapper struct {
	eventstore.Store

	ServiceURL string
}

func (es EventStoreBlobIndexWrapper) Keep(ctx context.Context, blob BlobDescriptor, pubkey string) error {
	ch, err := es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Kinds: []int{24242}, Tags: nostr.TagMap{"x": []string{blob.SHA256}}})
	if err != nil {
		return err
	}

	if <-ch == nil {
		// doesn't exist, save
		evt := &nostr.Event{
			PubKey: pubkey,
			Kind:   24242,
			Tags: nostr.Tags{
				{"x", blob.SHA256},
				{"type", blob.Type},
				{"size", strconv.Itoa(blob.Size)},
			},
			CreatedAt: blob.Uploaded,
		}
		evt.ID = evt.GetID()
		es.Store.SaveEvent(ctx, evt)
	}

	return nil
}

func (es EventStoreBlobIndexWrapper) List(ctx context.Context, pubkey string) (chan BlobDescriptor, error) {
	ech, err := es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Kinds: []int{24242}})
	if err != nil {
		return nil, err
	}

	ch := make(chan BlobDescriptor)

	go func() {
		for evt := range ech {
			ch <- es.parseEvent(evt)
		}
		close(ch)
	}()

	return ch, nil
}

func (es EventStoreBlobIndexWrapper) Get(ctx context.Context, sha256 string) (*BlobDescriptor, error) {
	ech, err := es.Store.QueryEvents(ctx, nostr.Filter{Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []int{24242}, Limit: 1})
	if err != nil {
		return nil, err
	}

	evt := <-ech
	if evt != nil {
		bd := es.parseEvent(evt)
		return &bd, nil
	}

	return nil, nil
}

func (es EventStoreBlobIndexWrapper) Delete(ctx context.Context, sha256 string, pubkey string) error {
	ech, err := es.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{pubkey}, Tags: nostr.TagMap{"x": []string{sha256}}, Kinds: []int{24242}, Limit: 1})
	if err != nil {
		return err
	}

	evt := <-ech
	if evt != nil {
		return es.Store.DeleteEvent(ctx, evt)
	}

	return nil
}

func (es EventStoreBlobIndexWrapper) parseEvent(evt *nostr.Event) BlobDescriptor {
	hhash := evt.Tags[0][1]
	mimetype := evt.Tags[1][1]
	ext := getExtension(mimetype)
	size, _ := strconv.Atoi(evt.Tags[2][1])

	return BlobDescriptor{
		Owner:    evt.PubKey,
		Uploaded: evt.CreatedAt,
		URL:      es.ServiceURL + "/" + hhash + ext,
		SHA256:   hhash,
		Type:     mimetype,
		Size:     size,
	}
}

khatru/blossom/handlers.go (new file, 367 lines)
package blossom

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"io"
	"mime"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/liamg/magic"
	"github.com/nbd-wtf/go-nostr"
)

func (bs BlossomServer) handleUploadCheck(w http.ResponseWriter, r *http.Request) {
	auth, err := readAuthorization(r)
	if err != nil {
		blossomError(w, err.Error(), 400)
		return
	}
	if auth == nil {
		blossomError(w, "missing \"Authorization\" header", 401)
		return
	}
	if auth.Tags.FindWithValue("t", "upload") == nil {
		blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403)
		return
	}

	mimetype := r.Header.Get("X-Content-Type")
	exts, _ := mime.ExtensionsByType(mimetype)
	var ext string
	if len(exts) > 0 {
		ext = exts[0]
	}

	// get the file size from the incoming header
	size, _ := strconv.Atoi(r.Header.Get("X-Content-Length"))

	for _, rb := range bs.RejectUpload {
		reject, reason, code := rb(r.Context(), auth, size, ext)
		if reject {
			blossomError(w, reason, code)
			return
		}
	}
}

func (bs BlossomServer) handleUpload(w http.ResponseWriter, r *http.Request) {
	auth, err := readAuthorization(r)
	if err != nil {
		blossomError(w, "invalid \"Authorization\": "+err.Error(), 404)
		return
	}
	if auth == nil {
		blossomError(w, "missing \"Authorization\" header", 401)
		return
	}
	if auth.Tags.FindWithValue("t", "upload") == nil {
		blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403)
		return
	}

	// get the file size from the incoming header
	size, _ := strconv.Atoi(r.Header.Get("Content-Length"))
	if size == 0 {
		blossomError(w, "missing \"Content-Length\" header", 400)
		return
	}

	// read first bytes of upload so we can find out the filetype
	b := make([]byte, min(50, size), size)
	if n, err := r.Body.Read(b); err != nil && n != size {
		blossomError(w, "failed to read initial bytes of upload body: "+err.Error(), 400)
		return
	}
	var ext string
	if ft, _ := magic.Lookup(b); ft != nil {
		ext = "." + ft.Extension
	} else {
		// if we can't find, use the filetype given by the upload header
		mimetype := r.Header.Get("Content-Type")
		ext = getExtension(mimetype)
	}

	// run the reject hooks
	for _, ru := range bs.RejectUpload {
		reject, reason, code := ru(r.Context(), auth, size, ext)
		if reject {
			blossomError(w, reason, code)
			return
		}
	}

	// if it passes then we have to read the entire thing into memory so we can compute the sha256
	for {
		var n int
		n, err = r.Body.Read(b[len(b):cap(b)])
		b = b[:len(b)+n]
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			break
		}
		if len(b) == cap(b) {
			// add more capacity (let append pick how much)
			// if Content-Length was correct we shouldn't reach this
			b = append(b, 0)[:len(b)]
		}
	}
	if err != nil {
		blossomError(w, "failed to read upload body: "+err.Error(), 400)
		return
	}

	hash := sha256.Sum256(b)
	hhash := hex.EncodeToString(hash[:])

	// keep track of the blob descriptor
	bd := BlobDescriptor{
		URL:      bs.ServiceURL + "/" + hhash + ext,
		SHA256:   hhash,
		Size:     len(b),
		Type:     mime.TypeByExtension(ext),
		Uploaded: nostr.Now(),
	}
	if err := bs.Store.Keep(r.Context(), bd, auth.PubKey); err != nil {
		blossomError(w, "failed to save event: "+err.Error(), 400)
		return
	}

	// save actual blob
	for _, sb := range bs.StoreBlob {
		if err := sb(r.Context(), hhash, b); err != nil {
			blossomError(w, "failed to save: "+err.Error(), 500)
			return
		}
	}

	// return response
	json.NewEncoder(w).Encode(bd)
}

func (bs BlossomServer) handleGetBlob(w http.ResponseWriter, r *http.Request) {
	spl := strings.SplitN(r.URL.Path, ".", 2)
	hhash := spl[0]
	if len(hhash) != 65 {
		blossomError(w, "invalid /<sha256>[.ext] path", 400)
		return
	}
	hhash = hhash[1:]

	// check for an authorization tag, if any
	auth, err := readAuthorization(r)
	if err != nil {
		blossomError(w, err.Error(), 400)
		return
	}

	// if there is one, we check if it has the extra requirements
	if auth != nil {
		if auth.Tags.FindWithValue("t", "get") == nil {
			blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403)
			return
		}

		if auth.Tags.FindWithValue("x", hhash) == nil &&
			auth.Tags.FindWithValue("server", bs.ServiceURL) == nil {
			blossomError(w, "invalid \"Authorization\" event \"x\" or \"server\" tag", 403)
			return
		}
	}

	for _, rg := range bs.RejectGet {
		reject, reason, code := rg(r.Context(), auth, hhash)
		if reject {
			blossomError(w, reason, code)
			return
		}
	}

	var ext string
	if len(spl) == 2 {
		ext = "." + spl[1]
	}

	for _, lb := range bs.LoadBlob {
		reader, _ := lb(r.Context(), hhash)
		if reader != nil {
			// use unix epoch as the time if we can't find the descriptor
			// as described in the http.ServeContent documentation
			t := time.Unix(0, 0)
			descriptor, err := bs.Store.Get(r.Context(), hhash)
			if err == nil && descriptor != nil {
				t = descriptor.Uploaded.Time()
			}
			w.Header().Set("ETag", hhash)
			w.Header().Set("Cache-Control", "public, max-age=604800, immutable")
			http.ServeContent(w, r, hhash+ext, t, reader)
			return
		}
	}

	blossomError(w, "file not found", 404)
}

func (bs BlossomServer) handleHasBlob(w http.ResponseWriter, r *http.Request) {
	spl := strings.SplitN(r.URL.Path, ".", 2)
	hhash := spl[0]
	if len(hhash) != 65 {
		blossomError(w, "invalid /<sha256>[.ext] path", 400)
		return
	}
	hhash = hhash[1:]

	bd, err := bs.Store.Get(r.Context(), hhash)
	if err != nil {
		blossomError(w, "failed to query: "+err.Error(), 500)
		return
	}

	if bd == nil {
		blossomError(w, "file not found", 404)
		return
	}
}

func (bs BlossomServer) handleList(w http.ResponseWriter, r *http.Request) {
	// check for an authorization tag, if any
	auth, err := readAuthorization(r)
	if err != nil {
		blossomError(w, err.Error(), 400)
		return
	}

	// if there is one, we check if it has the extra requirements
	if auth != nil {
		if auth.Tags.FindWithValue("t", "list") == nil {
			blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403)
			return
		}
	}

	pubkey := r.URL.Path[6:]

	for _, rl := range bs.RejectList {
		reject, reason, code := rl(r.Context(), auth, pubkey)
		if reject {
			blossomError(w, reason, code)
			return
		}
	}

	ch, err := bs.Store.List(r.Context(), pubkey)
	if err != nil {
		blossomError(w, "failed to query: "+err.Error(), 500)
		return
	}

	w.Write([]byte{'['})
	enc := json.NewEncoder(w)
	first := true
	for bd := range ch {
		if !first {
			w.Write([]byte{','})
		} else {
			first = false
		}
		enc.Encode(bd)
	}
	w.Write([]byte{']'})
}

func (bs BlossomServer) handleDelete(w http.ResponseWriter, r *http.Request) {
	auth, err := readAuthorization(r)
	if err != nil {
		blossomError(w, err.Error(), 400)
		return
	}

	if auth != nil {
		if auth.Tags.FindWithValue("t", "delete") == nil {
			blossomError(w, "invalid \"Authorization\" event \"t\" tag", 403)
			return
		}
	}

	spl := strings.SplitN(r.URL.Path, ".", 2)
	hhash := spl[0]
	if len(hhash) != 65 {
		blossomError(w, "invalid /<sha256>[.ext] path", 400)
		return
	}
	hhash = hhash[1:]
	if auth.Tags.FindWithValue("x", hhash) == nil &&
		auth.Tags.FindWithValue("server", bs.ServiceURL) == nil {
		blossomError(w, "invalid \"Authorization\" event \"x\" or \"server\" tag", 403)
		return
	}

	// should we accept this delete?
	for _, rd := range bs.RejectDelete {
		reject, reason, code := rd(r.Context(), auth, hhash)
		if reject {
			blossomError(w, reason, code)
			return
		}
	}

	// delete the entry that links this blob to this author
	if err := bs.Store.Delete(r.Context(), hhash, auth.PubKey); err != nil {
		blossomError(w, "delete of blob entry failed: "+err.Error(), 500)
		return
	}

	// we will actually only delete the file if no one else owns it
	if bd, err := bs.Store.Get(r.Context(), hhash); err == nil && bd == nil {
		for _, del := range bs.DeleteBlob {
			if err := del(r.Context(), hhash); err != nil {
				blossomError(w, "failed to delete blob: "+err.Error(), 500)
				return
			}
		}
	}
}

func (bs BlossomServer) handleReport(w http.ResponseWriter, r *http.Request) {
	var body []byte
	_, err := r.Body.Read(body)
	if err != nil {
		blossomError(w, "can't read request body", 400)
		return
	}

	var evt *nostr.Event
	if err := json.Unmarshal(body, evt); err != nil {
		blossomError(w, "can't parse event", 400)
		return
	}

	if isValid, _ := evt.CheckSignature(); !isValid {
		blossomError(w, "invalid report event is provided", 400)
		return
	}

	if evt.Kind != nostr.KindReporting {
		blossomError(w, "invalid report event is provided", 400)
		return
	}

	for _, rr := range bs.ReceiveReport {
		if err := rr(r.Context(), evt); err != nil {
			blossomError(w, "failed to receive report: "+err.Error(), 500)
			return
		}
	}
}

func (bs BlossomServer) handleMirror(w http.ResponseWriter, r *http.Request) {
}

func (bs BlossomServer) handleNegentropy(w http.ResponseWriter, r *http.Request) {
}

khatru/blossom/server.go (new file, 78 lines)
package blossom

import (
	"context"
	"io"
	"net/http"
	"strings"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr"
)

type BlossomServer struct {
	ServiceURL string
	Store      BlobIndex

	StoreBlob     []func(ctx context.Context, sha256 string, body []byte) error
	LoadBlob      []func(ctx context.Context, sha256 string) (io.ReadSeeker, error)
	DeleteBlob    []func(ctx context.Context, sha256 string) error
	ReceiveReport []func(ctx context.Context, reportEvt *nostr.Event) error

	RejectUpload []func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int)
	RejectGet    []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
	RejectList   []func(ctx context.Context, auth *nostr.Event, pubkey string) (bool, string, int)
	RejectDelete []func(ctx context.Context, auth *nostr.Event, sha256 string) (bool, string, int)
}

func New(rl *khatru.Relay, serviceURL string) *BlossomServer {
	bs := &BlossomServer{
		ServiceURL: serviceURL,
	}

	base := rl.Router()
	mux := http.NewServeMux()

	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/upload" {
			if r.Method == "PUT" {
				bs.handleUpload(w, r)
				return
			} else if r.Method == "HEAD" {
				bs.handleUploadCheck(w, r)
				return
			}
		}

		if strings.HasPrefix(r.URL.Path, "/list/") && r.Method == "GET" {
			bs.handleList(w, r)
			return
		}

		if (len(r.URL.Path) == 65 || strings.Index(r.URL.Path, ".") == 65) && strings.Index(r.URL.Path[1:], "/") == -1 {
			if r.Method == "HEAD" {
				bs.handleHasBlob(w, r)
				return
			} else if r.Method == "GET" {
				bs.handleGetBlob(w, r)
				return
			} else if r.Method == "DELETE" {
				bs.handleDelete(w, r)
				return
			}
		}

		if r.URL.Path == "/report" {
			if r.Method == "PUT" {
				bs.handleReport(w, r)
				return
			}
		}

		base.ServeHTTP(w, r)
	})

	rl.SetRouter(mux)

	return bs
}

khatru/blossom/utils.go (new file, 37 lines)
package blossom

import (
	"mime"
	"net/http"
)

func blossomError(w http.ResponseWriter, msg string, code int) {
	w.Header().Add("X-Reason", msg)
	w.WriteHeader(code)
}

func getExtension(mimetype string) string {
	if mimetype == "" {
		return ""
	}

	switch mimetype {
	case "image/jpeg":
		return ".jpg"
	case "image/gif":
		return ".gif"
	case "image/png":
		return ".png"
	case "image/webp":
		return ".webp"
	case "video/mp4":
		return ".mp4"
	}

	exts, _ := mime.ExtensionsByType(mimetype)
	if len(exts) > 0 {
		return exts[0]
	}

	return ""
}

khatru/broadcasting.go (new file, 11 lines)
package khatru

import (
	"github.com/nbd-wtf/go-nostr"
)

// BroadcastEvent emits an event to all listeners whose filters match it, skipping all rejection filters and actions;
// it also doesn't attempt to store the event or trigger any reactions or callbacks
func (rl *Relay) BroadcastEvent(evt *nostr.Event) int {
	return rl.notifyListeners(evt)
}

khatru/deleting.go (new file, 85 lines)
package khatru

import (
	"context"
	"fmt"
	"strconv"
	"strings"

	"github.com/nbd-wtf/go-nostr"
)

func (rl *Relay) handleDeleteRequest(ctx context.Context, evt *nostr.Event) error {
	// event deletion -- nip09
	for _, tag := range evt.Tags {
		if len(tag) >= 2 {
			var f nostr.Filter

			switch tag[0] {
			case "e":
				f = nostr.Filter{IDs: []string{tag[1]}}
			case "a":
				spl := strings.Split(tag[1], ":")
				if len(spl) != 3 {
					continue
				}
				kind, err := strconv.Atoi(spl[0])
				if err != nil {
					continue
				}
				author := spl[1]
				identifier := spl[2]
				f = nostr.Filter{
					Kinds:   []int{kind},
					Authors: []string{author},
					Tags:    nostr.TagMap{"d": []string{identifier}},
					Until:   &evt.CreatedAt,
				}
			default:
				continue
			}

			ctx := context.WithValue(ctx, internalCallKey, struct{}{})
			for _, query := range rl.QueryEvents {
				ch, err := query(ctx, f)
				if err != nil {
					continue
				}
				target := <-ch
				if target == nil {
					continue
				}
				// got the event, now check if the user can delete it
				acceptDeletion := target.PubKey == evt.PubKey
				var msg string
				if !acceptDeletion {
					msg = "you are not the author of this event"
				}
				// but if we have a function to overwrite this outcome, use that instead
				for _, odo := range rl.OverwriteDeletionOutcome {
					acceptDeletion, msg = odo(ctx, target, evt)
				}

				if acceptDeletion {
					// delete it
					for _, del := range rl.DeleteEvent {
						if err := del(ctx, target); err != nil {
							return err
						}
					}

					// if it was tracked to be expired that is not needed anymore
					rl.expirationManager.removeEvent(target.ID)
				} else {
					// fail and stop here
					return fmt.Errorf("blocked: %s", msg)
				}

				// don't try to query this same event again
				break
			}
		}
	}

	return nil
}

khatru/docs/.gitignore (vendored, new file, 1 line)
node_modules

khatru/docs/.prettierrc.yaml (new file, 9 lines)
semi: false
arrowParens: avoid
insertPragma: false
printWidth: 80
proseWrap: preserve
singleQuote: true
trailingComma: none
useTabs: false
bracketSpacing: false

khatru/docs/.vitepress/.gitignore (vendored, new file, 2 lines)
cache
dist

khatru/docs/.vitepress/config.js (new file, 42 lines)
export default {
  lang: 'en-US',
  title: 'khatru',
  description: 'a framework for making Nostr relays',
  themeConfig: {
    logo: '/logo.png',
    nav: [
      {text: 'Home', link: '/'},
      {text: 'Why', link: '/why'},
      {text: 'Docs', link: '/getting-started'},
      {text: 'Source', link: 'https://github.com/fiatjaf/khatru'}
    ],
    sidebar: [
      {
        text: 'Core Concepts',
        items: [
          { text: 'Event Storage', link: '/core/eventstore' },
          { text: 'Authentication', link: '/core/auth' },
          { text: 'HTTP Integration', link: '/core/embed' },
          { text: 'Request Routing', link: '/core/routing' },
          { text: 'Management API', link: '/core/management' },
          { text: 'Media Storage (Blossom)', link: '/core/blossom' },
        ]
      },
      {
        text: 'Cookbook',
        items: [
          { text: 'Search', link: '/cookbook/search' },
          { text: 'Dynamic Relays', link: '/cookbook/dynamic' },
          { text: 'Generating Events Live', link: '/cookbook/custom-live-events' },
          { text: 'Custom Stores', link: '/cookbook/custom-stores' },
          { text: 'Using something like Google Drive', link: '/cookbook/google-drive' },
        ]
      }
    ],
    editLink: {
      pattern: 'https://github.com/fiatjaf/khatru/edit/master/docs/:path'
    }
  },
  head: [['link', {rel: 'icon', href: '/logo.png'}]],
  cleanUrls: true
}

khatru/docs/.vitepress/theme/Layout.vue (new file, 11 lines)
<script setup>
import DefaultTheme from 'vitepress/theme'
const {Layout} = DefaultTheme
</script>
<template>
  <Layout>
    <template #layout-bottom>
      <div class="khatru-layout-bottom">~</div>
    </template>
  </Layout>
</template>

khatru/docs/.vitepress/theme/custom.css (new file, 24 lines)
:root {
  --vp-c-brand-1: #2eafab;
  --vp-c-brand-2: #30373b;
  --vp-c-brand-3: #3b6a3e;
  --vp-button-brand-bg: #2eafab;
  --vp-button-brand-hover-bg: #3b6a3e;
  --vp-button-brand-active-bg: #30373b;

  --vp-c-bg: #f2e6e2;
  --vp-c-bg-soft: #f3f2f0;
}

.dark {
  --vp-c-bg: #0a0a08;
  --vp-c-bg-soft: #161a0e;
}

.khatru-layout-bottom {
  margin: 2rem auto;
  width: 200px;
  text-align: center;
  font-family: monospace;
  font-size: 2rem;
}

khatru/docs/.vitepress/theme/index.mjs (new file, 8 lines)
import DefaultTheme from 'vitepress/theme'
import NostrifyLayout from './Layout.vue'
import './custom.css'

export default {
  extends: DefaultTheme,
  Layout: NostrifyLayout
}

khatru/docs/config.js (symbolic link, 1 line)
.vitepress/config.js

khatru/docs/cookbook/custom-live-events.md (new file, 64 lines)
---
outline: deep
---

# Generating custom live events

Suppose you want to generate a new event every time a goal is scored in some soccer game and send it to all clients subscribed to that game through a given `t` tag.

We'll assume you'll be polling some HTTP API that gives you the game's current score, and that in your `main` function you'll start the function that does the polling:

```go
func main() {
	// other stuff here
	relay := khatru.NewRelay()

	go startPollingGame(relay)
	// other stuff here
}

type GameStatus struct {
	TeamA int `json:"team_a"`
	TeamB int `json:"team_b"`
}

func startPollingGame(relay *khatru.Relay) {
	current := GameStatus{0, 0}

	for {
		newStatus, err := fetchGameStatus()
		if err != nil {
			continue
		}

		if newStatus.TeamA > current.TeamA {
			// team A has scored a goal, here we generate an event
			evt := nostr.Event{
				CreatedAt: nostr.Now(),
				Kind:      1,
				Content:   "team A has scored!",
				Tags:      nostr.Tags{{"t", "this-game"}},
			}
			evt.Sign(global.RelayPrivateKey)
			// calling BroadcastEvent will send the event to everybody who has been listening for tag "t=[this-game]"
			// there is no need to write any code to keep track of these clients or who is listening to what, khatru
			// does that already in the background automatically
			relay.BroadcastEvent(&evt)

			// just calling BroadcastEvent won't cause this event to be stored,
			// if for any reason you want to store these events you must call the store functions manually
			for _, store := range relay.StoreEvent {
				store(context.TODO(), &evt)
			}
		}
		if newStatus.TeamB > current.TeamB {
			// same here, if team B has scored a goal
			// ...
		}
	}
}

func fetchGameStatus() (GameStatus, error) {
	// implementation of calling some external API goes here
}
```

khatru/docs/cookbook/custom-stores.md (new file, 88 lines)
---
outline: deep
---

# Generating events on the fly from a non-Nostr data-source

Suppose you want to serve events with weather data for periods in the past. All you have is a big CSV file with the data.

Then you get a query like `{"#g": ["d6nvp"], "since": 1664074800, "until": 1666666800, "kind": 10774}` -- imagine for a while that kind `10774` means weather data.

First you do some geohashing calculation to discover that `d6nvp` corresponds to Willemstad, Curaçao, then you query your CSV file for the Curaçao weather data for the given period -- from `2022-09-25` to `2022-10-25` -- and return the events corresponding to that query, signed on the fly:

```go
func main() {
	// other stuff here
	relay := khatru.NewRelay()

	relay.QueryEvents = append(relay.QueryEvents,
		handleWeatherQuery,
	)
	// other stuff here
}

func handleWeatherQuery(ctx context.Context, filter nostr.Filter) (ch chan *nostr.Event, err error) {
	if !slices.Contains(filter.Kinds, 10774) {
		// this function only handles kind 10774, if the query is for something else we return
		// a nil channel, which corresponds to no results
		return nil, nil
	}

	file, err := os.Open("weatherdata.csv")
	if err != nil {
		return nil, fmt.Errorf("we have lost our file: %w", err)
	}

	// QueryEvents functions are expected to return a channel
	ch = make(chan *nostr.Event)

	// and they can do their query asynchronously, emitting events to the channel as they come
	go func() {
		defer file.Close()

		// we're going to do this for each tag in the filter
		gTags, _ := filter.Tags["g"]
		for _, gTag := range gTags {
			// translate geohash into city name
			cityName, err := geohashToCityName(gTag)
			if err != nil {
				continue
			}

			reader := csv.NewReader(file)
			for {
				record, err := reader.Read()
				if err != nil {
					return
				}

				// ensure we're only getting records for Willemstad
				if cityName != record[0] {
					continue
				}

				date, _ := time.Parse("2006-01-02", record[1])
				ts := nostr.Timestamp(date.Unix())
				if ts > *filter.Since && ts < *filter.Until {
					// we found a record that matches the filter, so we make
					// an event on the fly and return it
					evt := nostr.Event{
						CreatedAt: ts,
						Kind:      10774,
						Tags: nostr.Tags{
							{"temperature", record[2]},
							{"condition", record[3]},
						},
					}
					evt.Sign(global.RelayPrivateKey)
					ch <- &evt
				}
			}
		}
	}()

	return ch, nil
}
```

Beware, the code above is inefficient and the entire approach is not very smart; it's meant just as an example.

khatru/docs/cookbook/dynamic.md (new file, 58 lines)
---
outline: deep
---

# Generating `khatru` relays dynamically and serving them from the same path

Suppose you want to expose a different relay interface depending on the subdomain that is accessed. I don't know, maybe you want to serve just events with pictures on `pictures.example.com` and just events with audio files on `audios.example.com`; maybe you want just events in English on `en.example.com` and just events in Portuguese on `pt.example.com`; there are many possibilities.

You could achieve that with a scheme like the following:

```go
var topLevelHost = "example.com"
var mainRelay = khatru.NewRelay() // we're omitting all the configuration steps for brevity
var subRelays = xsync.NewMapOf[string, *khatru.Relay]()

func main() {
	handler := http.HandlerFunc(dynamicRelayHandler)

	log.Printf("listening at http://0.0.0.0:8080")
	http.ListenAndServe("0.0.0.0:8080", handler)
}

func dynamicRelayHandler(w http.ResponseWriter, r *http.Request) {
	var relay *khatru.Relay
	subdomain := r.Host[0 : len(r.Host)-len(topLevelHost)]
	if subdomain == "" {
		// no subdomain, use the main top-level relay
		relay = mainRelay
	} else {
		// called on a subdomain, so get a dynamic relay
		subdomain = subdomain[0 : len(subdomain)-1] // remove dangling "."
		// get a dynamic relay
		relay, _ = subRelays.LoadOrCompute(subdomain, func() *khatru.Relay {
			return makeNewRelay(subdomain)
		})
	}

	relay.ServeHTTP(w, r)
}

func makeNewRelay(subdomain string) *khatru.Relay {
	// somehow use the subdomain to generate a relay with specific configurations
	relay := khatru.NewRelay()
	switch subdomain {
	case "pictures":
		// relay configuration shenanigans go here
	case "audios":
		// relay configuration shenanigans go here
	case "en":
		// relay configuration shenanigans go here
	case "pt":
		// relay configuration shenanigans go here
	}
	return relay
}
```

In practice you could come up with a way to let all these dynamic relays share a common underlying datastore, but that is out of the scope of this example.
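
If you did want to try that, one minimal sketch (an illustration only, reusing the eventstore-backed handlers shown in the README; the shared backend and its path are assumptions) is to create a single backend and append its handlers inside a variant of the `makeNewRelay` function above, remembering to call `shared.Init()` once in `main` before serving:

```go
// a single backend shared by every dynamic relay (path is just an example)
var shared = &sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-shared-db"}

func makeNewRelayWithSharedStore(subdomain string) *khatru.Relay {
	relay := khatru.NewRelay()

	// all dynamic relays read from and write to the same store
	relay.StoreEvent = append(relay.StoreEvent, shared.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, shared.QueryEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, shared.DeleteEvent)

	// per-subdomain policies would still be configured here, as in the example above

	return relay
}
```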

khatru/docs/cookbook/google-drive.md (new file, 67 lines)
---
outline: deep
---

## Querying events from Google Drive

Suppose you have a bunch of events stored in text files on Google Drive and you want to serve them as a relay. You could just store each event as a separate file and use the native Google Drive search to match the queries when serving requests. It would probably not be as fast as using a local database, but it would work.

```go
func main() {
	// other stuff here
	relay := khatru.NewRelay()

	relay.StoreEvent = append(relay.StoreEvent, handleEvent)
	relay.QueryEvents = append(relay.QueryEvents, handleQuery)
	// other stuff here
}

func handleEvent(ctx context.Context, event *nostr.Event) error {
	// store each event as a file on google drive
	_, err := gdriveService.Files.Create(googledrive.CreateOptions{
		Name: event.ID,       // with the name set to their id
		Body: event.String(), // the body as the full event JSON
	})
	return err
}

func handleQuery(ctx context.Context, filter nostr.Filter) (ch chan *nostr.Event, err error) {
	// QueryEvents functions are expected to return a channel
	ch = make(chan *nostr.Event)

	// and they can do their query asynchronously, emitting events to the channel as they come
	go func() {
		if len(filter.IDs) > 0 {
			// if the query is for ids we can do a simpler name match
			for _, id := range filter.IDs {
				results, _ := gdriveService.Files.List(googledrive.ListOptions{
					Q: fmt.Sprintf("name = '%s'", id),
				})
				if len(results) > 0 {
					var evt nostr.Event
					json.Unmarshal(results[0].Body, &evt)
					ch <- &evt
				}
			}
		} else {
			// otherwise we use the google-provided search and hope it will catch tags that are in the event body
			for _, tagValues := range filter.Tags {
				results, _ := gdriveService.Files.List(googledrive.ListOptions{
					Q: fmt.Sprintf("fullText contains '%s'", tagValues),
				})
				for _, result := range results {
					var evt nostr.Event
					json.Unmarshal(result.Body, &evt)
					if filter.Matches(&evt) {
						ch <- &evt
					}
				}
			}
		}
	}()

	return ch, nil
}
```

(Disclaimer: since I have no idea of how to properly use the Google Drive API this interface is entirely made up.)

khatru/docs/cookbook/search.md (new file, 51 lines)
---
outline: deep
---

# Implementing NIP-50 `search` support

The [`nostr.Filter` type](https://pkg.go.dev/github.com/nbd-wtf/go-nostr#Filter) has a `Search` field, so you basically just have to handle that if it's present.

It can be tricky to implement fulltext search properly though, so some [eventstores](../core/eventstore) implement it natively, such as [Bluge](https://pkg.go.dev/github.com/fiatjaf/eventstore/bluge), [OpenSearch](https://pkg.go.dev/github.com/fiatjaf/eventstore/opensearch) and [ElasticSearch](https://pkg.go.dev/github.com/fiatjaf/eventstore/elasticsearch) (although for the last two you'll need an instance of these database servers running, while with Bluge it's embedded).

If you have any of these you can just use them like any other eventstore:

```go
func main() {
	// other stuff here

	normal := &lmdb.LMDBBackend{Path: "data"}
	os.MkdirAll(normal.Path, 0755)
	if err := normal.Init(); err != nil {
		panic(err)
	}

	search := bluge.BlugeBackend{Path: "search", RawEventStore: normal}
	if err := search.Init(); err != nil {
		panic(err)
	}

	relay.StoreEvent = append(relay.StoreEvent, normal.SaveEvent, search.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, normal.QueryEvents, search.QueryEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, normal.DeleteEvent, search.DeleteEvent)

	// other stuff here
}
```

Note that in this case we're using the [LMDB](https://pkg.go.dev/github.com/fiatjaf/eventstore/lmdb) adapter for normal queries and it explicitly rejects any filter that contains a `Search` field, while [Bluge](https://pkg.go.dev/github.com/fiatjaf/eventstore/bluge) rejects any filter _without_ a `Search` value, which makes them pair well together.

Other adapters, like [SQLite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3), implement search functionality on their own, so if you don't want to use that you would have to put a middleware in between, like:

```go
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent, search.SaveEvent)
relay.QueryEvents = append(relay.QueryEvents, func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	if len(filter.Search) > 0 {
		return search.QueryEvents(ctx, filter)
	} else {
		filterNoSearch := filter
		filterNoSearch.Search = ""
		return normal.QueryEvents(ctx, filterNoSearch)
	}
})
```

khatru/docs/core/auth.md (new file, 85 lines)
---
outline: deep
---

# NIP-42 `AUTH`

`khatru` supports [NIP-42](https://nips.nostr.com/42) out of the box. The functionality is exposed in the following ways.

## Sending arbitrary `AUTH` challenges

At any time you can send an `AUTH` message to a client that is making a request.

It makes sense to give the user the option to authenticate right after they establish a connection, for example, when you have a relay that works differently depending on whether the user is authenticated or not.

```go
relay := khatru.NewRelay()

relay.OnConnect = append(relay.OnConnect, func(ctx context.Context) {
	khatru.RequestAuth(ctx)
})
```

This will send a NIP-42 `AUTH` challenge message to the client so it will have the option to authenticate itself whenever it wants to.

## Signaling to the client that a specific query requires an authenticated user

If on `RejectFilter` or `RejectEvent` you prefix the message with `auth-required: `, that will automatically send an `AUTH` message before the `CLOSED` or `OK` carrying that prefix, so the client immediately knows it must authenticate to proceed and already has the challenge required for that, and can replay the request right away.

```go
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
	return true, "auth-required: this query requires you to be authenticated"
})
relay.RejectEvent = append(relay.RejectEvent, func(ctx context.Context, event *nostr.Event) (bool, string) {
	return true, "auth-required: publishing this event requires authentication"
})
```

## Reading the auth status of a client

After a client is authenticated and opens a new subscription with `REQ` or sends a new event with `EVENT`, you'll be able to read the public key they're authenticated with.

```go
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
	authenticatedUser := khatru.GetAuthed(ctx)
})
```

## Telling an authenticated user they're still not allowed to do something

If the user is authenticated but still not allowed (because some specific filters or events are only accessible to some specific users) you can reply on `RejectFilter` or `RejectEvent` with a message prefixed with `"restricted: "` to make that clear to clients.

```go
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
	authenticatedUser := khatru.GetAuthed(ctx)

	if slices.Contains(authorizedUsers, authenticatedUser) {
		return false, ""
	} else {
		return true, "restricted: you're not a member of the privileged group that can read that stuff"
	}
})
```

## Reacting to a successful authentication

Each `khatru.WebSocket` object has an `.Authed` channel that is closed whenever that connection performs a successful authentication.

You can use that to emulate a listener for these events in case you want to keep track of who is authenticating in real time and not only check it when they request something.

```go
relay.OnConnect = append(relay.OnConnect,
	khatru.RequestAuth,
	func(ctx context.Context) {
		go func(ctx context.Context) {
			conn := khatru.GetConnection(ctx)
			select {
			case <-ctx.Done():
				fmt.Println("connection closed")
			case <-conn.Authed:
				fmt.Println("authenticated as", conn.AuthedPublicKey)
			}
		}(ctx)
	},
)
```

khatru/docs/core/blossom.md (new file, 93 lines)
---
outline: deep
---

# Blossom: Media Storage

Khatru comes with a built-in Blossom HTTP handler that allows you to store and serve media blobs using whatever storage backend you want (filesystem, S3, etc.).

## Basic Setup

Here's a minimal example of what you should do to enable it:

```go
func main() {
	relay := khatru.NewRelay()

	// create blossom server with the relay and service URL
	bl := blossom.New(relay, "http://localhost:3334")

	// create a database for keeping track of blob metadata
	// (do not use the same database used for the relay events)
	bl.Store = blossom.EventStoreBlobIndexWrapper{Store: blobdb, ServiceURL: bl.ServiceURL}

	// implement the required storage functions
	bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error {
		// store the blob data somewhere
		return nil
	})
	bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) {
		// load and return the blob data
		return nil, nil
	})
	bl.DeleteBlob = append(bl.DeleteBlob, func(ctx context.Context, sha256 string) error {
		// delete the blob data
		return nil
	})

	http.ListenAndServe(":3334", relay)
}
```

## Storage Backend Integration

You can integrate any storage backend by implementing the three core functions (a sketch of a filesystem-backed version follows the list):

- `StoreBlob`: Save the blob data
- `LoadBlob`: Retrieve the blob data
- `DeleteBlob`: Remove the blob data

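As an illustration only (not part of khatru itself; the `blobDir` variable and the exact error handling are assumptions), a minimal filesystem-backed version of those three hooks could look like this:

```go
var blobDir = "/tmp/khatru-blobs" // hypothetical directory, pick your own

bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error {
	// write the blob to a file named after its hash
	return os.WriteFile(filepath.Join(blobDir, sha256), body, 0644)
})
bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) {
	// *os.File implements io.ReadSeeker, so we can return it directly
	return os.Open(filepath.Join(blobDir, sha256))
})
bl.DeleteBlob = append(bl.DeleteBlob, func(ctx context.Context, sha256 string) error {
	// remove the file; treat an already-missing file as success
	err := os.Remove(filepath.Join(blobDir, sha256))
	if errors.Is(err, os.ErrNotExist) {
		return nil
	}
	return err
})
```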
## Upload Restrictions

You can implement upload restrictions using the `RejectUpload` hook. Here's an example that limits file size and restricts uploads to whitelisted users:

```go
const maxFileSize = 10 * 1024 * 1024 // 10MB

var allowedUsers = map[string]bool{
	"pubkey1": true,
	"pubkey2": true,
}

bl.RejectUpload = append(bl.RejectUpload, func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int) {
	// check file size
	if size > maxFileSize {
		return true, "file too large", 413
	}

	// check if user is allowed
	if auth == nil || !allowedUsers[auth.PubKey] {
		return true, "unauthorized", 403
	}

	return false, "", 0
})
```

There are other `Reject*` hooks you can also implement, but this is the most important one.

## Tracking blob metadata

Blossom needs a database to keep track of blob metadata, for example in order to know which user owns each blob (and mind you that more than one user might own the same blob, so when one of them deletes it we don't actually delete it, because the other user still has a claim to it). The simplest way to do that currently is to rely on a wrapper that stores fake Nostr events in an eventstore, `EventStoreBlobIndexWrapper`, but other solutions can be used.

```go
db := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-blobstore"}
db.Init()

bl.Store = blossom.EventStoreBlobIndexWrapper{
	Store:      db,
	ServiceURL: bl.ServiceURL,
}
```

This will store blob metadata as special `kind:24242` events, but you shouldn't have to worry about that as the wrapper handles all the complexity of tracking ownership and managing the blob lifecycle. Just avoid reusing the datastore that holds the actual relay events unless you know what you're doing.
72
khatru/docs/core/embed.md
Normal file
72
khatru/docs/core/embed.md
Normal file
@@ -0,0 +1,72 @@
---
outline: deep
---

# Mixing a `khatru` relay with other HTTP handlers

If you already have a web server with all its HTML handlers or a JSON HTTP API or anything like that, something like:

```go
func main() {
	mux := http.NewServeMux()

	mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
	mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
	mux.HandleFunc("/page/{page}", handlePage)
	mux.HandleFunc("/", handleHomePage)

	log.Printf("listening at http://0.0.0.0:8080")
	http.ListenAndServe("0.0.0.0:8080", mux)
}
```

Then you can easily inject a relay or two there in alternative paths if you want:

```diff
mux := http.NewServeMux()

+ relay1 := khatru.NewRelay()
+ relay2 := khatru.NewRelay()
+ // and so on

mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
mux.HandleFunc("/page/{page}", handlePage)
mux.HandleFunc("/", handleHomePage)
+ mux.Handle("/relay1", relay1)
+ mux.Handle("/relay2", relay2)
+ // and so forth

log.Printf("listening at http://0.0.0.0:8080")
```

Imagine each of these relay handlers is different: each can use a different eventstore and have different policies for writing and reading.

## Exposing a relay interface at the root

If you want to expose your relay at the root path `/` that is also possible. You can just use it as the `mux` directly:

```go
func main() {
	relay := khatru.NewRelay()
	// ... -- relay configuration steps (omitted for brevity)

	mux := relay.Router() // the relay comes with its own http.ServeMux inside

	mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
	mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
	mux.HandleFunc("/page/{page}", handlePage)
	mux.HandleFunc("/", handleHomePage)

	log.Printf("listening at http://0.0.0.0:8080")
	http.ListenAndServe("0.0.0.0:8080", mux)
}
```

Every [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instance comes with its own [`http.ServeMux`](https://pkg.go.dev/net/http#ServeMux) inside. It ensures all requests are handled normally, but intercepts the requests that are pertinent to the relay operation, specifically the WebSocket requests, and the [NIP-11](https://nips.nostr.com/11) and [NIP-86](https://nips.nostr.com/86) HTTP requests.

## Exposing multiple relays at the same path or at the root

That's also possible, as long as you have a way of differentiating each incoming HTTP request at the middleware level and associating it with a `khatru.Relay` instance in the background -- for instance based on the request's `Host` header, as in the sketch below.

See [dynamic](../cookbook/dynamic) for an example that does that using the subdomain. [`countries`](https://git.fiatjaf.com/countries) does it using the requester country implied from its IP address.

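As a rough sketch of the idea (the relay set and the subdomain convention here are made up for illustration), a plain `http.Handler` can pick the target relay from the `Host` header and delegate to it, since each `khatru.Relay` is itself an `http.Handler`:

```go
package main

import (
	"net/http"
	"strings"

	"github.com/fiatjaf/khatru"
)

// hypothetical set of relays, one per subdomain
var relays = map[string]*khatru.Relay{
	"alice": khatru.NewRelay(),
	"bob":   khatru.NewRelay(),
}

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		subdomain := strings.Split(r.Host, ".")[0]
		if relay, ok := relays[subdomain]; ok {
			relay.ServeHTTP(w, r) // delegate to the matching relay
			return
		}
		http.NotFound(w, r)
	}))
}
```
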
99
khatru/docs/core/eventstore.md
Normal file
@@ -0,0 +1,99 @@
---
outline: deep
---

# Event Storage

Khatru doesn't make any assumptions about how you'll want to store events. Any function can be plugged in to the `StoreEvent`, `DeleteEvent`, `ReplaceEvent` and `QueryEvents` hooks.

However, the [`eventstore`](https://github.com/fiatjaf/eventstore) library has adapters that you can easily plug into `khatru`'s hooks.

## Using the `eventstore` library

The library includes many different adapters -- often called "backends" -- written by different people and with different levels of quality, reliability and speed.

For all of them you start by instantiating a struct containing some basic options and a pointer (a file path for local databases, a connection string for remote databases) to the data. Then you call `.Init()` and, if all is well, you're ready to start storing, querying and deleting events, so you can pass the respective functions to their `khatru` counterparts. These eventstores also expose a `.Close()` function that must be called if you're going to stop using the store while keeping your application open.

Here's an example with the [Badger](https://pkg.go.dev/github.com/fiatjaf/eventstore/badger) adapter, made for the [Badger](https://github.com/dgraph-io/badger) embedded key-value database:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/fiatjaf/eventstore/badger"
	"github.com/fiatjaf/khatru"
)

func main() {
	relay := khatru.NewRelay()

	db := badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"}
	if err := db.Init(); err != nil {
		panic(err)
	}

	relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
	relay.CountEvents = append(relay.CountEvents, db.CountEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
	relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)

	fmt.Println("running on :3334")
	http.ListenAndServe(":3334", relay)
}
```

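The example above never shuts down, so it skips `.Close()`; in a program where the store can stop being used while the process keeps running, the lifecycle would look roughly like this (a sketch reusing the same Badger backend):

```go
db := badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"}
if err := db.Init(); err != nil {
	panic(err)
}
defer db.Close() // release the store's resources once you're done with it

relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
```
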
[LMDB](https://pkg.go.dev/github.com/fiatjaf/eventstore/lmdb) works the same way.

[SQLite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3) also stores things locally so it only needs a `Path`.

[PostgreSQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/postgresql) and [MySQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/mysql) use remote connections to database servers, so they take a `DatabaseURL` parameter, but after that it's the same.

## Using two at a time

If you want to use two different adapters at the same time that's easy. Just add both to the corresponding slices:

```go
relay.StoreEvent = append(relay.StoreEvent, db1.SaveEvent, db2.SaveEvent)
relay.QueryEvents = append(relay.QueryEvents, db1.QueryEvents, db2.QueryEvents)
```

But that will duplicate events on both and then return duplicated events on each query.

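If you really do want to combine two stores behind a single query hook, you can de-duplicate by event ID yourself. This is just a sketch (reusing `db1` and `db2` from above), not something khatru does for you:

```go
relay.QueryEvents = append(relay.QueryEvents, func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	out := make(chan *nostr.Event)
	go func() {
		defer close(out)
		seen := make(map[string]struct{})
		for _, query := range []func(context.Context, nostr.Filter) (chan *nostr.Event, error){
			db1.QueryEvents,
			db2.QueryEvents,
		} {
			ch, err := query(ctx, filter)
			if err != nil {
				continue
			}
			for evt := range ch {
				if _, dup := seen[evt.ID]; dup {
					continue // already emitted by the other store
				}
				seen[evt.ID] = struct{}{}
				out <- evt
			}
		}
	}()
	return out, nil
})
```
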
## Sharding

You can do a kind of sharding by storing some classes of events in one store and others in another.

For example, maybe you want kind 1 events in `db1` and kind 30023 events in `db30023`:

```go
relay.StoreEvent = append(relay.StoreEvent, func(ctx context.Context, evt *nostr.Event) error {
	switch evt.Kind {
	case 1:
		return db1.SaveEvent(ctx, evt)
	case 30023:
		return db30023.SaveEvent(ctx, evt)
	default:
		return nil
	}
})
relay.QueryEvents = append(relay.QueryEvents, func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	for _, kind := range filter.Kinds {
		switch kind {
		case 1:
			filter1 := filter
			filter1.Kinds = []int{1}
			return db1.QueryEvents(ctx, filter1)
		case 30023:
			filter30023 := filter
			filter30023.Kinds = []int{30023}
			return db30023.QueryEvents(ctx, filter30023)
		default:
			return nil, nil
		}
	}
	// a filter that doesn't specify any kinds gets nothing from this handler
	return nil, nil
})
```

Note that with this setup at most one store answers each query: the first kind listed in the filter decides which one, if any.

85
khatru/docs/core/management.md
Normal file
@@ -0,0 +1,85 @@
---
outline: deep
---

# Management API

[NIP-86](https://nips.nostr.com/86) specifies a set of RPC methods for managing the boring aspects of relays, such as whitelisting or banning users, banning individual events, banning IPs and so on.

All [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instances expose a `ManagementAPI` field with a [`RelayManagementAPI`](https://pkg.go.dev/github.com/fiatjaf/khatru#RelayManagementAPI) instance inside, which can be used for creating handlers for each of the RPC methods.

There is also a generic `RejectAPICall`, a slice of functions that are called before any RPC method handler; if any of them returns true, the request is rejected.

The most basic implementation of a `RejectAPICall` handler would be one that checks the public key of the caller against a hardcoded public key of the relay owner:

```go
var owner = "<my-own-pubkey>"
var allowedPubkeys = make([]string, 0, 10)

func main() {
	relay := khatru.NewRelay()

	relay.ManagementAPI.RejectAPICall = append(relay.ManagementAPI.RejectAPICall,
		func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string) {
			user := khatru.GetAuthed(ctx)
			if user != owner {
				return true, "go away, intruder"
			}
			return false, ""
		},
	)

	relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error {
		allowedPubkeys = append(allowedPubkeys, pubkey)
		return nil
	}
	relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error {
		idx := slices.Index(allowedPubkeys, pubkey)
		if idx == -1 {
			return fmt.Errorf("pubkey already not allowed")
		}
		allowedPubkeys = slices.Delete(allowedPubkeys, idx, idx+1)
		return nil
	}
}
```

You can also provide no `RejectAPICall` handler at all and do the authorization inside each RPC handler instead.

In the following example any current member can include any other pubkey, and anyone who was added earlier is able to remove any pubkey that was added afterwards (not a very good idea, but it serves as an example).

```go
var allowedPubkeys = []string{"<my-own-pubkey>"}

func main() {
	relay := khatru.NewRelay()

	relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error {
		caller := khatru.GetAuthed(ctx)

		if slices.Contains(allowedPubkeys, caller) {
			allowedPubkeys = append(allowedPubkeys, pubkey)
			return nil
		}

		return fmt.Errorf("you're not authorized")
	}
	relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error {
		caller := khatru.GetAuthed(ctx)

		callerIdx := slices.Index(allowedPubkeys, caller)
		if callerIdx == -1 {
			return fmt.Errorf("you're not even allowed here")
		}

		targetIdx := slices.Index(allowedPubkeys, pubkey)
		if targetIdx < callerIdx {
			// target is a bigger OG than the caller, so it has bigger influence and can't be removed
			return fmt.Errorf("you're less powerful than the pubkey you're trying to remove")
		}

		// allow deletion since the target came after the caller
		allowedPubkeys = slices.Delete(allowedPubkeys, targetIdx, targetIdx+1)
		return nil
	}
}
```

63
khatru/docs/core/routing.md
Normal file
@@ -0,0 +1,63 @@
---
outline: deep
---

# Request Routing

Sometimes you have one (or more) set of policies that must be executed in sequence (for example, first you check for the presence of a tag, then later policies use that tag without checking again), and those policies only apply to some class of events while your relay still has to handle other classes. That can lead to cumbersome sets of rules that are always re-checking whether an event meets the requirements. This is where routing can help you.

It can also be handy if you get a [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) from somewhere else, like a library such as [`relay29`](https://github.com/fiatjaf/relay29), and you want to combine it with other policies without one interfering with the other. As in the example below:

```go
sk := os.Getenv("RELAY_SECRET_KEY")

// a relay for NIP-29 groups
groupsStore := badger.BadgerBackend{}
groupsStore.Init()
groupsRelay, _ := khatru29.Init(relay29.Options{Domain: "example.com", DB: groupsStore, SecretKey: sk})
// ...

// a relay for everything else
publicStore := slicestore.SliceStore{}
publicStore.Init()
publicRelay := khatru.NewRelay()
publicRelay.StoreEvent = append(publicRelay.StoreEvent, publicStore.SaveEvent)
publicRelay.QueryEvents = append(publicRelay.QueryEvents, publicStore.QueryEvents)
publicRelay.CountEvents = append(publicRelay.CountEvents, publicStore.CountEvents)
publicRelay.DeleteEvent = append(publicRelay.DeleteEvent, publicStore.DeleteEvent)
// ...

// a higher-level relay that just routes between the two above
router := khatru.NewRouter()

// route requests and events to the groups relay
router.Route().
	Req(func(filter nostr.Filter) bool {
		if _, hasHTag := filter.Tags["h"]; hasHTag {
			return true
		}
		return slices.ContainsFunc(filter.Kinds, func(k int) bool { return k == 39000 || k == 39001 || k == 39002 })
	}).
	Event(func(event *nostr.Event) bool {
		switch {
		case event.Kind <= 9021 && event.Kind >= 9000:
			return true
		case event.Kind <= 39010 && event.Kind >= 39000:
			return true
		case event.Kind <= 12 && event.Kind >= 9:
			return true
		case event.Tags.Find("h") != nil:
			return true
		default:
			return false
		}
	}).
	Relay(groupsRelay)

// route requests and events to the other
router.Route().
	Req(func(filter nostr.Filter) bool { return true }).
	Event(func(event *nostr.Event) bool { return true }).
	Relay(publicRelay)
```

79
khatru/docs/getting-started/index.md
Normal file
@@ -0,0 +1,79 @@
---
outline: deep
---

# Getting Started

Download the library:

```bash
go get github.com/fiatjaf/khatru
```

Include the library:

```go
import "github.com/fiatjaf/khatru"
```

Then in your `main()` function, instantiate a new `Relay`:

```go
relay := khatru.NewRelay()
```

Optionally, set up basic info about the relay that will be returned according to [NIP-11](https://nips.nostr.com/11):

```go
relay.Info.Name = "my relay"
relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"
relay.Info.Description = "this is my custom relay"
relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images"
```

Now we must set up the basic functions for accepting events and answering queries. We could make our own querying engine from scratch, but we can also use [eventstore](https://github.com/fiatjaf/eventstore). In this example we'll use the SQLite adapter:

```go
db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
if err := db.Init(); err != nil {
	panic(err)
}

relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
```

These are lists of functions that will be called, in order, every time an `EVENT` is received or a `REQ` query comes in. You can add more than one handler to each list, and a handler can do anything -- even read from some other server, but only in certain cases.

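As an illustration, here is a sketch of a second query handler that only contributes results for profile metadata requests, fetching them from somewhere else. `fetchProfilesFromSomewhereElse` is a hypothetical helper you would write yourself, and the snippet assumes the `slices` package is imported:

```go
relay.QueryEvents = append(relay.QueryEvents, func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	ch := make(chan *nostr.Event)
	if !slices.Contains(filter.Kinds, 0) {
		close(ch) // this handler only contributes kind-0 (profile metadata) results
		return ch, nil
	}
	go func() {
		defer close(ch)
		// fetchProfilesFromSomewhereElse is a hypothetical function you would implement
		for _, evt := range fetchProfilesFromSomewhereElse(ctx, filter) {
			ch <- evt
		}
	}()
	return ch, nil
})
```
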
The next step is adding some protection, because maybe we don't want to allow _anyone_ to write to our relay. Maybe we want to only allow people that have a pubkey starting with `"a"`, `"b"` or `"c"`:

```go
relay.RejectEvent = append(relay.RejectEvent, func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
	firstHexChar := event.PubKey[0:1]
	if firstHexChar == "a" || firstHexChar == "b" || firstHexChar == "c" {
		return false, "" // allow
	}
	return true, "you're not allowed in this shard"
})
```

We can also make use of some default policies that come bundled with `khatru`:

```go
import "github.com/fiatjaf/khatru/policies" // implied

relay.RejectEvent = append(relay.RejectEvent,
	policies.PreventLargeTags(120),
	policies.PreventTimestampsInThePast(time.Hour*2),
	policies.PreventTimestampsInTheFuture(time.Minute*30),
)
```

There are many other ways to customize the relay behavior. Take a look at the [`Relay` struct docs](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) for more, or read the pages on the sidebar.

The last step is actually running the server. Our relay is an `http.Handler`, so it can just be run directly with `http.ListenAndServe()` from the standard library:

```go
fmt.Println("running on :3334")
http.ListenAndServe(":3334", relay)
```

And that's it.

57
khatru/docs/index.md
Normal file
@@ -0,0 +1,57 @@
---
layout: home

hero:
  name: khatru
  text: a framework for making Nostr relays
  tagline: write your custom relay with code over configuration
  actions:
    - theme: brand
      text: Get Started
      link: /getting-started

features:
  - title: It's a library
    icon: 🐢
    link: /getting-started
    details: This is not an executable that you have to tweak with config files, it's a library that you import and use, so you just write code and it does exactly what you want.
  - title: It's very very customizable
    icon: 🎶
    link: /core/embed
    details: Run arbitrary functions to reject events, reject filters, overwrite results of queries, perform actual queries, mix the relay stuff with other HTTP handlers or even run it inside an existing website.
  - title: It plugs into event stores easily
    icon: 📦
    link: /core/eventstore
    details: khatru's companion, the `eventstore` library, provides all methods for storing and querying events efficiently from SQLite, LMDB, Postgres, Badger and others.
  - title: It supports NIP-42 AUTH
    icon: 🪪
    link: /core/auth
    details: You can check if a client is authenticated or request AUTH anytime, or reject an event or a filter with an "auth-required:" and it will be handled automatically.
  - title: It supports NIP-86 Management API
    icon: 🛠️
    link: /core/management
    details: You just define your custom handlers for each RPC call and they will be exposed appropriately to management clients.
  - title: It's written in Go
    icon: 🛵
    link: https://pkg.go.dev/github.com/fiatjaf/khatru
    details: That means it is fast and lightweight, you can learn the language in 5 minutes and it builds your relay into a single binary that's easy to ship and deploy.
---

## A glimpse of `khatru`'s power

It allows you to create a fully-functional relay in 7 lines of code:

```go
func main() {
	relay := khatru.NewRelay()
	db := badger.BadgerBackend{Path: "/tmp/khatru-badgern-tmp"}
	db.Init()
	relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
	relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
	http.ListenAndServe(":3334", relay)
}
```

After that you can customize it in infinite ways. See the links above.

7
khatru/docs/justfile
Normal file
@@ -0,0 +1,7 @@
export PATH := "./node_modules/.bin:" + env_var('PATH')

dev:
    vitepress dev

build:
    vitepress build

BIN
khatru/docs/logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 34 KiB |
5
khatru/docs/package.json
Normal file
@@ -0,0 +1,5 @@
{
  "dependencies": {
    "vitepress": "^1.3.0"
  }
}

38
khatru/docs/why.md
Normal file
@@ -0,0 +1,38 @@
# Why `khatru`?

If you want to craft a relay that isn't completely dumb, but is instead supposed to

* have its own custom policies for accepting events;
* handle requests for stored events using data from multiple sources;
* require users to authenticate for some operations and not for others;
* and other stuff,

`khatru` provides a simple framework for creating your custom relay without having to reimplement it all from scratch or hack into other relay codebases.

## Use cases

`khatru` is being used today in the real world by

* [pyramid](https://github.com/github-tijlxyz/khatru-pyramid), a relay with an invite-based whitelisting system similar to [lobste.rs](https://lobste.rs)
* [triflector](https://github.com/coracle-social/triflector), a relay which enforces authentication based on custom policy
* [countries](https://git.fiatjaf.com/countries), a relay that stores and serves content differently according to the country of the reader or writer
* [jingle](https://github.com/fiatjaf/jingle), a simple relay that exposes part of `khatru`'s configuration options to JavaScript code supplied by the user that is interpreted at runtime
* [njump](https://git.njump.me/njump), a Nostr gateway to the web that also serves its cached content over a relay interface
* [song](https://git.fiatjaf.com/song), a personal git server that comes with an embedded relay dedicated to dealing with [NIP-34](https://nips.nostr.com/34) git-related Nostr events
* [relay29](https://github.com/fiatjaf/relay29), a relay that powers most of the [NIP-29](https://nips.nostr.com/29) Nostr groups ecosystem
* [fiatjaf.com](https://fiatjaf.com), a personal website that serves the same content as HTML but also as Nostr events
* [gm-relay](https://github.com/ptrio42/gm-relay), a relay that only accepts GM notes once a day

## Other possible use cases

Other possible use cases, still not developed, include:

* Bridges: `khatru` was initially developed to serve as an RSS-to-Nostr bridge server that would fetch RSS feeds on demand in order to serve them to Nostr clients. Other similar use cases could fit.
* Paid relays: Nostr has multiple relays that charge for write-access currently, but there are many other unexplored ways to make this scheme work: charge per note, charge per month, charge per month per note, have different payment methods, and so on.
* Other whitelisting schemes: _pyramid_ implements a cool inviting scheme for granting access to the relay, same for _triflector_, but there are infinite other ways to grant people access to an exclusive or community relay.
* Just-in-time content generation: instead of storing a bunch of signed JSON and serving that to clients, there could be relays that store data in a more compact format and turn it into Nostr events at the time they receive a request from a Nostr client -- or relays that do some kind of live data generation based on who is connected, not storing anything.
* Community relays: some internet communities may want relays that restrict writing or browsing of content only to their members, essentially making it a closed group -- or it could be closed for outsiders to write but public for them to read, and vice-versa.
* Automated moderation schemes: relays that are owned by a group (either a static or a dynamic group) can rely on signals from their members, like mutes or reports, to decide what content to allow in their domains and what to disallow, making crowdsourced moderation easy.
* Curation: in the same way that community relays can deal with unwanted content, they can also perform curation based on signals from their members (for example, if a member of the relay likes some note from someone outside the relay, that note can be fetched and stored), creating a dynamic relay that can be browsed by anyone who shares the same interests as that community.
* Local relays: a relay that can only be browsed by people using the WiFi connection of some event or some building, serving as a way to share temporary or restricted content that only interests people sharing that circumstance.
* Cool experiments: relays that only allow one note per user per day, relays that require proof-of-work on event ids, relays that require engagement otherwise you get kicked, relays that return events in different orderings, relays that impose arbitrary funny rules on notes in order for them to be accepted (i.e. they must contain the word "poo"), I don't know!

26
khatru/ephemeral.go
Normal file
@@ -0,0 +1,26 @@
package khatru

import (
	"context"
	"errors"

	"github.com/nbd-wtf/go-nostr"
)

// handleEphemeral runs an ephemeral event through the RejectEvent policies and,
// if accepted, notifies the OnEphemeralEvent callbacks without storing anything.
func (rl *Relay) handleEphemeral(ctx context.Context, evt *nostr.Event) error {
	for _, reject := range rl.RejectEvent {
		if reject, msg := reject(ctx, evt); reject {
			if msg == "" {
				return errors.New("blocked: no reason")
			} else {
				return errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
			}
		}
	}

	for _, oee := range rl.OnEphemeralEvent {
		oee(ctx, evt)
	}

	return nil
}

28
khatru/examples/basic-badger/main.go
Normal file
@@ -0,0 +1,28 @@
package main

import (
	"fmt"
	"net/http"

	"github.com/fiatjaf/eventstore/badger"
	"github.com/fiatjaf/khatru"
)

func main() {
	relay := khatru.NewRelay()

	db := badger.BadgerBackend{Path: "/tmp/khatru-badgern-tmp"}
	if err := db.Init(); err != nil {
		panic(err)
	}

	relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
	relay.CountEvents = append(relay.CountEvents, db.CountEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
	relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
	relay.Negentropy = true

	fmt.Println("running on :3334")
	http.ListenAndServe(":3334", relay)
}

27
khatru/examples/basic-elasticsearch/main.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/fiatjaf/eventstore/elasticsearch"
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := elasticsearch.ElasticsearchStorage{URL: ""}
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
29
khatru/examples/basic-lmdb/main.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/fiatjaf/eventstore/lmdb"
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := lmdb.LMDBBackend{Path: "/tmp/khatru-lmdb-tmp"}
|
||||
os.MkdirAll(db.Path, 0o755)
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
27
khatru/examples/basic-postgres/main.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/fiatjaf/eventstore/postgresql"
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := postgresql.PostgresBackend{DatabaseURL: "postgresql://localhost:5432/tmp-khatru-relay"}
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
27
khatru/examples/basic-sqlite3/main.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/fiatjaf/eventstore/sqlite3"
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/khatru-sqlite-tmp"}
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
46
khatru/examples/blossom/main.go
Normal file
@@ -0,0 +1,46 @@
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/fiatjaf/eventstore/badger"
	"github.com/fiatjaf/khatru"
	"github.com/fiatjaf/khatru/blossom"
)

func main() {
	relay := khatru.NewRelay()

	db := &badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"}
	if err := db.Init(); err != nil {
		panic(err)
	}
	relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
	relay.CountEvents = append(relay.CountEvents, db.CountEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
	relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)

	bdb := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-tmp"}
	if err := bdb.Init(); err != nil {
		panic(err)
	}
	bl := blossom.New(relay, "http://localhost:3334")
	bl.Store = blossom.EventStoreBlobIndexWrapper{Store: bdb, ServiceURL: bl.ServiceURL}
	bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error {
		fmt.Println("storing", sha256, len(body))
		return nil
	})
	bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) {
		fmt.Println("loading", sha256)
		blob := strings.NewReader("aaaaa")
		return blob, nil
	})

	fmt.Println("running on :3334")
	http.ListenAndServe(":3334", relay)
}

40
khatru/examples/exclusive/main.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/fiatjaf/eventstore/lmdb"
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/fiatjaf/khatru/policies"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := lmdb.LMDBBackend{Path: "/tmp/exclusive"}
|
||||
os.MkdirAll(db.Path, 0o755)
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
|
||||
relay.RejectEvent = append(relay.RejectEvent, policies.PreventTooManyIndexableTags(10, nil, nil))
|
||||
relay.RejectFilter = append(relay.RejectFilter, policies.NoComplexFilters)
|
||||
|
||||
relay.OnEventSaved = append(relay.OnEventSaved, func(ctx context.Context, event *nostr.Event) {
|
||||
})
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
|
||||
func deleteStuffThatCanBeFoundElsewhere() {
|
||||
}
|
||||
BIN
khatru/examples/readme-demo/demo-memory
Executable file
Binary file not shown.
98
khatru/examples/readme-demo/main.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/fiatjaf/khatru/policies"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// create the relay instance
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
// set up some basic properties (will be returned on the NIP-11 endpoint)
|
||||
relay.Info.Name = "my relay"
|
||||
relay.Info.PubKey = "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"
|
||||
relay.Info.Description = "this is my custom relay"
|
||||
relay.Info.Icon = "https://external-content.duckduckgo.com/iu/?u=https%3A%2F%2Fliquipedia.net%2Fcommons%2Fimages%2F3%2F35%2FSCProbe.jpg&f=1&nofb=1&ipt=0cbbfef25bce41da63d910e86c3c343e6c3b9d63194ca9755351bb7c2efa3359&ipo=images"
|
||||
|
||||
// you must bring your own storage scheme -- if you want to have any
|
||||
store := make(map[string]*nostr.Event, 120)
|
||||
|
||||
// set up the basic relay functions
|
||||
relay.StoreEvent = append(relay.StoreEvent,
|
||||
func(ctx context.Context, event *nostr.Event) error {
|
||||
store[event.ID] = event
|
||||
return nil
|
||||
},
|
||||
)
|
||||
relay.QueryEvents = append(relay.QueryEvents,
|
||||
func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
|
||||
ch := make(chan *nostr.Event)
|
||||
go func() {
|
||||
for _, evt := range store {
|
||||
if filter.Matches(evt) {
|
||||
ch <- evt
|
||||
}
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch, nil
|
||||
},
|
||||
)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent,
|
||||
func(ctx context.Context, event *nostr.Event) error {
|
||||
delete(store, event.ID)
|
||||
return nil
|
||||
},
|
||||
)
|
||||
|
||||
// there are many other configurable things you can set
|
||||
relay.RejectEvent = append(relay.RejectEvent,
|
||||
// built-in policies
|
||||
policies.ValidateKind,
|
||||
|
||||
// define your own policies
|
||||
policies.PreventLargeTags(100),
|
||||
func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if event.PubKey == "fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52" {
|
||||
return true, "we don't allow this person to write here"
|
||||
}
|
||||
return false, "" // anyone else can
|
||||
},
|
||||
)
|
||||
|
||||
// you can request auth by rejecting an event or a request with the prefix "auth-required: "
|
||||
relay.RejectFilter = append(relay.RejectFilter,
|
||||
// built-in policies
|
||||
policies.NoComplexFilters,
|
||||
|
||||
// define your own policies
|
||||
func(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
if pubkey := khatru.GetAuthed(ctx); pubkey != "" {
|
||||
log.Printf("request from %s\n", pubkey)
|
||||
return false, ""
|
||||
}
|
||||
return true, "auth-required: only authenticated users can read from this relay"
|
||||
// (this will cause an AUTH message to be sent and then a CLOSED message such that clients can
|
||||
// authenticate and then request again)
|
||||
},
|
||||
)
|
||||
// check the docs for more goodies!
|
||||
|
||||
mux := relay.Router()
|
||||
// set up other http handlers
|
||||
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("content-type", "text/html")
|
||||
fmt.Fprintf(w, `<b>welcome</b> to my relay!`)
|
||||
})
|
||||
|
||||
// start the server
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
70
khatru/examples/routing/main.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"slices"
|
||||
|
||||
"github.com/fiatjaf/eventstore/slicestore"
|
||||
"github.com/fiatjaf/eventstore/sqlite3"
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db1 := slicestore.SliceStore{}
|
||||
db1.Init()
|
||||
r1 := khatru.NewRelay()
|
||||
r1.StoreEvent = append(r1.StoreEvent, db1.SaveEvent)
|
||||
r1.QueryEvents = append(r1.QueryEvents, db1.QueryEvents)
|
||||
r1.CountEvents = append(r1.CountEvents, db1.CountEvents)
|
||||
r1.DeleteEvent = append(r1.DeleteEvent, db1.DeleteEvent)
|
||||
|
||||
db2 := sqlite3.SQLite3Backend{DatabaseURL: "/tmp/t"}
|
||||
db2.Init()
|
||||
r2 := khatru.NewRelay()
|
||||
r2.StoreEvent = append(r2.StoreEvent, db2.SaveEvent)
|
||||
r2.QueryEvents = append(r2.QueryEvents, db2.QueryEvents)
|
||||
r2.CountEvents = append(r2.CountEvents, db2.CountEvents)
|
||||
r2.DeleteEvent = append(r2.DeleteEvent, db2.DeleteEvent)
|
||||
|
||||
db3 := slicestore.SliceStore{}
|
||||
db3.Init()
|
||||
r3 := khatru.NewRelay()
|
||||
r3.StoreEvent = append(r3.StoreEvent, db3.SaveEvent)
|
||||
r3.QueryEvents = append(r3.QueryEvents, db3.QueryEvents)
|
||||
r3.CountEvents = append(r3.CountEvents, db3.CountEvents)
|
||||
r3.DeleteEvent = append(r3.DeleteEvent, db3.DeleteEvent)
|
||||
|
||||
router := khatru.NewRouter()
|
||||
|
||||
router.Route().
|
||||
Req(func(filter nostr.Filter) bool {
|
||||
return slices.Contains(filter.Kinds, 30023)
|
||||
}).
|
||||
Event(func(event *nostr.Event) bool {
|
||||
return event.Kind == 30023
|
||||
}).
|
||||
Relay(r1)
|
||||
|
||||
router.Route().
|
||||
Req(func(filter nostr.Filter) bool {
|
||||
return slices.Contains(filter.Kinds, 1) && slices.Contains(filter.Tags["t"], "spam")
|
||||
}).
|
||||
Event(func(event *nostr.Event) bool {
|
||||
return event.Kind == 1 && event.Tags.FindWithValue("t", "spam") != nil
|
||||
}).
|
||||
Relay(r2)
|
||||
|
||||
router.Route().
|
||||
Req(func(filter nostr.Filter) bool {
|
||||
return slices.Contains(filter.Kinds, 1)
|
||||
}).
|
||||
Event(func(event *nostr.Event) bool {
|
||||
return event.Kind == 1
|
||||
}).
|
||||
Relay(r3)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", router)
|
||||
}
|
||||
150
khatru/expiration.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip40"
|
||||
)
|
||||
|
||||
type expiringEvent struct {
|
||||
id string
|
||||
expiresAt nostr.Timestamp
|
||||
}
|
||||
|
||||
type expiringEventHeap []expiringEvent
|
||||
|
||||
func (h expiringEventHeap) Len() int { return len(h) }
|
||||
func (h expiringEventHeap) Less(i, j int) bool { return h[i].expiresAt < h[j].expiresAt }
|
||||
func (h expiringEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
|
||||
func (h *expiringEventHeap) Push(x interface{}) {
|
||||
*h = append(*h, x.(expiringEvent))
|
||||
}
|
||||
|
||||
func (h *expiringEventHeap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
type expirationManager struct {
|
||||
events expiringEventHeap
|
||||
mu sync.Mutex
|
||||
relay *Relay
|
||||
interval time.Duration
|
||||
initialScanDone bool
|
||||
}
|
||||
|
||||
func newExpirationManager(relay *Relay) *expirationManager {
|
||||
return &expirationManager{
|
||||
events: make(expiringEventHeap, 0),
|
||||
relay: relay,
|
||||
interval: time.Hour,
|
||||
}
|
||||
}
|
||||
|
||||
func (em *expirationManager) start(ctx context.Context) {
|
||||
ticker := time.NewTicker(em.interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
if !em.initialScanDone {
|
||||
em.initialScan(ctx)
|
||||
em.initialScanDone = true
|
||||
}
|
||||
|
||||
em.checkExpiredEvents(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (em *expirationManager) initialScan(ctx context.Context) {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
|
||||
// query all events
|
||||
ctx = context.WithValue(ctx, internalCallKey, struct{}{})
|
||||
for _, query := range em.relay.QueryEvents {
|
||||
ch, err := query(ctx, nostr.Filter{})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for evt := range ch {
|
||||
if expiresAt := nip40.GetExpiration(evt.Tags); expiresAt != -1 {
|
||||
heap.Push(&em.events, expiringEvent{
|
||||
id: evt.ID,
|
||||
expiresAt: expiresAt,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
heap.Init(&em.events)
|
||||
}
|
||||
|
||||
func (em *expirationManager) checkExpiredEvents(ctx context.Context) {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
|
||||
now := nostr.Now()
|
||||
|
||||
// keep deleting events from the heap as long as they're expired
|
||||
for em.events.Len() > 0 {
|
||||
next := em.events[0]
|
||||
if now < next.expiresAt {
|
||||
break
|
||||
}
|
||||
|
||||
heap.Pop(&em.events)
|
||||
|
||||
ctx := context.WithValue(ctx, internalCallKey, struct{}{})
|
||||
for _, query := range em.relay.QueryEvents {
|
||||
ch, err := query(ctx, nostr.Filter{IDs: []string{next.id}})
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if evt := <-ch; evt != nil {
|
||||
for _, del := range em.relay.DeleteEvent {
|
||||
del(ctx, evt)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (em *expirationManager) trackEvent(evt *nostr.Event) {
|
||||
if expiresAt := nip40.GetExpiration(evt.Tags); expiresAt != -1 {
|
||||
em.mu.Lock()
|
||||
heap.Push(&em.events, expiringEvent{
|
||||
id: evt.ID,
|
||||
expiresAt: expiresAt,
|
||||
})
|
||||
em.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (em *expirationManager) removeEvent(id string) {
|
||||
em.mu.Lock()
|
||||
defer em.mu.Unlock()
|
||||
|
||||
// Find and remove the event from the heap
|
||||
for i := 0; i < len(em.events); i++ {
|
||||
if em.events[i].id == id {
|
||||
heap.Remove(&em.events, i)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
65
khatru/get-started.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
func (rl *Relay) Router() *http.ServeMux {
|
||||
return rl.serveMux
|
||||
}
|
||||
|
||||
func (rl *Relay) SetRouter(mux *http.ServeMux) {
|
||||
rl.serveMux = mux
|
||||
}
|
||||
|
||||
// Start creates an http server and starts listening on given host and port.
|
||||
func (rl *Relay) Start(host string, port int, started ...chan bool) error {
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rl.Addr = ln.Addr().String()
|
||||
rl.httpServer = &http.Server{
|
||||
Handler: cors.Default().Handler(rl),
|
||||
Addr: addr,
|
||||
WriteTimeout: 2 * time.Second,
|
||||
ReadTimeout: 2 * time.Second,
|
||||
IdleTimeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
// notify caller that we're starting
|
||||
for _, started := range started {
|
||||
close(started)
|
||||
}
|
||||
|
||||
if err := rl.httpServer.Serve(ln); err == http.ErrServerClosed {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown sends a websocket close control message to all connected clients.
|
||||
func (rl *Relay) Shutdown(ctx context.Context) {
|
||||
rl.httpServer.Shutdown(ctx)
|
||||
rl.clientsMutex.Lock()
|
||||
defer rl.clientsMutex.Unlock()
|
||||
for ws := range rl.clients {
|
||||
ws.conn.WriteControl(websocket.CloseMessage, nil, time.Now().Add(time.Second))
|
||||
ws.cancel()
|
||||
ws.conn.Close()
|
||||
}
|
||||
clear(rl.clients)
|
||||
rl.listeners = rl.listeners[:0]
|
||||
}
|
||||
72
khatru/go.mod
Normal file
@@ -0,0 +1,72 @@
|
||||
module github.com/fiatjaf/khatru
|
||||
|
||||
go 1.24.1
|
||||
|
||||
require (
|
||||
github.com/bep/debounce v1.2.1
|
||||
github.com/fasthttp/websocket v1.5.12
|
||||
github.com/fiatjaf/eventstore v0.16.2
|
||||
github.com/liamg/magic v0.0.1
|
||||
github.com/mailru/easyjson v0.9.0
|
||||
github.com/nbd-wtf/go-nostr v0.51.8
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||
github.com/rs/cors v1.11.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
)
|
||||
|
||||
require (
|
||||
fiatjaf.com/lib v0.2.0 // indirect
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
|
||||
github.com/PowerDNS/lmdb-go v1.9.3 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/aquasecurity/esquery v0.2.0 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
|
||||
github.com/bytedance/sonic v1.13.2 // indirect
|
||||
github.com/bytedance/sonic/loader v0.2.4 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cloudwego/base64x v0.1.5 // indirect
|
||||
github.com/coder/websocket v1.8.13 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/dgraph-io/badger/v4 v4.5.0 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.1.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect
|
||||
github.com/elastic/go-elasticsearch/v8 v8.16.0 // indirect
|
||||
github.com/fatih/structs v1.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
|
||||
github.com/google/flatbuffers v24.12.23+incompatible // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.24 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasthttp v1.59.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.32.0 // indirect
|
||||
golang.org/x/arch v0.15.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect
|
||||
golang.org/x/net v0.37.0 // indirect
|
||||
golang.org/x/sys v0.31.0 // indirect
|
||||
google.golang.org/protobuf v1.36.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
254
khatru/go.sum
Normal file
@@ -0,0 +1,254 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
fiatjaf.com/lib v0.2.0 h1:TgIJESbbND6GjOgGHxF5jsO6EMjuAxIzZHPo5DXYexs=
|
||||
fiatjaf.com/lib v0.2.0/go.mod h1:Ycqq3+mJ9jAWu7XjbQI1cVr+OFgnHn79dQR5oTII47g=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3 h1:AUMY2pZT8WRpkEv39I9Id3MuoHd+NZbTVpNhruVkPTg=
|
||||
github.com/PowerDNS/lmdb-go v1.9.3/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/aquasecurity/esquery v0.2.0 h1:9WWXve95TE8hbm3736WB7nS6Owl8UGDeu+0jiyE9ttA=
|
||||
github.com/aquasecurity/esquery v0.2.0/go.mod h1:VU+CIFR6C+H142HHZf9RUkp4Eedpo9UrEKeCQHWf9ao=
|
||||
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
|
||||
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/bytedance/sonic v1.13.1 h1:Jyd5CIvdFnkOWuKXr+wm4Nyk2h0yAFsr8ucJgEasO3g=
|
||||
github.com/bytedance/sonic v1.13.1/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
|
||||
github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ=
|
||||
github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
|
||||
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
|
||||
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
|
||||
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dgraph-io/badger/v4 v4.5.0 h1:TeJE3I1pIWLBjYhIYCA1+uxrjWEoJXImFBMEBVSm16g=
|
||||
github.com/dgraph-io/badger/v4 v4.5.0/go.mod h1:ysgYmIeG8dS/E8kwxT7xHyc7MkmwNYLRoYnFbr7387A=
|
||||
github.com/dgraph-io/ristretto/v2 v2.1.0 h1:59LjpOJLNDULHh8MC4UaegN52lC4JnO2dITsie/Pa8I=
|
||||
github.com/dgraph-io/ristretto/v2 v2.1.0/go.mod h1:uejeqfYXpUomfse0+lO+13ATz4TypQYLJZzBSAemuB4=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
|
||||
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHoSjSRSJCApolgfthA=
|
||||
github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.6.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
|
||||
github.com/elastic/go-elasticsearch/v8 v8.16.0 h1:f7bR+iBz8GTAVhwyFO3hm4ixsz2eMaEy0QroYnXV3jE=
|
||||
github.com/elastic/go-elasticsearch/v8 v8.16.0/go.mod h1:lGMlgKIbYoRvay3xWBeKahAiJOgmFDsjZC39nmO3H64=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fasthttp/websocket v1.5.12 h1:e4RGPpWW2HTbL3zV0Y/t7g0ub294LkiuXXUuTOUInlE=
|
||||
github.com/fasthttp/websocket v1.5.12/go.mod h1:I+liyL7/4moHojiOgUOIKEWm9EIxHqxZChS+aMFltyg=
|
||||
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fiatjaf/eventstore v0.16.2 h1:h4rHwSwPcqAKqWUsAbYWUhDeSgm2Kp+PBkJc3FgBYu4=
|
||||
github.com/fiatjaf/eventstore v0.16.2/go.mod h1:0gU8fzYO/bG+NQAVlHtJWOlt3JKKFefh5Xjj2d1dLIs=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/flatbuffers v24.12.23+incompatible h1:ubBKR94NR4pXUCY/MUsRVzd9umNW7ht7EG9hHfS9FX8=
|
||||
github.com/google/flatbuffers v24.12.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/jgroeneveld/schema v1.0.0 h1:J0E10CrOkiSEsw6dfb1IfrDJD14pf6QLVJ3tRPl/syI=
|
||||
github.com/jgroeneveld/schema v1.0.0/go.mod h1:M14lv7sNMtGvo3ops1MwslaSYgDYxrSmbzWIQ0Mr5rs=
|
||||
github.com/jgroeneveld/trial v2.0.0+incompatible h1:d59ctdgor+VqdZCAiUfVN8K13s0ALDioG5DWwZNtRuQ=
|
||||
github.com/jgroeneveld/trial v2.0.0+incompatible/go.mod h1:I6INLW96EN8WysNBXUFI3M4RIC8ePg9ntAc/Wy+U/+M=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
|
||||
github.com/liamg/magic v0.0.1 h1:Ru22ElY+sCh6RvRTWjQzKKCxsEco8hE0co8n1qe7TBM=
|
||||
github.com/liamg/magic v0.0.1/go.mod h1:yQkOmZZI52EA+SQ2xyHpVw8fNvTBruF873Y+Vt6S+fk=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
|
||||
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/nbd-wtf/go-nostr v0.51.7 h1:dGjtaaFQ1kA3H+vF8wt9a9WYl54K8C0JmVDf4cp+a4A=
|
||||
github.com/nbd-wtf/go-nostr v0.51.7/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg=
|
||||
github.com/nbd-wtf/go-nostr v0.51.8 h1:CIoS+YqChcm4e1L1rfMZ3/mIwTz4CwApM2qx7MHNzmE=
|
||||
github.com/nbd-wtf/go-nostr v0.51.8/go.mod h1:d6+DfvMWYG5pA3dmNMBJd6WCHVDDhkXbHqvfljf0Gzg=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
|
||||
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.59.0 h1:Qu0qYHfXvPk1mSLNqcFtEk6DpxgA26hy6bmydotDpRI=
|
||||
github.com/valyala/fasthttp v1.59.0/go.mod h1:GTxNb9Bc6r2a9D0TWNSPwDz78UxnTGBViY3xZNEqyYU=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
|
||||
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
|
||||
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
|
||||
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
|
||||
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
|
||||
golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw=
|
||||
golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw=
|
||||
golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
|
||||
430
khatru/handlers.go
Normal file
@@ -0,0 +1,430 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/bep/debounce"
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip42"
|
||||
"github.com/nbd-wtf/go-nostr/nip45"
|
||||
"github.com/nbd-wtf/go-nostr/nip45/hyperloglog"
|
||||
"github.com/nbd-wtf/go-nostr/nip70"
|
||||
"github.com/nbd-wtf/go-nostr/nip77"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// ServeHTTP implements the http.Handler interface.
|
||||
func (rl *Relay) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
corsMiddleware := cors.New(cors.Options{
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedMethods: []string{
|
||||
http.MethodHead,
|
||||
http.MethodGet,
|
||||
http.MethodPost,
|
||||
http.MethodPut,
|
||||
http.MethodPatch,
|
||||
http.MethodDelete,
|
||||
},
|
||||
AllowedHeaders: []string{"Authorization", "*"},
|
||||
MaxAge: 86400,
|
||||
})
|
||||
|
||||
if r.Header.Get("Upgrade") == "websocket" {
|
||||
rl.HandleWebsocket(w, r)
|
||||
} else if r.Header.Get("Accept") == "application/nostr+json" {
|
||||
corsMiddleware.Handler(http.HandlerFunc(rl.HandleNIP11)).ServeHTTP(w, r)
|
||||
} else if r.Header.Get("Content-Type") == "application/nostr+json+rpc" {
|
||||
corsMiddleware.Handler(http.HandlerFunc(rl.HandleNIP86)).ServeHTTP(w, r)
|
||||
} else {
|
||||
corsMiddleware.Handler(rl.serveMux).ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
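// HandleWebsocket runs the RejectConnection hooks, upgrades the request to a websocket, issues a NIP-42 AUTH challenge, registers the client, and then starts the read and ping loops.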
func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
||||
for _, reject := range rl.RejectConnection {
|
||||
if reject(r) {
|
||||
w.WriteHeader(429) // Too many requests
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := rl.upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
rl.Log.Printf("failed to upgrade websocket: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(rl.PingPeriod)
|
||||
|
||||
// NIP-42 challenge
|
||||
challenge := make([]byte, 8)
|
||||
rand.Read(challenge)
|
||||
|
||||
ws := &WebSocket{
|
||||
conn: conn,
|
||||
Request: r,
|
||||
Challenge: hex.EncodeToString(challenge),
|
||||
negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
|
||||
}
|
||||
ws.Context, ws.cancel = context.WithCancel(context.Background())
|
||||
|
||||
rl.clientsMutex.Lock()
|
||||
rl.clients[ws] = make([]listenerSpec, 0, 2)
|
||||
rl.clientsMutex.Unlock()
|
||||
|
||||
ctx, cancel := context.WithCancel(
|
||||
context.WithValue(
|
||||
context.Background(),
|
||||
wsKey, ws,
|
||||
),
|
||||
)
|
||||
|
||||
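// kill tears down this connection: it runs the OnDisconnect hooks, stops the ping ticker, cancels both contexts, closes the socket, and removes the client and its listeners.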
kill := func() {
|
||||
for _, ondisconnect := range rl.OnDisconnect {
|
||||
ondisconnect(ctx)
|
||||
}
|
||||
|
||||
ticker.Stop()
|
||||
cancel()
|
||||
ws.cancel()
|
||||
ws.conn.Close()
|
||||
|
||||
rl.removeClientAndListeners(ws)
|
||||
}
|
||||
|
||||
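// reader goroutine: enforces the read limit and pong deadline, runs the OnConnect hooks, then parses each incoming message and handles the envelope in its own goroutine.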
go func() {
|
||||
defer kill()
|
||||
|
||||
ws.conn.SetReadLimit(rl.MaxMessageSize)
|
||||
ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
ws.conn.SetPongHandler(func(string) error {
|
||||
ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
return nil
|
||||
})
|
||||
|
||||
for _, onconnect := range rl.OnConnect {
|
||||
onconnect(ctx)
|
||||
}
|
||||
|
||||
smp := nostr.NewMessageParser()
|
||||
|
||||
for {
|
||||
typ, msgb, err := ws.conn.ReadMessage()
|
||||
if err != nil {
|
||||
if websocket.IsUnexpectedCloseError(
|
||||
err,
|
||||
websocket.CloseNormalClosure, // 1000
|
||||
websocket.CloseGoingAway, // 1001
|
||||
websocket.CloseNoStatusReceived, // 1005
|
||||
websocket.CloseAbnormalClosure, // 1006
|
||||
4537, // some client seems to send many of these
|
||||
) {
|
||||
rl.Log.Printf("unexpected close error from %s: %v\n", GetIPFromRequest(r), err)
|
||||
}
|
||||
ws.cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if typ == websocket.PingMessage {
|
||||
ws.WriteMessage(websocket.PongMessage, nil)
|
||||
continue
|
||||
}
|
||||
|
||||
// this is safe because ReadMessage() will always create a new slice
|
||||
message := unsafe.String(unsafe.SliceData(msgb), len(msgb))
|
||||
|
||||
// parse messages sequentially, otherwise sonic breaks
|
||||
envelope, err := smp.ParseMessage(message)
|
||||
|
||||
// then delegate to the goroutine
|
||||
go func(message string) {
|
||||
if err != nil {
|
||||
if err == nostr.UnknownLabel && rl.Negentropy {
|
||||
envelope = nip77.ParseNegMessage(message)
|
||||
}
|
||||
if envelope == nil {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope("failed to parse envelope: " + err.Error()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch env := envelope.(type) {
|
||||
case *nostr.EventEnvelope:
|
||||
// check id
|
||||
if !env.Event.CheckID() {
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "invalid: id is computed incorrectly"})
|
||||
return
|
||||
}
|
||||
|
||||
// check signature
|
||||
if ok, err := env.Event.CheckSignature(); err != nil {
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to verify signature"})
|
||||
return
|
||||
} else if !ok {
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "invalid: signature is invalid"})
|
||||
return
|
||||
}
|
||||
|
||||
// check NIP-70 protected
|
||||
if nip70.IsProtected(env.Event) {
|
||||
authed := GetAuthed(ctx)
|
||||
if authed == "" {
|
||||
RequestAuth(ctx)
|
||||
ws.WriteJSON(nostr.OKEnvelope{
|
||||
EventID: env.Event.ID,
|
||||
OK: false,
|
||||
Reason: "auth-required: must be published by authenticated event author",
|
||||
})
|
||||
return
|
||||
} else if authed != env.Event.PubKey {
|
||||
ws.WriteJSON(nostr.OKEnvelope{
|
||||
EventID: env.Event.ID,
|
||||
OK: false,
|
||||
Reason: "blocked: must be published by event author",
|
||||
})
|
||||
return
|
||||
}
|
||||
} else if nip70.HasEmbeddedProtected(env.Event) {
|
||||
ws.WriteJSON(nostr.OKEnvelope{
|
||||
EventID: env.Event.ID,
|
||||
OK: false,
|
||||
Reason: "blocked: can't repost nip70 protected",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
srl := rl
|
||||
if rl.getSubRelayFromEvent != nil {
|
||||
srl = rl.getSubRelayFromEvent(&env.Event)
|
||||
}
|
||||
|
||||
var ok bool
|
||||
var writeErr error
|
||||
var skipBroadcast bool
|
||||
|
||||
if env.Event.Kind == 5 {
|
||||
// this always returns "blocked: " whenever it returns an error
|
||||
writeErr = srl.handleDeleteRequest(ctx, &env.Event)
|
||||
} else if nostr.IsEphemeralKind(env.Event.Kind) {
|
||||
// this will also always return a prefixed reason
|
||||
writeErr = srl.handleEphemeral(ctx, &env.Event)
|
||||
} else {
|
||||
// this will also always return a prefixed reason
|
||||
skipBroadcast, writeErr = srl.handleNormal(ctx, &env.Event)
|
||||
}
|
||||
|
||||
var reason string
|
||||
if writeErr == nil {
|
||||
ok = true
|
||||
for _, ovw := range srl.OverwriteResponseEvent {
|
||||
ovw(ctx, &env.Event)
|
||||
}
|
||||
if !skipBroadcast {
|
||||
n := srl.notifyListeners(&env.Event)
|
||||
|
||||
// the number of notified listeners matters for ephemeral events
|
||||
if nostr.IsEphemeralKind(env.Event.Kind) {
|
||||
if n == 0 {
|
||||
ok = false
|
||||
reason = "mute: no one was listening for this"
|
||||
} else {
|
||||
reason = "broadcasted to " + strconv.Itoa(n) + " listeners"
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
ok = false
|
||||
reason = writeErr.Error()
|
||||
if strings.HasPrefix(reason, "auth-required:") {
|
||||
RequestAuth(ctx)
|
||||
}
|
||||
}
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: ok, Reason: reason})
|
||||
case *nostr.CountEnvelope:
|
||||
if rl.CountEvents == nil && rl.CountEventsHLL == nil {
|
||||
ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: "unsupported: this relay does not support NIP-45"})
|
||||
return
|
||||
}
|
||||
|
||||
var total int64
|
||||
var hll *hyperloglog.HyperLogLog
|
||||
|
||||
srl := rl
|
||||
if rl.getSubRelayFromFilter != nil {
|
||||
srl = rl.getSubRelayFromFilter(env.Filter)
|
||||
}
|
||||
|
||||
if offset := nip45.HyperLogLogEventPubkeyOffsetForFilter(env.Filter); offset != -1 {
|
||||
total, hll = srl.handleCountRequestWithHLL(ctx, ws, env.Filter, offset)
|
||||
} else {
|
||||
total = srl.handleCountRequest(ctx, ws, env.Filter)
|
||||
}
|
||||
|
||||
resp := nostr.CountEnvelope{
|
||||
SubscriptionID: env.SubscriptionID,
|
||||
Count: &total,
|
||||
}
|
||||
if hll != nil {
|
||||
resp.HyperLogLog = hll.GetRegisters()
|
||||
}
|
||||
|
||||
ws.WriteJSON(resp)
|
||||
|
||||
case *nostr.ReqEnvelope:
|
||||
eose := sync.WaitGroup{}
|
||||
eose.Add(len(env.Filters))
|
||||
|
||||
// a context just for the "stored events" request handler
|
||||
reqCtx, cancelReqCtx := context.WithCancelCause(ctx)
|
||||
|
||||
// expose subscription id in the context
|
||||
reqCtx = context.WithValue(reqCtx, subscriptionIdKey, env.SubscriptionID)
|
||||
|
||||
// handle each filter separately -- dispatching events as they're loaded from databases
|
||||
for _, filter := range env.Filters {
|
||||
srl := rl
|
||||
if rl.getSubRelayFromFilter != nil {
|
||||
srl = rl.getSubRelayFromFilter(filter)
|
||||
}
|
||||
err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
|
||||
if err != nil {
|
||||
// fail everything if any filter is rejected
|
||||
reason := err.Error()
|
||||
if strings.HasPrefix(reason, "auth-required:") {
|
||||
RequestAuth(ctx)
|
||||
}
|
||||
ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
|
||||
cancelReqCtx(errors.New("filter rejected"))
|
||||
return
|
||||
} else {
|
||||
rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
|
||||
}
|
||||
}
|
||||
|
||||
go func() {
|
||||
// when all events have been loaded from databases and dispatched we can fire the EOSE message
|
||||
eose.Wait()
|
||||
ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
|
||||
}()
|
||||
case *nostr.CloseEnvelope:
|
||||
id := string(*env)
|
||||
rl.removeListenerId(ws, id)
|
||||
case *nostr.AuthEnvelope:
|
||||
wsBaseUrl := strings.Replace(rl.getBaseURL(r), "http", "ws", 1)
|
||||
if pubkey, ok := nip42.ValidateAuthEvent(&env.Event, ws.Challenge, wsBaseUrl); ok {
|
||||
ws.AuthedPublicKey = pubkey
|
||||
ws.authLock.Lock()
|
||||
if ws.Authed != nil {
|
||||
close(ws.Authed)
|
||||
ws.Authed = nil
|
||||
}
|
||||
ws.authLock.Unlock()
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: true})
|
||||
} else {
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to authenticate"})
|
||||
}
|
||||
case *nip77.OpenEnvelope:
|
||||
srl := rl
|
||||
if rl.getSubRelayFromFilter != nil {
|
||||
srl = rl.getSubRelayFromFilter(env.Filter)
|
||||
if !srl.Negentropy {
|
||||
// ignore
|
||||
return
|
||||
}
|
||||
}
|
||||
vec, err := srl.startNegentropySession(ctx, env.Filter)
|
||||
if err != nil {
|
||||
// fail everything if any filter is rejected
|
||||
reason := err.Error()
|
||||
if strings.HasPrefix(reason, "auth-required:") {
|
||||
RequestAuth(ctx)
|
||||
}
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
|
||||
return
|
||||
}
|
||||
|
||||
// reconcile to get the next message and return it
|
||||
neg := negentropy.New(vec, 1024*1024)
|
||||
out, err := neg.Reconcile(env.Message)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()})
|
||||
return
|
||||
}
|
||||
ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out})
|
||||
|
||||
// if the message is not empty that means we'll probably have more reconciliation to do, so store this session
|
||||
if out != "" {
|
||||
deb := debounce.New(time.Second * 7)
|
||||
negSession := &NegentropySession{
|
||||
neg: neg,
|
||||
postponeClose: func() {
|
||||
deb(func() {
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
})
|
||||
},
|
||||
}
|
||||
negSession.postponeClose()
|
||||
|
||||
ws.negentropySessions.Store(env.SubscriptionID, negSession)
|
||||
}
|
||||
case *nip77.MessageEnvelope:
|
||||
negSession, ok := ws.negentropySessions.Load(env.SubscriptionID)
|
||||
if !ok {
|
||||
// bad luck, your request was destroyed
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: "CLOSED"})
|
||||
return
|
||||
}
|
||||
// reconcile to get the next message and return it
|
||||
out, err := negSession.neg.Reconcile(env.Message)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()})
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
return
|
||||
}
|
||||
ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out})
|
||||
|
||||
// if there is more reconciliation to do, postpone this
|
||||
if out != "" {
|
||||
negSession.postponeClose()
|
||||
} else {
|
||||
// otherwise we can just close it
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
}
|
||||
case *nip77.CloseEnvelope:
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
}
|
||||
}(message)
|
||||
}
|
||||
}()
|
||||
|
||||
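// writer goroutine: sends periodic pings until the connection context is done, tearing the connection down if a write fails.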
go func() {
|
||||
defer kill()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
err := ws.WriteMessage(websocket.PingMessage, nil)
|
||||
if err != nil {
|
||||
if !strings.HasSuffix(err.Error(), "use of closed network connection") {
|
||||
rl.Log.Printf("error writing ping: %v; closing websocket\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
54
khatru/helpers.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
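// isOlder reports whether `previous` should be considered older than `next`: it has an earlier created_at or, on a tie, a lexicographically larger id.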
func isOlder(previous, next *nostr.Event) bool {
|
||||
return previous.CreatedAt < next.CreatedAt ||
|
||||
(previous.CreatedAt == next.CreatedAt && previous.ID > next.ID)
|
||||
}
|
||||
|
||||
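// privateMasks holds the parsed loopback and private-network CIDR ranges used by isPrivate.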
var privateMasks = func() []net.IPNet {
|
||||
privateCIDRs := []string{
|
||||
"127.0.0.0/8",
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"fc00::/7",
|
||||
}
|
||||
masks := make([]net.IPNet, len(privateCIDRs))
|
||||
for i, cidr := range privateCIDRs {
|
||||
_, netw, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
masks[i] = *netw
|
||||
}
|
||||
return masks
|
||||
}()
|
||||
|
||||
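// isPrivate reports whether the given IP falls inside any of the private ranges above.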
func isPrivate(ip net.IP) bool {
|
||||
for _, mask := range privateMasks {
|
||||
if mask.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
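// GetIPFromRequest returns the first public global-unicast address found in the X-Forwarded-For header, falling back to the host part of the request's RemoteAddr.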
func GetIPFromRequest(r *http.Request) string {
|
||||
if xffh := r.Header.Get("X-Forwarded-For"); xffh != "" {
|
||||
for _, v := range strings.Split(xffh, ",") {
|
||||
if ip := net.ParseIP(strings.TrimSpace(v)); ip != nil && ip.IsGlobalUnicast() && !isPrivate(ip) {
|
||||
return ip.String()
|
||||
}
|
||||
}
|
||||
}
|
||||
ip, _, _ := net.SplitHostPort(r.RemoteAddr)
|
||||
return ip
|
||||
}
|
||||
151
khatru/listener.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"slices"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
var ErrSubscriptionClosedByClient = errors.New("subscription closed by client")
|
||||
|
||||
type listenerSpec struct {
|
||||
id string // kept here so we can easily match against it in removeListenerId
|
||||
cancel context.CancelCauseFunc
|
||||
index int
|
||||
subrelay *Relay // this is important when we're dealing with routing, otherwise it will always be the same
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
id string // duplicated here so we can easily send it on notifyListeners
|
||||
filter nostr.Filter
|
||||
ws *WebSocket
|
||||
}
|
||||
|
||||
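// GetListeningFilters returns the filters of every subscription currently registered on this relay.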
func (rl *Relay) GetListeningFilters() []nostr.Filter {
|
||||
respfilters := make([]nostr.Filter, len(rl.listeners))
|
||||
for i, l := range rl.listeners {
|
||||
respfilters[i] = l.filter
|
||||
}
|
||||
return respfilters
|
||||
}
|
||||
|
||||
// addListener may be called multiple times for each id and ws -- in which case each filter will
|
||||
// be added as an independent listener
|
||||
func (rl *Relay) addListener(
|
||||
ws *WebSocket,
|
||||
id string,
|
||||
subrelay *Relay,
|
||||
filter nostr.Filter,
|
||||
cancel context.CancelCauseFunc,
|
||||
) {
|
||||
rl.clientsMutex.Lock()
|
||||
defer rl.clientsMutex.Unlock()
|
||||
|
||||
if specs, ok := rl.clients[ws]; ok /* this will always be true unless the client has disconnected very rapidly */ {
|
||||
idx := len(subrelay.listeners)
|
||||
rl.clients[ws] = append(specs, listenerSpec{
|
||||
id: id,
|
||||
cancel: cancel,
|
||||
subrelay: subrelay,
|
||||
index: idx,
|
||||
})
|
||||
subrelay.listeners = append(subrelay.listeners, listener{
|
||||
ws: ws,
|
||||
id: id,
|
||||
filter: filter,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// remove a specific subscription id from listeners for a given ws client
|
||||
// and cancel its specific context
|
||||
func (rl *Relay) removeListenerId(ws *WebSocket, id string) {
|
||||
rl.clientsMutex.Lock()
|
||||
defer rl.clientsMutex.Unlock()
|
||||
|
||||
if specs, ok := rl.clients[ws]; ok {
|
||||
// swap delete specs that match this id
|
||||
for s := len(specs) - 1; s >= 0; s-- {
|
||||
spec := specs[s]
|
||||
if spec.id == id {
|
||||
spec.cancel(ErrSubscriptionClosedByClient)
|
||||
specs[s] = specs[len(specs)-1]
|
||||
specs = specs[0 : len(specs)-1]
|
||||
rl.clients[ws] = specs
|
||||
|
||||
// swap delete listeners one at a time, as they may each be in a different subrelay
|
||||
srl := spec.subrelay // == rl in normal cases, but different when this came from a route
|
||||
|
||||
if spec.index != len(srl.listeners)-1 {
|
||||
movedFromIndex := len(srl.listeners) - 1
|
||||
moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved
|
||||
srl.listeners[spec.index] = moved
|
||||
|
||||
// now we must update the listener we just moved
|
||||
// so its .index reflects its new position on srl.listeners
|
||||
movedSpecs := rl.clients[moved.ws]
|
||||
idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool {
|
||||
return ls.index == movedFromIndex && ls.subrelay == srl
|
||||
})
|
||||
movedSpecs[idx].index = spec.index
|
||||
rl.clients[moved.ws] = movedSpecs
|
||||
}
|
||||
srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
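// removeClientAndListeners drops every listener belonging to this client and then removes the client itself, swap-deleting so the listener slices stay compact.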
func (rl *Relay) removeClientAndListeners(ws *WebSocket) {
|
||||
rl.clientsMutex.Lock()
|
||||
defer rl.clientsMutex.Unlock()
|
||||
if specs, ok := rl.clients[ws]; ok {
|
||||
// swap delete listeners and delete client (all specs will be deleted)
|
||||
for s, spec := range specs {
|
||||
// no need to cancel contexts since they inherit from the main connection context
|
||||
// just delete the listeners (swap-delete)
|
||||
srl := spec.subrelay
|
||||
|
||||
if spec.index != len(srl.listeners)-1 {
|
||||
movedFromIndex := len(srl.listeners) - 1
|
||||
moved := srl.listeners[movedFromIndex] // this wasn't removed, but will be moved
|
||||
srl.listeners[spec.index] = moved
|
||||
|
||||
// temporarily update the spec of the listener being removed to have index == -1
|
||||
// (since it was removed) so it doesn't match in the search below
|
||||
rl.clients[ws][s].index = -1
|
||||
|
||||
// now we must update the listener we just moved
|
||||
// so its .index reflects its new position on srl.listeners
|
||||
movedSpecs := rl.clients[moved.ws]
|
||||
idx := slices.IndexFunc(movedSpecs, func(ls listenerSpec) bool {
|
||||
return ls.index == movedFromIndex && ls.subrelay == srl
|
||||
})
|
||||
movedSpecs[idx].index = spec.index
|
||||
rl.clients[moved.ws] = movedSpecs
|
||||
}
|
||||
srl.listeners = srl.listeners[0 : len(srl.listeners)-1] // finally reduce the slice length
|
||||
}
|
||||
}
|
||||
delete(rl.clients, ws)
|
||||
}
|
||||
|
||||
// notifyListeners sends the event to every matching listener and returns how many were notified
|
||||
func (rl *Relay) notifyListeners(event *nostr.Event) int {
|
||||
count := 0
|
||||
listenersloop:
|
||||
for _, listener := range rl.listeners {
|
||||
if listener.filter.Matches(event) {
|
||||
for _, pb := range rl.PreventBroadcast {
|
||||
if pb(listener.ws, event) {
|
||||
continue listenersloop
|
||||
}
|
||||
}
|
||||
listener.ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &listener.id, Event: *event})
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
188
khatru/listener_fuzz_test.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func FuzzRandomListenerClientRemoving(f *testing.F) {
|
||||
f.Add(uint(20), uint(20), uint(1))
|
||||
f.Fuzz(func(t *testing.T, utw uint, ubs uint, ualf uint) {
|
||||
totalWebsockets := int(utw)
|
||||
baseSubs := int(ubs)
|
||||
addListenerFreq := int(ualf) + 1
|
||||
|
||||
rl := NewRelay()
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
|
||||
websockets := make([]*WebSocket, 0, totalWebsockets*baseSubs)
|
||||
|
||||
l := 0
|
||||
|
||||
for i := 0; i < totalWebsockets; i++ {
|
||||
ws := &WebSocket{}
|
||||
websockets = append(websockets, ws)
|
||||
rl.clients[ws] = nil
|
||||
}
|
||||
|
||||
s := 0
|
||||
for j := 0; j < baseSubs; j++ {
|
||||
for i := 0; i < totalWebsockets; i++ {
|
||||
ws := websockets[i]
|
||||
w := idFromSeqUpper(i)
|
||||
|
||||
if s%addListenerFreq == 0 {
|
||||
l++
|
||||
rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel)
|
||||
}
|
||||
|
||||
s++
|
||||
}
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, totalWebsockets)
|
||||
require.Len(t, rl.listeners, l)
|
||||
|
||||
for ws := range rl.clients {
|
||||
rl.removeClientAndListeners(ws)
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, 0)
|
||||
require.Len(t, rl.listeners, 0)
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzRandomListenerIdRemoving(f *testing.F) {
|
||||
f.Add(uint(20), uint(20), uint(1), uint(4))
|
||||
f.Fuzz(func(t *testing.T, utw uint, ubs uint, ualf uint, ualef uint) {
|
||||
totalWebsockets := int(utw)
|
||||
baseSubs := int(ubs)
|
||||
addListenerFreq := int(ualf) + 1
|
||||
addExtraListenerFreq := int(ualef) + 1
|
||||
|
||||
if totalWebsockets > 1024 || baseSubs > 1024 {
|
||||
return
|
||||
}
|
||||
|
||||
rl := NewRelay()
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
websockets := make([]*WebSocket, 0, totalWebsockets)
|
||||
|
||||
type wsid struct {
|
||||
ws *WebSocket
|
||||
id string
|
||||
}
|
||||
|
||||
subs := make([]wsid, 0, totalWebsockets*baseSubs)
|
||||
extra := 0
|
||||
|
||||
for i := 0; i < totalWebsockets; i++ {
|
||||
ws := &WebSocket{}
|
||||
websockets = append(websockets, ws)
|
||||
rl.clients[ws] = nil
|
||||
}
|
||||
|
||||
s := 0
|
||||
for j := 0; j < baseSubs; j++ {
|
||||
for i := 0; i < totalWebsockets; i++ {
|
||||
ws := websockets[i]
|
||||
w := idFromSeqUpper(i)
|
||||
|
||||
if s%addListenerFreq == 0 {
|
||||
id := w + ":" + idFromSeqLower(j)
|
||||
rl.addListener(ws, id, rl, f, cancel)
|
||||
subs = append(subs, wsid{ws, id})
|
||||
|
||||
if s%addExtraListenerFreq == 0 {
|
||||
rl.addListener(ws, id, rl, f, cancel)
|
||||
extra++
|
||||
}
|
||||
}
|
||||
|
||||
s++
|
||||
}
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, totalWebsockets)
|
||||
require.Len(t, rl.listeners, len(subs)+extra)
|
||||
|
||||
rand.Shuffle(len(subs), func(i, j int) {
|
||||
subs[i], subs[j] = subs[j], subs[i]
|
||||
})
|
||||
for _, wsidToRemove := range subs {
|
||||
rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id)
|
||||
}
|
||||
|
||||
require.Len(t, rl.listeners, 0)
|
||||
require.Len(t, rl.clients, totalWebsockets)
|
||||
for _, specs := range rl.clients {
|
||||
require.Len(t, specs, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FuzzRouterListenersPabloCrash(f *testing.F) {
|
||||
f.Add(uint(3), uint(6), uint(2), uint(20))
|
||||
f.Fuzz(func(t *testing.T, totalRelays uint, totalConns uint, subFreq uint, subIterations uint) {
|
||||
totalRelays++
|
||||
totalConns++
|
||||
subFreq++
|
||||
subIterations++
|
||||
|
||||
rl := NewRelay()
|
||||
|
||||
relays := make([]*Relay, int(totalRelays))
|
||||
for i := 0; i < int(totalRelays); i++ {
|
||||
relays[i] = NewRelay()
|
||||
}
|
||||
|
||||
conns := make([]*WebSocket, int(totalConns))
|
||||
for i := 0; i < int(totalConns); i++ {
|
||||
ws := &WebSocket{}
|
||||
conns[i] = ws
|
||||
rl.clients[ws] = make([]listenerSpec, 0, subIterations)
|
||||
}
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
|
||||
type wsid struct {
|
||||
ws *WebSocket
|
||||
id string
|
||||
}
|
||||
|
||||
s := 0
|
||||
subs := make([]wsid, 0, subIterations*totalConns*totalRelays)
|
||||
for i, conn := range conns {
|
||||
w := idFromSeqUpper(i)
|
||||
for j := 0; j < int(subIterations); j++ {
|
||||
id := w + ":" + idFromSeqLower(j)
|
||||
for _, rlt := range relays {
|
||||
if s%int(subFreq) == 0 {
|
||||
rl.addListener(conn, id, rlt, f, cancel)
|
||||
subs = append(subs, wsid{conn, id})
|
||||
}
|
||||
s++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, wsid := range subs {
|
||||
rl.removeListenerId(wsid.ws, wsid.id)
|
||||
}
|
||||
|
||||
for _, wsid := range subs {
|
||||
require.Len(t, rl.clients[wsid.ws], 0)
|
||||
}
|
||||
for _, rlt := range relays {
|
||||
require.Len(t, rlt.listeners, 0)
|
||||
}
|
||||
})
|
||||
}
|
||||
545
khatru/listener_test.go
Normal file
@@ -0,0 +1,545 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
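// idFromSeqUpper and idFromSeqLower build deterministic test ids by repeating a letter derived from the sequence number within the given ASCII range.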
func idFromSeqUpper(seq int) string { return idFromSeq(seq, 65, 90) }
|
||||
func idFromSeqLower(seq int) string { return idFromSeq(seq, 97, 122) }
|
||||
func idFromSeq(seq int, min, max int) string {
|
||||
maxSeq := max - min + 1
|
||||
nLetters := seq/maxSeq + 1
|
||||
result := strings.Builder{}
|
||||
result.Grow(nLetters)
|
||||
for l := 0; l < nLetters; l++ {
|
||||
letter := rune(seq%maxSeq + min)
|
||||
result.WriteRune(letter)
|
||||
}
|
||||
return result.String()
|
||||
}
|
||||
|
||||
func TestListenerSetupAndRemoveOnce(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
ws1 := &WebSocket{}
|
||||
ws2 := &WebSocket{}
|
||||
|
||||
f1 := nostr.Filter{Kinds: []int{1}}
|
||||
f2 := nostr.Filter{Kinds: []int{2}}
|
||||
f3 := nostr.Filter{Kinds: []int{3}}
|
||||
|
||||
rl.clients[ws1] = nil
|
||||
rl.clients[ws2] = nil
|
||||
|
||||
var cancel func(cause error) = nil
|
||||
|
||||
t.Run("adding listeners", func(t *testing.T) {
|
||||
rl.addListener(ws1, "1a", rl, f1, cancel)
|
||||
rl.addListener(ws1, "1b", rl, f2, cancel)
|
||||
rl.addListener(ws2, "2a", rl, f3, cancel)
|
||||
rl.addListener(ws1, "1c", rl, f3, cancel)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"1a", cancel, 0, rl},
|
||||
{"1b", cancel, 1, rl},
|
||||
{"1c", cancel, 3, rl},
|
||||
},
|
||||
ws2: {
|
||||
{"2a", cancel, 2, rl},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"1a", f1, ws1},
|
||||
{"1b", f2, ws1},
|
||||
{"2a", f3, ws2},
|
||||
{"1c", f3, ws1},
|
||||
}, rl.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing a client", func(t *testing.T) {
|
||||
rl.removeClientAndListeners(ws1)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws2: {
|
||||
{"2a", cancel, 0, rl},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"2a", f3, ws2},
|
||||
}, rl.listeners)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListenerMoreConvolutedCase(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
ws1 := &WebSocket{}
|
||||
ws2 := &WebSocket{}
|
||||
ws3 := &WebSocket{}
|
||||
ws4 := &WebSocket{}
|
||||
|
||||
f1 := nostr.Filter{Kinds: []int{1}}
|
||||
f2 := nostr.Filter{Kinds: []int{2}}
|
||||
f3 := nostr.Filter{Kinds: []int{3}}
|
||||
|
||||
rl.clients[ws1] = nil
|
||||
rl.clients[ws2] = nil
|
||||
rl.clients[ws3] = nil
|
||||
rl.clients[ws4] = nil
|
||||
|
||||
var cancel func(cause error) = nil
|
||||
|
||||
t.Run("adding listeners", func(t *testing.T) {
|
||||
rl.addListener(ws1, "c", rl, f1, cancel)
|
||||
rl.addListener(ws2, "b", rl, f2, cancel)
|
||||
rl.addListener(ws3, "a", rl, f3, cancel)
|
||||
rl.addListener(ws4, "d", rl, f3, cancel)
|
||||
rl.addListener(ws2, "b", rl, f1, cancel)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rl},
|
||||
},
|
||||
ws2: {
|
||||
{"b", cancel, 1, rl},
|
||||
{"b", cancel, 4, rl},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 2, rl},
|
||||
},
|
||||
ws4: {
|
||||
{"d", cancel, 3, rl},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"b", f2, ws2},
|
||||
{"a", f3, ws3},
|
||||
{"d", f3, ws4},
|
||||
{"b", f1, ws2},
|
||||
}, rl.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing a client", func(t *testing.T) {
|
||||
rl.removeClientAndListeners(ws2)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rl},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 2, rl},
|
||||
},
|
||||
ws4: {
|
||||
{"d", cancel, 1, rl},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"d", f3, ws4},
|
||||
{"a", f3, ws3},
|
||||
}, rl.listeners)
|
||||
})
|
||||
|
||||
t.Run("reorganize the first case differently and then remove again", func(t *testing.T) {
|
||||
rl.clients = map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 1, rl},
|
||||
},
|
||||
ws2: {
|
||||
{"b", cancel, 2, rl},
|
||||
{"b", cancel, 4, rl},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 0, rl},
|
||||
},
|
||||
ws4: {
|
||||
{"d", cancel, 3, rl},
|
||||
},
|
||||
}
|
||||
rl.listeners = []listener{
|
||||
{"a", f3, ws3},
|
||||
{"c", f1, ws1},
|
||||
{"b", f2, ws2},
|
||||
{"d", f3, ws4},
|
||||
{"b", f1, ws2},
|
||||
}
|
||||
|
||||
rl.removeClientAndListeners(ws2)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 1, rl},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 0, rl},
|
||||
},
|
||||
ws4: {
|
||||
{"d", cancel, 2, rl},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"a", f3, ws3},
|
||||
{"c", f1, ws1},
|
||||
{"d", f3, ws4},
|
||||
}, rl.listeners)
|
||||
})
|
||||
}
|
||||
|
||||
func TestListenerMoreStuffWithMultipleRelays(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
ws1 := &WebSocket{}
|
||||
ws2 := &WebSocket{}
|
||||
ws3 := &WebSocket{}
|
||||
ws4 := &WebSocket{}
|
||||
|
||||
f1 := nostr.Filter{Kinds: []int{1}}
|
||||
f2 := nostr.Filter{Kinds: []int{2}}
|
||||
f3 := nostr.Filter{Kinds: []int{3}}
|
||||
|
||||
rlx := NewRelay()
|
||||
rly := NewRelay()
|
||||
rlz := NewRelay()
|
||||
|
||||
rl.clients[ws1] = nil
|
||||
rl.clients[ws2] = nil
|
||||
rl.clients[ws3] = nil
|
||||
rl.clients[ws4] = nil
|
||||
|
||||
var cancel func(cause error) = nil
|
||||
|
||||
t.Run("adding listeners", func(t *testing.T) {
|
||||
rl.addListener(ws1, "c", rlx, f1, cancel)
|
||||
rl.addListener(ws2, "b", rly, f2, cancel)
|
||||
rl.addListener(ws3, "a", rlz, f3, cancel)
|
||||
rl.addListener(ws4, "d", rlx, f3, cancel)
|
||||
rl.addListener(ws4, "e", rlx, f3, cancel)
|
||||
rl.addListener(ws3, "a", rlx, f3, cancel)
|
||||
rl.addListener(ws4, "e", rly, f3, cancel)
|
||||
rl.addListener(ws3, "f", rly, f3, cancel)
|
||||
rl.addListener(ws1, "g", rlz, f1, cancel)
|
||||
rl.addListener(ws2, "g", rlz, f2, cancel)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rlx},
|
||||
{"g", cancel, 1, rlz},
|
||||
},
|
||||
ws2: {
|
||||
{"b", cancel, 0, rly},
|
||||
{"g", cancel, 2, rlz},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 0, rlz},
|
||||
{"a", cancel, 3, rlx},
|
||||
{"f", cancel, 2, rly},
|
||||
},
|
||||
ws4: {
|
||||
{"d", cancel, 1, rlx},
|
||||
{"e", cancel, 2, rlx},
|
||||
{"e", cancel, 1, rly},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"d", f3, ws4},
|
||||
{"e", f3, ws4},
|
||||
{"a", f3, ws3},
|
||||
}, rlx.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"b", f2, ws2},
|
||||
{"e", f3, ws4},
|
||||
{"f", f3, ws3},
|
||||
}, rly.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"a", f3, ws3},
|
||||
{"g", f1, ws1},
|
||||
{"g", f2, ws2},
|
||||
}, rlz.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing a subscription id", func(t *testing.T) {
|
||||
// removing 'd' from ws4
|
||||
rl.clients[ws4][0].cancel = func(cause error) {} // set since removing will call it
|
||||
rl.removeListenerId(ws4, "d")
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rlx},
|
||||
{"g", cancel, 1, rlz},
|
||||
},
|
||||
ws2: {
|
||||
{"b", cancel, 0, rly},
|
||||
{"g", cancel, 2, rlz},
|
||||
},
|
||||
ws3: {
|
||||
{"a", cancel, 0, rlz},
|
||||
{"a", cancel, 1, rlx},
|
||||
{"f", cancel, 2, rly},
|
||||
},
|
||||
ws4: {
|
||||
{"e", cancel, 1, rly},
|
||||
{"e", cancel, 2, rlx},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"a", f3, ws3},
|
||||
{"e", f3, ws4},
|
||||
}, rlx.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"b", f2, ws2},
|
||||
{"e", f3, ws4},
|
||||
{"f", f3, ws3},
|
||||
}, rly.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"a", f3, ws3},
|
||||
{"g", f1, ws1},
|
||||
{"g", f2, ws2},
|
||||
}, rlz.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing another subscription id", func(t *testing.T) {
|
||||
// removing 'a' from ws3
|
||||
rl.clients[ws3][0].cancel = func(cause error) {} // set since removing will call it
|
||||
rl.clients[ws3][1].cancel = func(cause error) {} // set since removing will call it
|
||||
rl.removeListenerId(ws3, "a")
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rlx},
|
||||
{"g", cancel, 1, rlz},
|
||||
},
|
||||
ws2: {
|
||||
{"b", cancel, 0, rly},
|
||||
{"g", cancel, 0, rlz},
|
||||
},
|
||||
ws3: {
|
||||
{"f", cancel, 2, rly},
|
||||
},
|
||||
ws4: {
|
||||
{"e", cancel, 1, rly},
|
||||
{"e", cancel, 1, rlx},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"e", f3, ws4},
|
||||
}, rlx.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"b", f2, ws2},
|
||||
{"e", f3, ws4},
|
||||
{"f", f3, ws3},
|
||||
}, rly.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"g", f2, ws2},
|
||||
{"g", f1, ws1},
|
||||
}, rlz.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing a connection", func(t *testing.T) {
|
||||
rl.removeClientAndListeners(ws2)
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rlx},
|
||||
{"g", cancel, 0, rlz},
|
||||
},
|
||||
ws3: {
|
||||
{"f", cancel, 0, rly},
|
||||
},
|
||||
ws4: {
|
||||
{"e", cancel, 1, rly},
|
||||
{"e", cancel, 1, rlx},
|
||||
},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
{"e", f3, ws4},
|
||||
}, rlx.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"f", f3, ws3},
|
||||
{"e", f3, ws4},
|
||||
}, rly.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"g", f1, ws1},
|
||||
}, rlz.listeners)
|
||||
})
|
||||
|
||||
t.Run("removing another subscription id", func(t *testing.T) {
|
||||
// removing 'e' from ws4
|
||||
rl.clients[ws4][0].cancel = func(cause error) {} // set since removing will call it
|
||||
rl.clients[ws4][1].cancel = func(cause error) {} // set since removing will call it
|
||||
rl.removeListenerId(ws4, "e")
|
||||
|
||||
require.Equal(t, map[*WebSocket][]listenerSpec{
|
||||
ws1: {
|
||||
{"c", cancel, 0, rlx},
|
||||
{"g", cancel, 0, rlz},
|
||||
},
|
||||
ws3: {
|
||||
{"f", cancel, 0, rly},
|
||||
},
|
||||
ws4: {},
|
||||
}, rl.clients)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"c", f1, ws1},
|
||||
}, rlx.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"f", f3, ws3},
|
||||
}, rly.listeners)
|
||||
|
||||
require.Equal(t, []listener{
|
||||
{"g", f1, ws1},
|
||||
}, rlz.listeners)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRandomListenerClientRemoving(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
|
||||
websockets := make([]*WebSocket, 0, 20)
|
||||
|
||||
l := 0
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
ws := &WebSocket{}
|
||||
websockets = append(websockets, ws)
|
||||
rl.clients[ws] = nil
|
||||
}
|
||||
|
||||
for j := 0; j < 20; j++ {
|
||||
for i := 0; i < 20; i++ {
|
||||
ws := websockets[i]
|
||||
w := idFromSeqUpper(i)
|
||||
|
||||
if rand.Intn(2) < 1 {
|
||||
l++
|
||||
rl.addListener(ws, w+":"+idFromSeqLower(j), rl, f, cancel)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, 20)
|
||||
require.Len(t, rl.listeners, l)
|
||||
|
||||
for ws := range rl.clients {
|
||||
rl.removeClientAndListeners(ws)
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, 0)
|
||||
require.Len(t, rl.listeners, 0)
|
||||
}
|
||||
|
||||
func TestRandomListenerIdRemoving(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
|
||||
websockets := make([]*WebSocket, 0, 20)
|
||||
|
||||
type wsid struct {
|
||||
ws *WebSocket
|
||||
id string
|
||||
}
|
||||
|
||||
subs := make([]wsid, 0, 20*20)
|
||||
extra := 0
|
||||
|
||||
for i := 0; i < 20; i++ {
|
||||
ws := &WebSocket{}
|
||||
websockets = append(websockets, ws)
|
||||
rl.clients[ws] = nil
|
||||
}
|
||||
|
||||
for j := 0; j < 20; j++ {
|
||||
for i := 0; i < 20; i++ {
|
||||
ws := websockets[i]
|
||||
w := idFromSeqUpper(i)
|
||||
|
||||
if rand.Intn(2) < 1 {
|
||||
id := w + ":" + idFromSeqLower(j)
|
||||
rl.addListener(ws, id, rl, f, cancel)
|
||||
subs = append(subs, wsid{ws, id})
|
||||
|
||||
if rand.Intn(5) < 1 {
|
||||
rl.addListener(ws, id, rl, f, cancel)
|
||||
extra++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
require.Len(t, rl.clients, 20)
|
||||
require.Len(t, rl.listeners, len(subs)+extra)
|
||||
|
||||
rand.Shuffle(len(subs), func(i, j int) {
|
||||
subs[i], subs[j] = subs[j], subs[i]
|
||||
})
|
||||
for _, wsidToRemove := range subs {
|
||||
rl.removeListenerId(wsidToRemove.ws, wsidToRemove.id)
|
||||
}
|
||||
|
||||
require.Len(t, rl.listeners, 0)
|
||||
require.Len(t, rl.clients, 20)
|
||||
for _, specs := range rl.clients {
|
||||
require.Len(t, specs, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRouterListenersPabloCrash(t *testing.T) {
|
||||
rl := NewRelay()
|
||||
|
||||
rla := NewRelay()
|
||||
rlb := NewRelay()
|
||||
|
||||
ws1 := &WebSocket{}
|
||||
ws2 := &WebSocket{}
|
||||
ws3 := &WebSocket{}
|
||||
|
||||
rl.clients[ws1] = nil
|
||||
rl.clients[ws2] = nil
|
||||
rl.clients[ws3] = nil
|
||||
|
||||
f := nostr.Filter{Kinds: []int{1}}
|
||||
cancel := func(cause error) {}
|
||||
|
||||
rl.addListener(ws1, ":1", rla, f, cancel)
|
||||
rl.addListener(ws2, ":1", rlb, f, cancel)
|
||||
rl.addListener(ws3, "a", rlb, f, cancel)
|
||||
rl.addListener(ws3, "b", rla, f, cancel)
|
||||
rl.addListener(ws3, "c", rlb, f, cancel)
|
||||
|
||||
rl.removeClientAndListeners(ws1)
|
||||
rl.removeClientAndListeners(ws3)
|
||||
}
|
||||
53
khatru/negentropy.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/fiatjaf/eventstore"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector"
|
||||
)
|
||||
|
||||
type NegentropySession struct {
|
||||
neg *negentropy.Negentropy
|
||||
postponeClose func()
|
||||
}
|
||||
|
||||
func (rl *Relay) startNegentropySession(ctx context.Context, filter nostr.Filter) (*vector.Vector, error) {
|
||||
ctx = eventstore.SetNegentropy(ctx)
|
||||
|
||||
// do the same overwrite/reject flow we do in normal REQs
|
||||
for _, ovw := range rl.OverwriteFilter {
|
||||
ovw(ctx, &filter)
|
||||
}
|
||||
if filter.LimitZero {
|
||||
return nil, fmt.Errorf("invalid limit 0")
|
||||
}
|
||||
for _, reject := range rl.RejectFilter {
|
||||
if reject, msg := reject(ctx, filter); reject {
|
||||
return nil, errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
|
||||
}
|
||||
}
|
||||
|
||||
// fetch events and add them to a negentropy Vector store
|
||||
vec := vector.New()
|
||||
for _, query := range rl.QueryEvents {
|
||||
ch, err := query(ctx, filter)
|
||||
if err != nil {
|
||||
continue
|
||||
} else if ch == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for event := range ch {
|
||||
// since the goal here is to sync databases we won't do fancy stuff like overwrite events
|
||||
vec.Insert(event.CreatedAt, event.ID)
|
||||
}
|
||||
}
|
||||
vec.Seal()
|
||||
|
||||
return vec, nil
|
||||
}
|
||||
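A minimal usage sketch for the negentropy support above, assuming the in-memory slicestore backend from eventstore and a placeholder listen address; setting `Negentropy` is what makes `startNegentropySession` reachable for clients:

```go
package main

import (
	"log"
	"net/http"

	"github.com/fiatjaf/eventstore/slicestore"
	"github.com/fiatjaf/khatru"
)

func main() {
	relay := khatru.NewRelay()

	// any eventstore backend works; slicestore is the in-memory one used by the tests in this commit
	store := slicestore.SliceStore{}
	store.Init()
	relay.StoreEvent = append(relay.StoreEvent, store.SaveEvent)
	relay.QueryEvents = append(relay.QueryEvents, store.QueryEvents)
	relay.DeleteEvent = append(relay.DeleteEvent, store.DeleteEvent)

	// advertise NIP-77 so clients can run negentropy sync against this relay;
	// startNegentropySession then reuses the same QueryEvents hooks
	relay.Negentropy = true

	log.Fatal(http.ListenAndServe(":3334", relay))
}
```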
38
khatru/nip11.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func (rl *Relay) HandleNIP11(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/nostr+json")
|
||||
|
||||
info := *rl.Info
|
||||
|
||||
if len(rl.DeleteEvent) > 0 {
|
||||
info.AddSupportedNIP(9)
|
||||
}
|
||||
if len(rl.CountEvents) > 0 {
|
||||
info.AddSupportedNIP(45)
|
||||
}
|
||||
if rl.Negentropy {
|
||||
info.AddSupportedNIP(77)
|
||||
}
|
||||
|
||||
// resolve relative icon and banner URLs against base URL
|
||||
baseURL := rl.getBaseURL(r)
|
||||
if info.Icon != "" && !strings.HasPrefix(info.Icon, "http://") && !strings.HasPrefix(info.Icon, "https://") {
|
||||
info.Icon = strings.TrimSuffix(baseURL, "/") + "/" + strings.TrimPrefix(info.Icon, "/")
|
||||
}
|
||||
if info.Banner != "" && !strings.HasPrefix(info.Banner, "http://") && !strings.HasPrefix(info.Banner, "https://") {
|
||||
info.Banner = strings.TrimSuffix(baseURL, "/") + "/" + strings.TrimPrefix(info.Banner, "/")
|
||||
}
|
||||
|
||||
for _, ovw := range rl.OverwriteRelayInformation {
|
||||
info = ovw(r.Context(), r, info)
|
||||
}
|
||||
|
||||
json.NewEncoder(w).Encode(info)
|
||||
}
|
||||
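A small sketch of the `OverwriteRelayInformation` hook that `HandleNIP11` calls last, after resolving icon/banner URLs and adding the automatic supported-NIP entries; the field it edits and the listen address are just illustrative:

```go
package main

import (
	"context"
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr/nip11"
)

func main() {
	relay := khatru.NewRelay()

	// tweak the NIP-11 document per request, right before it is encoded
	relay.OverwriteRelayInformation = append(relay.OverwriteRelayInformation,
		func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument {
			info.Description = "serving " + r.Host
			return info
		},
	)

	http.ListenAndServe(":3334", relay)
}
```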
328
khatru/nip86.go
Normal file
@@ -0,0 +1,328 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip86"
|
||||
)
|
||||
|
||||
type RelayManagementAPI struct {
|
||||
RejectAPICall []func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string)
|
||||
|
||||
BanPubKey func(ctx context.Context, pubkey string, reason string) error
|
||||
ListBannedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
|
||||
AllowPubKey func(ctx context.Context, pubkey string, reason string) error
|
||||
ListAllowedPubKeys func(ctx context.Context) ([]nip86.PubKeyReason, error)
|
||||
ListEventsNeedingModeration func(ctx context.Context) ([]nip86.IDReason, error)
|
||||
AllowEvent func(ctx context.Context, id string, reason string) error
|
||||
BanEvent func(ctx context.Context, id string, reason string) error
|
||||
ListBannedEvents func(ctx context.Context) ([]nip86.IDReason, error)
|
||||
ListAllowedEvents func(ctx context.Context) ([]nip86.IDReason, error)
|
||||
ChangeRelayName func(ctx context.Context, name string) error
|
||||
ChangeRelayDescription func(ctx context.Context, desc string) error
|
||||
ChangeRelayIcon func(ctx context.Context, icon string) error
|
||||
AllowKind func(ctx context.Context, kind int) error
|
||||
DisallowKind func(ctx context.Context, kind int) error
|
||||
ListAllowedKinds func(ctx context.Context) ([]int, error)
|
||||
ListDisAllowedKinds func(ctx context.Context) ([]int, error)
|
||||
BlockIP func(ctx context.Context, ip net.IP, reason string) error
|
||||
UnblockIP func(ctx context.Context, ip net.IP, reason string) error
|
||||
ListBlockedIPs func(ctx context.Context) ([]nip86.IPReason, error)
|
||||
Stats func(ctx context.Context) (nip86.Response, error)
|
||||
GrantAdmin func(ctx context.Context, pubkey string, methods []string) error
|
||||
RevokeAdmin func(ctx context.Context, pubkey string, methods []string) error
|
||||
Generic func(ctx context.Context, request nip86.Request) (nip86.Response, error)
|
||||
}
|
||||
|
||||
func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/nostr+json+rpc")
|
||||
|
||||
var (
|
||||
resp nip86.Response
|
||||
ctx = r.Context()
|
||||
req nip86.Request
|
||||
mp nip86.MethodParams
|
||||
evt nostr.Event
|
||||
payloadHash [32]byte
|
||||
)
|
||||
|
||||
payload, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
resp.Error = "empty request"
|
||||
goto respond
|
||||
}
|
||||
payloadHash = sha256.Sum256(payload)
|
||||
|
||||
{
|
||||
auth := r.Header.Get("Authorization")
|
||||
spl := strings.Split(auth, "Nostr ")
|
||||
if len(spl) != 2 {
|
||||
resp.Error = "missing auth"
|
||||
goto respond
|
||||
}
|
||||
|
||||
evtj, err := base64.StdEncoding.DecodeString(spl[1])
|
||||
if err != nil {
|
||||
resp.Error = "invalid base64 auth"
|
||||
goto respond
|
||||
}
|
||||
if err := json.Unmarshal(evtj, &evt); err != nil {
|
||||
resp.Error = "invalid auth event json"
|
||||
goto respond
|
||||
}
|
||||
if ok, _ := evt.CheckSignature(); !ok {
|
||||
resp.Error = "invalid auth event"
|
||||
goto respond
|
||||
}
|
||||
|
||||
if uTag := evt.Tags.Find("u"); uTag == nil || rl.getBaseURL(r) != uTag[1] {
|
||||
resp.Error = "invalid 'u' tag"
|
||||
goto respond
|
||||
} else if pht := evt.Tags.FindWithValue("payload", hex.EncodeToString(payloadHash[:])); pht == nil {
|
||||
resp.Error = "invalid auth event payload hash"
|
||||
goto respond
|
||||
} else if evt.CreatedAt < nostr.Now()-30 {
|
||||
resp.Error = "auth event is too old"
|
||||
goto respond
|
||||
}
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(payload, &req); err != nil {
|
||||
resp.Error = "invalid json body"
|
||||
goto respond
|
||||
}
|
||||
|
||||
mp, err = nip86.DecodeRequest(req)
|
||||
if err != nil {
|
||||
resp.Error = fmt.Sprintf("invalid params: %s", err)
|
||||
goto respond
|
||||
}
|
||||
|
||||
ctx = context.WithValue(ctx, nip86HeaderAuthKey, evt.PubKey)
|
||||
for _, rac := range rl.ManagementAPI.RejectAPICall {
|
||||
if reject, msg := rac(ctx, mp); reject {
|
||||
resp.Error = msg
|
||||
goto respond
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := mp.(nip86.SupportedMethods); ok {
|
||||
mat := reflect.TypeOf(rl.ManagementAPI)
|
||||
mav := reflect.ValueOf(rl.ManagementAPI)
|
||||
|
||||
methods := make([]string, 0, mat.NumField())
|
||||
for i := 0; i < mat.NumField(); i++ {
|
||||
field := mat.Field(i)
|
||||
|
||||
// danger: this assumes the struct fields are appropriately named
|
||||
methodName := strings.ToLower(field.Name)
|
||||
|
||||
// include this method only if the corresponding handler was actually set
if !mav.Field(i).IsNil() {
methods = append(methods, methodName)
}
|
||||
}
|
||||
resp.Result = methods
|
||||
} else {
|
||||
switch thing := mp.(type) {
|
||||
case nip86.BanPubKey:
|
||||
if rl.ManagementAPI.BanPubKey == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.BanPubKey(ctx, thing.PubKey, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListBannedPubKeys:
|
||||
if rl.ManagementAPI.ListBannedPubKeys == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListBannedPubKeys(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.AllowPubKey:
|
||||
if rl.ManagementAPI.AllowPubKey == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.AllowPubKey(ctx, thing.PubKey, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListAllowedPubKeys:
|
||||
if rl.ManagementAPI.ListAllowedPubKeys == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListAllowedPubKeys(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.BanEvent:
|
||||
if rl.ManagementAPI.BanEvent == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.BanEvent(ctx, thing.ID, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.AllowEvent:
|
||||
if rl.ManagementAPI.AllowEvent == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.AllowEvent(ctx, thing.ID, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListEventsNeedingModeration:
|
||||
if rl.ManagementAPI.ListEventsNeedingModeration == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListEventsNeedingModeration(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.ListBannedEvents:
|
||||
if rl.ManagementAPI.ListBannedEvents == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListBannedEvents(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.ChangeRelayName:
|
||||
if rl.ManagementAPI.ChangeRelayName == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.ChangeRelayName(ctx, thing.Name); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ChangeRelayDescription:
|
||||
if rl.ManagementAPI.ChangeRelayDescription == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.ChangeRelayDescription(ctx, thing.Description); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ChangeRelayIcon:
|
||||
if rl.ManagementAPI.ChangeRelayIcon == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.ChangeRelayIcon(ctx, thing.IconURL); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.AllowKind:
|
||||
if rl.ManagementAPI.AllowKind == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.AllowKind(ctx, thing.Kind); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.DisallowKind:
|
||||
if rl.ManagementAPI.DisallowKind == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.DisallowKind(ctx, thing.Kind); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListAllowedKinds:
|
||||
if rl.ManagementAPI.ListAllowedKinds == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListAllowedKinds(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.BlockIP:
|
||||
if rl.ManagementAPI.BlockIP == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.BlockIP(ctx, thing.IP, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.UnblockIP:
|
||||
if rl.ManagementAPI.UnblockIP == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.UnblockIP(ctx, thing.IP, thing.Reason); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListBlockedIPs:
|
||||
if rl.ManagementAPI.ListBlockedIPs == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListBlockedIPs(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.Stats:
|
||||
if rl.ManagementAPI.Stats == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.Stats(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.GrantAdmin:
|
||||
if rl.ManagementAPI.GrantAdmin == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.GrantAdmin(ctx, thing.Pubkey, thing.AllowMethods); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.RevokeAdmin:
|
||||
if rl.ManagementAPI.RevokeAdmin == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if err := rl.ManagementAPI.RevokeAdmin(ctx, thing.Pubkey, thing.DisallowMethods); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = true
|
||||
}
|
||||
case nip86.ListDisallowedKinds:
|
||||
if rl.ManagementAPI.ListDisAllowedKinds == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListDisAllowedKinds(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
case nip86.ListAllowedEvents:
|
||||
if rl.ManagementAPI.ListAllowedEvents == nil {
|
||||
resp.Error = fmt.Sprintf("method %s not supported", thing.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.ListAllowedEvents(ctx); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
default:
|
||||
if rl.ManagementAPI.Generic == nil {
|
||||
resp.Error = fmt.Sprintf("method '%s' not known", mp.MethodName())
|
||||
} else if result, err := rl.ManagementAPI.Generic(ctx, req); err != nil {
|
||||
resp.Error = err.Error()
|
||||
} else {
|
||||
resp.Result = result
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
respond:
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
}
|
||||
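A hedged sketch of wiring the management API handled above: an authorization gate via `RejectAPICall` plus one handler; the admin pubkey, ban store, and listen address are placeholders:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr/nip86"
)

func main() {
	relay := khatru.NewRelay()

	// hypothetical admin pubkey and in-memory ban list, purely for illustration
	const adminPubKey = "replace-with-an-admin-hex-pubkey"
	banned := map[string]string{}

	// every management call is authenticated before it gets here, so GetAuthed
	// returns the pubkey that signed the Authorization event
	relay.ManagementAPI.RejectAPICall = append(relay.ManagementAPI.RejectAPICall,
		func(ctx context.Context, mp nip86.MethodParams) (bool, string) {
			if khatru.GetAuthed(ctx) != adminPubKey {
				return true, "unauthorized"
			}
			return false, ""
		},
	)

	// defining a handler is what makes the corresponding method show up in "supportedmethods"
	relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error {
		banned[pubkey] = reason
		return nil
	}

	log.Fatal(http.ListenAndServe(":3334", relay))
}
```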
117
khatru/policies/events.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip70"
|
||||
)
|
||||
|
||||
// PreventTooManyIndexableTags returns a function that can be used as a RejectEvent hook that will reject
// events with more indexable (single-character) tags than the specified number.
//
// If ignoreKinds is given this restriction will not apply to those kinds (useful for allowing a bigger tag limit for them).
// If onlyKinds is given then all other kinds will be ignored.
|
||||
func PreventTooManyIndexableTags(max int, ignoreKinds []int, onlyKinds []int) func(context.Context, *nostr.Event) (bool, string) {
|
||||
slices.Sort(ignoreKinds)
|
||||
slices.Sort(onlyKinds)
|
||||
|
||||
ignore := func(kind int) bool { return false }
|
||||
if len(ignoreKinds) > 0 {
|
||||
ignore = func(kind int) bool {
|
||||
_, isIgnored := slices.BinarySearch(ignoreKinds, kind)
|
||||
return isIgnored
|
||||
}
|
||||
}
|
||||
if len(onlyKinds) > 0 {
|
||||
ignore = func(kind int) bool {
|
||||
_, isApplicable := slices.BinarySearch(onlyKinds, kind)
|
||||
return !isApplicable
|
||||
}
|
||||
}
|
||||
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if ignore(event.Kind) {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
ntags := 0
|
||||
for _, tag := range event.Tags {
|
||||
if len(tag) > 0 && len(tag[0]) == 1 {
|
||||
ntags++
|
||||
}
|
||||
}
|
||||
if ntags > max {
|
||||
return true, "too many indexable tags"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
// PreventLargeTags rejects events that have indexable tag values greater than maxTagValueLen.
|
||||
func PreventLargeTags(maxTagValueLen int) func(context.Context, *nostr.Event) (bool, string) {
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
for _, tag := range event.Tags {
|
||||
if len(tag) > 1 && len(tag[0]) == 1 {
|
||||
if len(tag[1]) > maxTagValueLen {
|
||||
return true, "event contains too large tags"
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
// RestrictToSpecifiedKinds returns a function that can be used as a RejectEvent hook that will reject
// any events with kinds different than the specified ones.
|
||||
func RestrictToSpecifiedKinds(allowEphemeral bool, kinds ...uint16) func(context.Context, *nostr.Event) (bool, string) {
|
||||
// sort the kinds in increasing order
|
||||
slices.Sort(kinds)
|
||||
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if allowEphemeral && nostr.IsEphemeralKind(event.Kind) {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
if _, allowed := slices.BinarySearch(kinds, uint16(event.Kind)); allowed {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
return true, fmt.Sprintf("received event kind %d not allowed", event.Kind)
|
||||
}
|
||||
}
|
||||
|
||||
func PreventTimestampsInThePast(threshold time.Duration) func(context.Context, *nostr.Event) (bool, string) {
|
||||
thresholdSeconds := nostr.Timestamp(threshold.Seconds())
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if nostr.Now()-event.CreatedAt > thresholdSeconds {
|
||||
return true, "event too old"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
func PreventTimestampsInTheFuture(threshold time.Duration) func(context.Context, *nostr.Event) (bool, string) {
|
||||
thresholdSeconds := nostr.Timestamp(threshold.Seconds())
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if event.CreatedAt-nostr.Now() > thresholdSeconds {
|
||||
return true, "event too much in the future"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
}
|
||||
|
||||
func RejectEventsWithBase64Media(ctx context.Context, evt *nostr.Event) (bool, string) {
|
||||
return strings.Contains(evt.Content, "data:image/") || strings.Contains(evt.Content, "data:video/"), "event with base64 media"
|
||||
}
|
||||
|
||||
func OnlyAllowNIP70ProtectedEvents(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if nip70.IsProtected(*event) {
|
||||
return false, ""
|
||||
}
|
||||
return true, "blocked: we only accept events protected with the nip70 \"-\" tag"
|
||||
}
|
||||
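A short sketch of how these event policies could be wired into a relay; the concrete limits, kinds, and listen address are placeholders, not recommendations:

```go
package main

import (
	"net/http"
	"time"

	"github.com/fiatjaf/khatru"
	"github.com/fiatjaf/khatru/policies"
)

func main() {
	relay := khatru.NewRelay()

	relay.RejectEvent = append(relay.RejectEvent,
		policies.PreventTooManyIndexableTags(10, nil, nil), // cap single-letter tags
		policies.PreventLargeTags(120),                     // cap indexable tag value length
		policies.RestrictToSpecifiedKinds(true, 0, 1, 3),   // profiles, notes, follow lists (+ ephemeral)
		policies.PreventTimestampsInThePast(2*time.Hour),
		policies.PreventTimestampsInTheFuture(15*time.Minute),
	)

	http.ListenAndServe(":3334", relay)
}
```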
93
khatru/policies/filters.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
// NoComplexFilters disallows filters with more than 2 tags.
|
||||
func NoComplexFilters(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
items := len(filter.Tags) + len(filter.Kinds)
|
||||
|
||||
if items > 4 && len(filter.Tags) > 2 {
|
||||
return true, "too many things to filter for"
|
||||
}
|
||||
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// MustAuth requires all subscribers to be authenticated
|
||||
func MustAuth(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
if khatru.GetAuthed(ctx) == "" {
|
||||
return true, "auth-required: all requests must be authenticated"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// NoEmptyFilters disallows filters that don't have at least a tag, a kind, an author or an id.
|
||||
func NoEmptyFilters(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
c := len(filter.Kinds) + len(filter.IDs) + len(filter.Authors)
|
||||
for _, tagItems := range filter.Tags {
|
||||
c += len(tagItems)
|
||||
}
|
||||
if c == 0 {
|
||||
return true, "can't handle empty filters"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
// AntiSyncBots tries to prevent people from syncing kind:1s from this relay to elsewhere by always
// requiring at least an author parameter.
|
||||
func AntiSyncBots(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
return (len(filter.Kinds) == 0 || slices.Contains(filter.Kinds, 1)) &&
|
||||
len(filter.Authors) == 0, "an author must be specified to get their kind:1 notes"
|
||||
}
|
||||
|
||||
func NoSearchQueries(ctx context.Context, filter nostr.Filter) (reject bool, msg string) {
|
||||
if filter.Search != "" {
|
||||
return true, "search is not supported"
|
||||
}
|
||||
return false, ""
|
||||
}
|
||||
|
||||
func RemoveSearchQueries(ctx context.Context, filter *nostr.Filter) {
|
||||
if filter.Search != "" {
|
||||
filter.Search = ""
|
||||
filter.LimitZero = true // signals that this query should be just skipped
|
||||
}
|
||||
}
|
||||
|
||||
func RemoveAllButKinds(kinds ...uint16) func(context.Context, *nostr.Filter) {
|
||||
return func(ctx context.Context, filter *nostr.Filter) {
|
||||
if n := len(filter.Kinds); n > 0 {
|
||||
newKinds := make([]int, 0, n)
|
||||
for i := 0; i < n; i++ {
|
||||
if k := filter.Kinds[i]; slices.Contains(kinds, uint16(k)) {
|
||||
newKinds = append(newKinds, k)
|
||||
}
|
||||
}
|
||||
filter.Kinds = newKinds
|
||||
if len(filter.Kinds) == 0 {
|
||||
filter.LimitZero = true // signals that this query should be just skipped
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func RemoveAllButTags(tagNames ...string) func(context.Context, *nostr.Filter) {
|
||||
return func(ctx context.Context, filter *nostr.Filter) {
|
||||
if n := len(filter.Tags); n > 0 {
|
||||
for tagName := range filter.Tags {
|
||||
if !slices.Contains(tagNames, tagName) {
|
||||
delete(filter.Tags, tagName)
|
||||
}
|
||||
}
|
||||
if len(filter.Tags) == 0 {
|
||||
filter.LimitZero = true // signals that this query should be just skipped
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
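A sketch combining the overwrite and reject filter policies above; since overwrite hooks run before reject hooks, a filter emptied by `RemoveSearchQueries` or `RemoveAllButKinds` is simply skipped (via `LimitZero`) rather than hitting the database. Kinds and address are placeholders:

```go
package main

import (
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/fiatjaf/khatru/policies"
)

func main() {
	relay := khatru.NewRelay()

	relay.OverwriteFilter = append(relay.OverwriteFilter,
		policies.RemoveSearchQueries,
		policies.RemoveAllButKinds(0, 1, 3),
	)
	relay.RejectFilter = append(relay.RejectFilter,
		policies.NoEmptyFilters,
		policies.NoComplexFilters,
	)

	http.ListenAndServe(":3334", relay)
}
```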
42
khatru/policies/helpers.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
func startRateLimitSystem[K comparable](
|
||||
tokensPerInterval int,
|
||||
interval time.Duration,
|
||||
maxTokens int,
|
||||
) func(key K) (ratelimited bool) {
|
||||
negativeBuckets := xsync.NewMapOf[K, *atomic.Int32]()
|
||||
maxTokensInt32 := int32(maxTokens)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(interval)
|
||||
for key, bucket := range negativeBuckets.Range {
|
||||
newv := bucket.Add(int32(-tokensPerInterval))
|
||||
if newv <= 0 {
|
||||
negativeBuckets.Delete(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return func(key K) bool {
|
||||
nb, _ := negativeBuckets.LoadOrStore(key, &atomic.Int32{})
|
||||
|
||||
if nb.Load() < maxTokensInt32 {
|
||||
nb.Add(1)
|
||||
// rate limit not reached yet
|
||||
return false
|
||||
}
|
||||
|
||||
// rate limit reached
|
||||
return true
|
||||
}
|
||||
}
|
||||
29
khatru/policies/kind_validation.go
Normal file
@@ -0,0 +1,29 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func ValidateKind(ctx context.Context, evt *nostr.Event) (bool, string) {
|
||||
switch evt.Kind {
|
||||
case 0:
|
||||
var m struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
json.Unmarshal([]byte(evt.Content), &m)
|
||||
if m.Name == "" {
|
||||
return true, "missing json name in kind 0"
|
||||
}
|
||||
case 1:
|
||||
return false, ""
|
||||
case 2:
|
||||
return true, "this kind has been deprecated"
|
||||
}
|
||||
|
||||
// TODO: all other kinds
|
||||
|
||||
return false, ""
|
||||
}
|
||||
38
khatru/policies/nip04.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
// RejectKind04Snoopers prevents reading NIP-04 messages from people not involved in the conversation.
|
||||
func RejectKind04Snoopers(ctx context.Context, filter nostr.Filter) (bool, string) {
|
||||
// prevent kind-4 events from being returned to unauthed users,
|
||||
// only when authentication is a thing
|
||||
if !slices.Contains(filter.Kinds, 4) {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
ws := khatru.GetConnection(ctx)
|
||||
senders := filter.Authors
|
||||
receivers, _ := filter.Tags["p"]
|
||||
switch {
|
||||
case ws.AuthedPublicKey == "":
|
||||
// not authenticated
|
||||
return true, "restricted: this relay does not serve kind-4 to unauthenticated users, does your client implement NIP-42?"
|
||||
case len(senders) == 1 && len(receivers) < 2 && (senders[0] == ws.AuthedPublicKey):
|
||||
// allowed filter: ws.authed is sole sender (filter specifies one or all receivers)
|
||||
return false, ""
|
||||
case len(receivers) == 1 && len(senders) < 2 && (receivers[0] == ws.AuthedPublicKey):
|
||||
// allowed filter: ws.authed is sole receiver (filter specifies one or all senders)
|
||||
return false, ""
|
||||
default:
|
||||
// restricted filter: do not return any events,
|
||||
// even if other elements in filters array were not restricted).
|
||||
// client should know better.
|
||||
return true, "restricted: authenticated user does not have authorization for requested filters."
|
||||
}
|
||||
}
|
||||
46
khatru/policies/ratelimits.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/fiatjaf/khatru"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func EventIPRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) {
|
||||
rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens)
|
||||
|
||||
return func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) {
|
||||
ip := khatru.GetIP(ctx)
|
||||
if ip == "" {
|
||||
return false, ""
|
||||
}
|
||||
return rl(ip), "rate-limited: slow down, please"
|
||||
}
|
||||
}
|
||||
|
||||
func EventPubKeyRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ *nostr.Event) (reject bool, msg string) {
|
||||
rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens)
|
||||
|
||||
return func(ctx context.Context, evt *nostr.Event) (reject bool, msg string) {
|
||||
return rl(evt.PubKey), "rate-limited: slow down, please"
|
||||
}
|
||||
}
|
||||
|
||||
func ConnectionRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(r *http.Request) bool {
|
||||
rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens)
|
||||
|
||||
return func(r *http.Request) bool {
|
||||
return rl(khatru.GetIPFromRequest(r))
|
||||
}
|
||||
}
|
||||
|
||||
func FilterIPRateLimiter(tokensPerInterval int, interval time.Duration, maxTokens int) func(ctx context.Context, _ nostr.Filter) (reject bool, msg string) {
|
||||
rl := startRateLimitSystem[string](tokensPerInterval, interval, maxTokens)
|
||||
|
||||
return func(ctx context.Context, _ nostr.Filter) (reject bool, msg string) {
|
||||
return rl(khatru.GetIP(ctx)), "rate-limited: there is a bug in the client, no one should be making so many requests"
|
||||
}
|
||||
}
|
||||
23
khatru/policies/sane_defaults.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package policies
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func ApplySaneDefaults(relay *khatru.Relay) {
|
||||
relay.RejectEvent = append(relay.RejectEvent,
|
||||
RejectEventsWithBase64Media,
|
||||
EventIPRateLimiter(2, time.Minute*3, 10),
|
||||
)
|
||||
|
||||
relay.RejectFilter = append(relay.RejectFilter,
|
||||
NoComplexFilters,
|
||||
FilterIPRateLimiter(20, time.Minute, 100),
|
||||
)
|
||||
|
||||
relay.RejectConnection = append(relay.RejectConnection,
|
||||
ConnectionRateLimiter(1, time.Minute*5, 100),
|
||||
)
|
||||
}
|
||||
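For completeness, applying these defaults before any custom hooks is a one-liner (the address is a placeholder):

```go
package main

import (
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/fiatjaf/khatru/policies"
)

func main() {
	relay := khatru.NewRelay()
	policies.ApplySaneDefaults(relay) // base64-media rejection plus per-IP rate limits
	http.ListenAndServe(":3334", relay)
}
```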
145
khatru/relay.go
Normal file
@@ -0,0 +1,145 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip11"
|
||||
"github.com/nbd-wtf/go-nostr/nip45/hyperloglog"
|
||||
)
|
||||
|
||||
func NewRelay() *Relay {
|
||||
ctx := context.Background()
|
||||
|
||||
rl := &Relay{
|
||||
Log: log.New(os.Stderr, "[khatru-relay] ", log.LstdFlags),
|
||||
|
||||
Info: &nip11.RelayInformationDocument{
|
||||
Software: "https://github.com/fiatjaf/khatru",
|
||||
Version: "n/a",
|
||||
SupportedNIPs: []any{1, 11, 40, 42, 70, 86},
|
||||
},
|
||||
|
||||
upgrader: websocket.Upgrader{
|
||||
ReadBufferSize: 1024,
|
||||
WriteBufferSize: 1024,
|
||||
CheckOrigin: func(r *http.Request) bool { return true },
|
||||
},
|
||||
|
||||
clients: make(map[*WebSocket][]listenerSpec, 100),
|
||||
listeners: make([]listener, 0, 100),
|
||||
|
||||
serveMux: &http.ServeMux{},
|
||||
|
||||
WriteWait: 10 * time.Second,
|
||||
PongWait: 60 * time.Second,
|
||||
PingPeriod: 30 * time.Second,
|
||||
MaxMessageSize: 512000,
|
||||
}
|
||||
|
||||
rl.expirationManager = newExpirationManager(rl)
|
||||
go rl.expirationManager.start(ctx)
|
||||
|
||||
return rl
|
||||
}
|
||||
|
||||
type Relay struct {
|
||||
// setting this variable overwrites the hackish workaround we do to try to figure out our own base URL
|
||||
ServiceURL string
|
||||
|
||||
// hooks that will be called at various times
|
||||
RejectEvent []func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
|
||||
OverwriteDeletionOutcome []func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
|
||||
StoreEvent []func(ctx context.Context, event *nostr.Event) error
|
||||
ReplaceEvent []func(ctx context.Context, event *nostr.Event) error
|
||||
DeleteEvent []func(ctx context.Context, event *nostr.Event) error
|
||||
OnEventSaved []func(ctx context.Context, event *nostr.Event)
|
||||
OnEphemeralEvent []func(ctx context.Context, event *nostr.Event)
|
||||
RejectFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
|
||||
RejectCountFilter []func(ctx context.Context, filter nostr.Filter) (reject bool, msg string)
|
||||
OverwriteFilter []func(ctx context.Context, filter *nostr.Filter)
|
||||
QueryEvents []func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error)
|
||||
CountEvents []func(ctx context.Context, filter nostr.Filter) (int64, error)
|
||||
CountEventsHLL []func(ctx context.Context, filter nostr.Filter, offset int) (int64, *hyperloglog.HyperLogLog, error)
|
||||
RejectConnection []func(r *http.Request) bool
|
||||
OnConnect []func(ctx context.Context)
|
||||
OnDisconnect []func(ctx context.Context)
|
||||
OverwriteRelayInformation []func(ctx context.Context, r *http.Request, info nip11.RelayInformationDocument) nip11.RelayInformationDocument
|
||||
OverwriteResponseEvent []func(ctx context.Context, event *nostr.Event)
|
||||
PreventBroadcast []func(ws *WebSocket, event *nostr.Event) bool
|
||||
|
||||
// these are used when this relays acts as a router
|
||||
routes []Route
|
||||
getSubRelayFromEvent func(*nostr.Event) *Relay // used for handling EVENTs
|
||||
getSubRelayFromFilter func(nostr.Filter) *Relay // used for handling REQs
|
||||
|
||||
// setting up handlers here will enable these methods
|
||||
ManagementAPI RelayManagementAPI
|
||||
|
||||
// editing info will affect the NIP-11 responses
|
||||
Info *nip11.RelayInformationDocument
|
||||
|
||||
// Default logger, as set by NewRelay, is a stdlib logger prefixed with "[khatru-relay] ",
|
||||
// outputting to stderr.
|
||||
Log *log.Logger
|
||||
|
||||
// for establishing websockets
|
||||
upgrader websocket.Upgrader
|
||||
|
||||
// keep a connection reference to all connected clients for Server.Shutdown
|
||||
// also used for keeping track of who is listening to what
|
||||
clients map[*WebSocket][]listenerSpec
|
||||
listeners []listener
|
||||
clientsMutex sync.Mutex
|
||||
|
||||
// set this to true to support negentropy
|
||||
Negentropy bool
|
||||
|
||||
// in case you call Server.Start
|
||||
Addr string
|
||||
serveMux *http.ServeMux
|
||||
httpServer *http.Server
|
||||
|
||||
// websocket options
|
||||
WriteWait time.Duration // Time allowed to write a message to the peer.
|
||||
PongWait time.Duration // Time allowed to read the next pong message from the peer.
|
||||
PingPeriod time.Duration // Send pings to peer with this period. Must be less than pongWait.
|
||||
MaxMessageSize int64 // Maximum message size allowed from peer.
|
||||
|
||||
// NIP-40 expiration manager
|
||||
expirationManager *expirationManager
|
||||
}
|
||||
|
||||
func (rl *Relay) getBaseURL(r *http.Request) string {
|
||||
if rl.ServiceURL != "" {
|
||||
return rl.ServiceURL
|
||||
}
|
||||
|
||||
host := r.Header.Get("X-Forwarded-Host")
|
||||
if host == "" {
|
||||
host = r.Host
|
||||
}
|
||||
proto := r.Header.Get("X-Forwarded-Proto")
|
||||
if proto == "" {
|
||||
if host == "localhost" {
|
||||
proto = "http"
|
||||
} else if strings.Contains(host, ":") {
|
||||
// has a port number
|
||||
proto = "http"
|
||||
} else if _, err := strconv.Atoi(strings.ReplaceAll(host, ".", "")); err == nil {
|
||||
// it's a naked IP
|
||||
proto = "http"
|
||||
} else {
|
||||
proto = "https"
|
||||
}
|
||||
}
|
||||
return proto + "://" + host
|
||||
}
|
||||
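A minimal serving sketch, assuming the relay sits behind a reverse proxy at a hypothetical `relay.example.com`; since `Relay` is an `http.Handler`, plain `http.ListenAndServe` is enough and setting `ServiceURL` bypasses the base-URL guessing in `getBaseURL`:

```go
package main

import (
	"log"
	"net/http"

	"github.com/fiatjaf/khatru"
)

func main() {
	relay := khatru.NewRelay()

	// behind a reverse proxy the X-Forwarded-* heuristics can be skipped entirely
	relay.ServiceURL = "https://relay.example.com"

	// websocket upgrades, NIP-11 and NIP-86 are all dispatched from the same endpoint
	log.Fatal(http.ListenAndServe("0.0.0.0:3334", relay))
}
```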
361
khatru/relay_test.go
Normal file
@@ -0,0 +1,361 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http/httptest"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fiatjaf/eventstore/slicestore"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func TestBasicRelayFunctionality(t *testing.T) {
|
||||
// setup relay with in-memory store
|
||||
relay := NewRelay()
|
||||
store := slicestore.SliceStore{}
|
||||
store.Init()
|
||||
relay.StoreEvent = append(relay.StoreEvent, store.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, store.QueryEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, store.DeleteEvent)
|
||||
|
||||
// start test server
|
||||
server := httptest.NewServer(relay)
|
||||
defer server.Close()
|
||||
|
||||
// create test keys
|
||||
sk1 := nostr.GeneratePrivateKey()
|
||||
pk1, err := nostr.GetPublicKey(sk1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get public key 1: %v", err)
|
||||
}
|
||||
sk2 := nostr.GeneratePrivateKey()
|
||||
pk2, err := nostr.GetPublicKey(sk2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get public key 2: %v", err)
|
||||
}
|
||||
|
||||
// helper to create signed events
|
||||
createEvent := func(sk string, kind int, content string, tags nostr.Tags) nostr.Event {
|
||||
pk, err := nostr.GetPublicKey(sk)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get public key: %v", err)
|
||||
}
|
||||
evt := nostr.Event{
|
||||
PubKey: pk,
|
||||
CreatedAt: nostr.Now(),
|
||||
Kind: kind,
|
||||
Tags: tags,
|
||||
Content: content,
|
||||
}
|
||||
evt.Sign(sk)
|
||||
return evt
|
||||
}
|
||||
|
||||
// connect two test clients
|
||||
url := "ws" + server.URL[4:]
|
||||
client1, err := nostr.RelayConnect(context.Background(), url)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to connect client1: %v", err)
|
||||
}
|
||||
defer client1.Close()
|
||||
|
||||
client2, err := nostr.RelayConnect(context.Background(), url)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to connect client2: %v", err)
|
||||
}
|
||||
defer client2.Close()
|
||||
|
||||
// test 1: store and query events
|
||||
t.Run("store and query events", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
evt1 := createEvent(sk1, 1, "hello world", nil)
|
||||
err := client1.Publish(ctx, evt1)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish event: %v", err)
|
||||
}
|
||||
|
||||
// Query the event back
|
||||
sub, err := client2.Subscribe(ctx, []nostr.Filter{{
|
||||
Authors: []string{pk1},
|
||||
Kinds: []int{1},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// Wait for event
|
||||
select {
|
||||
case env := <-sub.Events:
|
||||
if env.ID != evt1.ID {
|
||||
t.Errorf("got wrong event: %v", env.ID)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for event")
|
||||
}
|
||||
})
|
||||
|
||||
// test 2: live event subscription
|
||||
t.Run("live event subscription", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Setup subscription first
|
||||
sub, err := client1.Subscribe(ctx, []nostr.Filter{{
|
||||
Authors: []string{pk2},
|
||||
Kinds: []int{1},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// Publish event from client2
|
||||
evt2 := createEvent(sk2, 1, "testing live events", nil)
|
||||
err = client2.Publish(ctx, evt2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish event: %v", err)
|
||||
}
|
||||
|
||||
// Wait for event on subscription
|
||||
select {
|
||||
case env := <-sub.Events:
|
||||
if env.ID != evt2.ID {
|
||||
t.Errorf("got wrong event: %v", env.ID)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for live event")
|
||||
}
|
||||
})
|
||||
|
||||
// test 3: event deletion
|
||||
t.Run("event deletion", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create an event to be deleted
|
||||
evt3 := createEvent(sk1, 1, "delete me", nil)
|
||||
err = client1.Publish(ctx, evt3)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish event: %v", err)
|
||||
}
|
||||
|
||||
// Create deletion event
|
||||
delEvent := createEvent(sk1, 5, "deleting", nostr.Tags{{"e", evt3.ID}})
|
||||
err = client1.Publish(ctx, delEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish deletion event: %v", err)
|
||||
}
|
||||
|
||||
// Try to query the deleted event
|
||||
sub, err := client2.Subscribe(ctx, []nostr.Filter{{
|
||||
IDs: []string{evt3.ID},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// Should get EOSE without receiving the deleted event
|
||||
gotEvent := false
|
||||
for {
|
||||
select {
|
||||
case <-sub.Events:
|
||||
gotEvent = true
|
||||
case <-sub.EndOfStoredEvents:
|
||||
if gotEvent {
|
||||
t.Error("should not have received deleted event")
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for EOSE")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// test 4: replaceable events
|
||||
t.Run("replaceable events", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// create initial kind:0 event
|
||||
evt1 := createEvent(sk1, 0, `{"name":"initial"}`, nil)
|
||||
evt1.CreatedAt = 1000 // Set specific timestamp for testing
|
||||
evt1.Sign(sk1)
|
||||
err = client1.Publish(ctx, evt1)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish initial event: %v", err)
|
||||
}
|
||||
|
||||
// create newer event that should replace the first
|
||||
evt2 := createEvent(sk1, 0, `{"name":"newer"}`, nil)
|
||||
evt2.CreatedAt = 2000 // Newer timestamp
|
||||
evt2.Sign(sk1)
|
||||
err = client1.Publish(ctx, evt2)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish newer event: %v", err)
|
||||
}
|
||||
|
||||
// create older event that should not replace the current one
|
||||
evt3 := createEvent(sk1, 0, `{"name":"older"}`, nil)
|
||||
evt3.CreatedAt = 1500 // Older than evt2
|
||||
evt3.Sign(sk1)
|
||||
err = client1.Publish(ctx, evt3)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish older event: %v", err)
|
||||
}
|
||||
|
||||
// query to verify only the newest event exists
|
||||
sub, err := client2.Subscribe(ctx, []nostr.Filter{{
|
||||
Authors: []string{pk1},
|
||||
Kinds: []int{0},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// should only get one event back (the newest one)
|
||||
var receivedEvents []*nostr.Event
|
||||
for {
|
||||
select {
|
||||
case env := <-sub.Events:
|
||||
receivedEvents = append(receivedEvents, env)
|
||||
case <-sub.EndOfStoredEvents:
|
||||
if len(receivedEvents) != 1 {
|
||||
t.Errorf("expected exactly 1 event, got %d", len(receivedEvents))
|
||||
}
|
||||
if len(receivedEvents) > 0 && receivedEvents[0].Content != `{"name":"newer"}` {
|
||||
t.Errorf("expected newest event content, got %s", receivedEvents[0].Content)
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for events")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// test 5: event expiration
|
||||
t.Run("event expiration", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// create a new relay with shorter expiration check interval
|
||||
relay := NewRelay()
|
||||
relay.expirationManager.interval = 3 * time.Second // check every 3 seconds
|
||||
store := slicestore.SliceStore{}
|
||||
store.Init()
|
||||
relay.StoreEvent = append(relay.StoreEvent, store.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, store.QueryEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, store.DeleteEvent)
|
||||
|
||||
// start test server
|
||||
server := httptest.NewServer(relay)
|
||||
defer server.Close()
|
||||
|
||||
// connect test client
|
||||
url := "ws" + server.URL[4:]
|
||||
client, err := nostr.RelayConnect(context.Background(), url)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to connect client: %v", err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
// create event that expires in 2 seconds
|
||||
expiration := strconv.FormatInt(int64(nostr.Now()+2), 10)
|
||||
evt := createEvent(sk1, 1, "i will expire soon", nostr.Tags{{"expiration", expiration}})
|
||||
err = client.Publish(ctx, evt)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish event: %v", err)
|
||||
}
|
||||
|
||||
// verify event exists initially
|
||||
sub, err := client.Subscribe(ctx, []nostr.Filter{{
|
||||
IDs: []string{evt.ID},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
|
||||
// should get the event
|
||||
select {
|
||||
case env := <-sub.Events:
|
||||
if env.ID != evt.ID {
|
||||
t.Error("got wrong event")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for event")
|
||||
}
|
||||
sub.Unsub()
|
||||
|
||||
// wait for expiration check (>3 seconds)
|
||||
time.Sleep(4 * time.Second)
|
||||
|
||||
// verify event no longer exists
|
||||
sub, err = client.Subscribe(ctx, []nostr.Filter{{
|
||||
IDs: []string{evt.ID},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// should get EOSE without receiving the expired event
|
||||
gotEvent := false
|
||||
for {
|
||||
select {
|
||||
case <-sub.Events:
|
||||
gotEvent = true
|
||||
case <-sub.EndOfStoredEvents:
|
||||
if gotEvent {
|
||||
t.Error("should not have received expired event")
|
||||
}
|
||||
return
|
||||
case <-ctx.Done():
|
||||
t.Fatal("timeout waiting for EOSE")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// test 6: unauthorized deletion
|
||||
t.Run("unauthorized deletion", func(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// create an event from client1
|
||||
evt4 := createEvent(sk1, 1, "try to delete me", nil)
|
||||
err = client1.Publish(ctx, evt4)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to publish event: %v", err)
|
||||
}
|
||||
|
||||
// Try to delete it with client2
|
||||
delEvent := createEvent(sk2, 5, "trying to delete", nostr.Tags{{"e", evt4.ID}})
|
||||
err = client2.Publish(ctx, delEvent)
|
||||
if err == nil {
|
||||
t.Fatalf("should have failed to publish deletion event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event still exists
|
||||
sub, err := client1.Subscribe(ctx, []nostr.Filter{{
|
||||
IDs: []string{evt4.ID},
|
||||
}})
|
||||
if err != nil {
|
||||
t.Fatalf("failed to subscribe: %v", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
select {
|
||||
case env := <-sub.Events:
|
||||
if env.ID != evt4.ID {
|
||||
t.Error("got wrong event")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
t.Fatal("event should still exist")
|
||||
}
|
||||
})
|
||||
}
|
||||
119
khatru/responding.go
Normal file
@@ -0,0 +1,119 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip45/hyperloglog"
|
||||
)
|
||||
|
||||
func (rl *Relay) handleRequest(ctx context.Context, id string, eose *sync.WaitGroup, ws *WebSocket, filter nostr.Filter) error {
|
||||
defer eose.Done()
|
||||
|
||||
// overwrite the filter (for example, to eliminate some kinds or
|
||||
// that we know we don't support)
|
||||
for _, ovw := range rl.OverwriteFilter {
|
||||
ovw(ctx, &filter)
|
||||
}
|
||||
|
||||
if filter.LimitZero {
|
||||
// don't do any queries, just subscribe to future events
|
||||
return nil
|
||||
}
|
||||
|
||||
// then check if we'll reject this filter (we apply this after overwriting
|
||||
// because we may, for example, remove some things from the incoming filters
|
||||
// that we know we don't support, and then if the end result is an empty
|
||||
// filter we can just reject it)
|
||||
for _, reject := range rl.RejectFilter {
|
||||
if reject, msg := reject(ctx, filter); reject {
|
||||
return errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
|
||||
}
|
||||
}
|
||||
|
||||
// run the functions to query events (generally just one,
|
||||
// but we might be fetching stuff from multiple places)
|
||||
eose.Add(len(rl.QueryEvents))
|
||||
for _, query := range rl.QueryEvents {
|
||||
ch, err := query(ctx, filter)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
|
||||
eose.Done()
|
||||
continue
|
||||
} else if ch == nil {
|
||||
eose.Done()
|
||||
continue
|
||||
}
|
||||
|
||||
go func(ch chan *nostr.Event) {
|
||||
for event := range ch {
|
||||
for _, ovw := range rl.OverwriteResponseEvent {
|
||||
ovw(ctx, event)
|
||||
}
|
||||
ws.WriteJSON(nostr.EventEnvelope{SubscriptionID: &id, Event: *event})
|
||||
}
|
||||
eose.Done()
|
||||
}(ch)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rl *Relay) handleCountRequest(ctx context.Context, ws *WebSocket, filter nostr.Filter) int64 {
|
||||
// check if we'll reject this filter
|
||||
for _, reject := range rl.RejectCountFilter {
|
||||
if rejecting, msg := reject(ctx, filter); rejecting {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope(msg))
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// run the functions to count (generally it will be just one)
|
||||
var subtotal int64 = 0
|
||||
for _, count := range rl.CountEvents {
|
||||
res, err := count(ctx, filter)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
|
||||
}
|
||||
subtotal += res
|
||||
}
|
||||
|
||||
return subtotal
|
||||
}
|
||||
|
||||
func (rl *Relay) handleCountRequestWithHLL(
|
||||
ctx context.Context,
|
||||
ws *WebSocket,
|
||||
filter nostr.Filter,
|
||||
offset int,
|
||||
) (int64, *hyperloglog.HyperLogLog) {
|
||||
// check if we'll reject this filter
|
||||
for _, reject := range rl.RejectCountFilter {
|
||||
if rejecting, msg := reject(ctx, filter); rejecting {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope(msg))
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
|
||||
// run the functions to count (generally it will be just one)
|
||||
var subtotal int64 = 0
|
||||
var hll *hyperloglog.HyperLogLog
|
||||
for _, countHLL := range rl.CountEventsHLL {
|
||||
res, fhll, err := countHLL(ctx, filter, offset)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nostr.NoticeEnvelope(err.Error()))
|
||||
}
|
||||
subtotal += res
|
||||
if fhll != nil {
|
||||
if hll == nil {
|
||||
hll = fhll
|
||||
} else {
|
||||
hll.Merge(fhll)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return subtotal, hll
|
||||
}
|
||||
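A hedged sketch of a `CountEvents` hook that `handleCountRequest` could sum over, built on top of an existing `QueryEvents` hook; the package and function names are made up for illustration, and draining the query channel like this is only reasonable for small stores:

```go
package khatru_example // hypothetical package, just to keep the sketch self-contained

import (
	"context"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr"
)

// RegisterNaiveCount wires a CountEvents hook that drains the first
// QueryEvents hook and counts what comes out.
func RegisterNaiveCount(relay *khatru.Relay) {
	relay.CountEvents = append(relay.CountEvents,
		func(ctx context.Context, filter nostr.Filter) (int64, error) {
			if len(relay.QueryEvents) == 0 {
				return 0, nil
			}
			ch, err := relay.QueryEvents[0](ctx, filter)
			if err != nil || ch == nil {
				return 0, err
			}
			var n int64
			for range ch {
				n++
			}
			return n, nil
		},
	)
}
```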
67
khatru/router.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
type Router struct{ *Relay }
|
||||
|
||||
type Route struct {
|
||||
eventMatcher func(*nostr.Event) bool
|
||||
filterMatcher func(nostr.Filter) bool
|
||||
relay *Relay
|
||||
}
|
||||
|
||||
type routeBuilder struct {
|
||||
router *Router
|
||||
eventMatcher func(*nostr.Event) bool
|
||||
filterMatcher func(nostr.Filter) bool
|
||||
}
|
||||
|
||||
func NewRouter() *Router {
|
||||
rr := &Router{Relay: NewRelay()}
|
||||
rr.routes = make([]Route, 0, 3)
|
||||
rr.getSubRelayFromFilter = func(f nostr.Filter) *Relay {
|
||||
for _, route := range rr.routes {
|
||||
if route.filterMatcher(f) {
|
||||
return route.relay
|
||||
}
|
||||
}
|
||||
return rr.Relay
|
||||
}
|
||||
rr.getSubRelayFromEvent = func(e *nostr.Event) *Relay {
|
||||
for _, route := range rr.routes {
|
||||
if route.eventMatcher(e) {
|
||||
return route.relay
|
||||
}
|
||||
}
|
||||
return rr.Relay
|
||||
}
|
||||
return rr
|
||||
}
|
||||
|
||||
func (rr *Router) Route() routeBuilder {
|
||||
return routeBuilder{
|
||||
router: rr,
|
||||
filterMatcher: func(f nostr.Filter) bool { return false },
|
||||
eventMatcher: func(e *nostr.Event) bool { return false },
|
||||
}
|
||||
}
|
||||
|
||||
func (rb routeBuilder) Req(fn func(nostr.Filter) bool) routeBuilder {
|
||||
rb.filterMatcher = fn
|
||||
return rb
|
||||
}
|
||||
|
||||
func (rb routeBuilder) Event(fn func(*nostr.Event) bool) routeBuilder {
|
||||
rb.eventMatcher = fn
|
||||
return rb
|
||||
}
|
||||
|
||||
func (rb routeBuilder) Relay(relay *Relay) {
|
||||
rb.router.routes = append(rb.router.routes, Route{
|
||||
filterMatcher: rb.filterMatcher,
|
||||
eventMatcher: rb.eventMatcher,
|
||||
relay: relay,
|
||||
})
|
||||
}
|
||||
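A sketch of the router in use, sending long-form (kind 30023) traffic to its own sub-relay while everything else falls through to the router's embedded relay; the kind choice and address are illustrative:

```go
package main

import (
	"net/http"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr"
)

func main() {
	router := khatru.NewRouter()

	// a dedicated sub-relay for long-form content
	longform := khatru.NewRelay()
	router.Route().
		Req(func(f nostr.Filter) bool { return len(f.Kinds) == 1 && f.Kinds[0] == 30023 }).
		Event(func(e *nostr.Event) bool { return e.Kind == 30023 }).
		Relay(longform)

	http.ListenAndServe(":3334", router)
}
```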
61
khatru/utils.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
const (
|
||||
wsKey = iota
|
||||
subscriptionIdKey
|
||||
nip86HeaderAuthKey
|
||||
internalCallKey
|
||||
)
|
||||
|
||||
func RequestAuth(ctx context.Context) {
|
||||
ws := GetConnection(ctx)
|
||||
ws.authLock.Lock()
|
||||
if ws.Authed == nil {
|
||||
ws.Authed = make(chan struct{})
|
||||
}
|
||||
ws.authLock.Unlock()
|
||||
ws.WriteJSON(nostr.AuthEnvelope{Challenge: &ws.Challenge})
|
||||
}
|
||||
|
||||
func GetConnection(ctx context.Context) *WebSocket {
|
||||
wsi := ctx.Value(wsKey)
|
||||
if wsi != nil {
|
||||
return wsi.(*WebSocket)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetAuthed(ctx context.Context) string {
|
||||
if conn := GetConnection(ctx); conn != nil {
|
||||
return conn.AuthedPublicKey
|
||||
}
|
||||
if nip86Auth := ctx.Value(nip86HeaderAuthKey); nip86Auth != nil {
|
||||
return nip86Auth.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsInternalCall returns true when a call to QueryEvents, for example, is being made because of a deletion
|
||||
// or expiration request.
|
||||
func IsInternalCall(ctx context.Context) bool {
|
||||
return ctx.Value(internalCallKey) != nil
|
||||
}
|
||||
|
||||
func GetIP(ctx context.Context) string {
|
||||
conn := GetConnection(ctx)
|
||||
if conn == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return GetIPFromRequest(conn.Request)
|
||||
}
|
||||
|
||||
func GetSubscriptionID(ctx context.Context) string {
|
||||
return ctx.Value(subscriptionIdKey).(string)
|
||||
}
|
||||
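A sketch of where `IsInternalCall` tends to matter: deletion and expiration lookups also go through `QueryEvents`, so per-user gating usually should not apply to them. The package and function names here are hypothetical:

```go
package khatru_example // hypothetical package for the sketch

import (
	"context"

	"github.com/fiatjaf/khatru"
	"github.com/nbd-wtf/go-nostr"
)

// WrapQueryWithAccessCheck gates a QueryEvents hook on the authed pubkey,
// but lets internal calls (deletions, expirations) through untouched.
func WrapQueryWithAccessCheck(
	inner func(context.Context, nostr.Filter) (chan *nostr.Event, error),
	allowed func(pubkey string) bool,
) func(context.Context, nostr.Filter) (chan *nostr.Event, error) {
	return func(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
		if !khatru.IsInternalCall(ctx) && !allowed(khatru.GetAuthed(ctx)) {
			empty := make(chan *nostr.Event)
			close(empty) // return nothing to unauthorized clients
			return empty, nil
		}
		return inner(ctx, filter)
	}
}
```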
46
khatru/websocket.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type WebSocket struct {
|
||||
conn *websocket.Conn
|
||||
mutex sync.Mutex
|
||||
|
||||
// original request
|
||||
Request *http.Request
|
||||
|
||||
// this Context will be canceled whenever the connection is closed from the client side or server-side.
|
||||
Context context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// nip42
|
||||
Challenge string
|
||||
AuthedPublicKey string
|
||||
Authed chan struct{}
|
||||
|
||||
// nip77
|
||||
negentropySessions *xsync.MapOf[string, *NegentropySession]
|
||||
|
||||
authLock sync.Mutex
|
||||
}
|
||||
|
||||
func (ws *WebSocket) WriteJSON(any any) error {
|
||||
ws.mutex.Lock()
|
||||
err := ws.conn.WriteJSON(any)
|
||||
ws.mutex.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (ws *WebSocket) WriteMessage(t int, b []byte) error {
|
||||
ws.mutex.Lock()
|
||||
err := ws.conn.WriteMessage(t, b)
|
||||
ws.mutex.Unlock()
|
||||
return err
|
||||
}
|
||||