bring in khatru and eventstore.
This commit is contained in:
85
khatru/docs/core/auth.md
Normal file
85
khatru/docs/core/auth.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# NIP-42 `AUTH`
|
||||
|
||||
`khatru` supports [NIP-42](https://nips.nostr.com/42) out of the box. The functionality is exposed in the following ways.
|
||||
|
||||
## Sending arbitrary `AUTH` challenges
|
||||
|
||||
At any time you can send an `AUTH` message to a client that is making a request.
|
||||
|
||||
It makes sense to give the user the option to authenticate right after they establish a connection, for example, when you have a relay that works differently depending on whether the user is authenticated or not.
|
||||
|
||||
```go
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
relay.OnConnect = append(relay.OnConnect, func(ctx context.Context) {
|
||||
khatru.RequestAuth(ctx)
|
||||
})
|
||||
```
|
||||
|
||||
This will send a NIP-42 `AUTH` challenge message to the client so it will have the option to authenticate itself whenever it wants to.
|
||||
|
||||
## Signaling to the client that a specific query requires an authenticated user
|
||||
|
||||
If on `RejectFilter` or `RejectEvent` you prefix the message with `auth-required: `, that will automatically send an `AUTH` message before a `CLOSED` or `OK` with that prefix, such that the client will immediately be able to know it must authenticate to proceed and will already have the challenge required for that, so they can immediately replay the request.
|
||||
|
||||
```go
|
||||
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
|
||||
return true, "auth-required: this query requires you to be authenticated"
|
||||
})
|
||||
relay.RejectEvent = append(relay.RejectEvent, func(ctx context.Context, event *nostr.Event) (bool, string) {
|
||||
return true, "auth-required: publishing this event requires authentication"
|
||||
})
|
||||
```
|
||||
|
||||
## Reading the auth status of a client
|
||||
|
||||
After a client is authenticated and opens a new subscription with `REQ` or sends a new event with `EVENT`, you'll be able to read the public key they're authenticated with.
|
||||
|
||||
```go
|
||||
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
|
||||
authenticatedUser := khatru.GetAuthed(ctx)
|
||||
})
|
||||
```
|
||||
|
||||
## Telling an authenticated user they're still not allowed to do something
|
||||
|
||||
If the user is authenticated but still not allowed (because some specific filters or events are only accessible to some specific users) you can reply on `RejectFilter` or `RejectEvent` with a message prefixed with `"restricted: "` to make that clear to clients.
|
||||
|
||||
```go
|
||||
relay.RejectFilter = append(relay.RejectFilter, func(ctx context.Context, filter nostr.Filter) (bool, string) {
|
||||
authenticatedUser := khatru.GetAuthed(ctx)
|
||||
|
||||
	if slices.Contains(authorizedUsers, authenticatedUser) {
|
||||
		return false, ""
|
||||
} else {
|
||||
return true, "restricted: you're not a member of the privileged group that can read that stuff"
|
||||
}
|
||||
})
|
||||
```
|
||||
|
||||
## Reacting to a successful authentication
|
||||
|
||||
Each `khatru.WebSocket` object has an `.Authed` channel that is closed whenever that connection performs a successful authentication.
|
||||
|
||||
You can use that to emulate a listener for these events in case you want to keep track of who is authenticating in real time and not only check it when they request for something.
|
||||
|
||||
```go
|
||||
relay.OnConnect = append(relay.OnConnect,
|
||||
khatru.RequestAuth,
|
||||
func(ctx context.Context) {
|
||||
go func(ctx context.Context) {
|
||||
conn := khatru.GetConnection(ctx)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
fmt.Println("connection closed")
|
||||
case <-conn.Authed:
|
||||
fmt.Println("authenticated as", conn.AuthedPublicKey)
|
||||
}
|
||||
}(ctx)
|
||||
},
|
||||
)
|
||||
```
|
||||
93
khatru/docs/core/blossom.md
Normal file
93
khatru/docs/core/blossom.md
Normal file
@@ -0,0 +1,93 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# Blossom: Media Storage
|
||||
|
||||
Khatru comes with a built-in Blossom HTTP handler that allows you to store and serve media blobs using whatever storage backend you want (filesystem, S3 etc).
|
||||
|
||||
## Basic Setup
|
||||
|
||||
Here's a minimal example of what you should do to enable it:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
// create blossom server with the relay and service URL
|
||||
bl := blossom.New(relay, "http://localhost:3334")
|
||||
|
||||
// create a database for keeping track of blob metadata
|
||||
// (do not use the same database used for the relay events)
|
||||
bl.Store = blossom.EventStoreBlobIndexWrapper{Store: blobdb, ServiceURL: bl.ServiceURL}
|
||||
|
||||
// implement the required storage functions
|
||||
bl.StoreBlob = append(bl.StoreBlob, func(ctx context.Context, sha256 string, body []byte) error {
|
||||
// store the blob data somewhere
|
||||
return nil
|
||||
})
|
||||
bl.LoadBlob = append(bl.LoadBlob, func(ctx context.Context, sha256 string) (io.ReadSeeker, error) {
|
||||
// load and return the blob data
|
||||
return nil, nil
|
||||
})
|
||||
bl.DeleteBlob = append(bl.DeleteBlob, func(ctx context.Context, sha256 string) error {
|
||||
// delete the blob data
|
||||
return nil
|
||||
})
|
||||
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
```
|
||||
|
||||
## Storage Backend Integration
|
||||
|
||||
You can integrate any storage backend by implementing the three core functions:
|
||||
|
||||
- `StoreBlob`: Save the blob data
|
||||
- `LoadBlob`: Retrieve the blob data
|
||||
- `DeleteBlob`: Remove the blob data
|
||||
|
||||
## Upload Restrictions
|
||||
|
||||
You can implement upload restrictions using the `RejectUpload` hook. Here's an example that limits file size and restricts uploads to whitelisted users:
|
||||
|
||||
```go
|
||||
const maxFileSize = 10 * 1024 * 1024 // 10MB
|
||||
|
||||
var allowedUsers = map[string]bool{
|
||||
"pubkey1": true,
|
||||
"pubkey2": true,
|
||||
}
|
||||
|
||||
bl.RejectUpload = append(bl.RejectUpload, func(ctx context.Context, auth *nostr.Event, size int, ext string) (bool, string, int) {
|
||||
// check file size
|
||||
if size > maxFileSize {
|
||||
return true, "file too large", 413
|
||||
}
|
||||
|
||||
// check if user is allowed
|
||||
if auth == nil || !allowedUsers[auth.PubKey] {
|
||||
return true, "unauthorized", 403
|
||||
}
|
||||
|
||||
return false, "", 0
|
||||
})
|
||||
```
|
||||
|
||||
There are other `Reject*` hooks you can also implement, but this is the most important one.
|
||||
|
||||
## Tracking blob metadata
|
||||
|
||||
Blossom needs a database to keep track of blob metadata in order to know which user owns each blob, for example (and mind you that more than one user might own the same blob, so when one of them deletes the blob we don't actually delete it because the other user still has a claim to it). The simplest way to do it currently is by relying on a wrapper on top of fake Nostr events over eventstore, which is `EventStoreBlobIndexWrapper`, but other solutions can be used.
|
||||
|
||||
```go
|
||||
db := &badger.BadgerBackend{Path: "/tmp/khatru-badger-blossom-blobstore"}
|
||||
db.Init()
|
||||
|
||||
bl.Store = blossom.EventStoreBlobIndexWrapper{
|
||||
Store: db,
|
||||
ServiceURL: bl.ServiceURL,
|
||||
}
|
||||
```
|
||||
|
||||
This will store blob metadata as special `kind:24242` events, but you shouldn't have to worry about it as the wrapper handles all the complexity of tracking ownership and managing blob lifecycle. Just avoid reusing the same datastore that is used for the actual relay events unless you know what you're doing.
|
||||
72
khatru/docs/core/embed.md
Normal file
72
khatru/docs/core/embed.md
Normal file
@@ -0,0 +1,72 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# Mixing a `khatru` relay with other HTTP handlers
|
||||
|
||||
If you already have a web server with all its HTML handlers or a JSON HTTP API or anything like that, something like:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
mux := http.NewServeMux()
|
||||
|
||||
mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
|
||||
mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
|
||||
mux.HandleFunc("/page/{page}", handlePage)
|
||||
mux.HandleFunc("/", handleHomePage)
|
||||
|
||||
log.Printf("listening at http://0.0.0.0:8080")
|
||||
http.ListenAndServe("0.0.0.0:8080", mux)
|
||||
}
|
||||
```
|
||||
|
||||
Then you can easily inject a relay or two there in alternative paths if you want:
|
||||
|
||||
```diff
|
||||
mux := http.NewServeMux()
|
||||
|
||||
+ relay1 := khatru.NewRelay()
|
||||
+ relay2 := khatru.NewRelay()
|
||||
+ // and so on
|
||||
|
||||
mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
|
||||
mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
|
||||
mux.HandleFunc("/page/{page}", handlePage)
|
||||
mux.HandleFunc("/", handleHomePage)
|
||||
+ mux.Handle("/relay1", relay1)
|
||||
+ mux.Handle("/relay2", relay2)
|
||||
+ // and so forth
|
||||
|
||||
log.Printf("listening at http://0.0.0.0:8080")
|
||||
```
|
||||
|
||||
Imagine each of these relay handlers is different, each can be using a different eventstore and have different policies for writing and reading.
|
||||
|
||||
## Exposing a relay interface at the root
|
||||
|
||||
If you want to expose your relay at the root path `/` that is also possible. You can just use it as the `mux` directly:
|
||||
|
||||
```go
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
// ... -- relay configuration steps (omitted for brevity)
|
||||
|
||||
mux := relay.Router() // the relay comes with its own http.ServeMux inside
|
||||
|
||||
mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
|
||||
mux.HandleFunc("/.well-known/nostr.json", handleNIP05)
|
||||
mux.HandleFunc("/page/{page}", handlePage)
|
||||
mux.HandleFunc("/", handleHomePage)
|
||||
|
||||
log.Printf("listening at http://0.0.0.0:8080")
|
||||
http.ListenAndServe("0.0.0.0:8080", mux)
|
||||
}
|
||||
```
|
||||
|
||||
Every [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instance comes with its own [`http.ServeMux`](https://pkg.go.dev/net/http#ServeMux) inside. It ensures all requests are handled normally, but intercepts the requests that are pertinent to the relay operation, specifically the WebSocket requests, and the [NIP-11](https://nips.nostr.com/11) and the [NIP-86](https://nips.nostr.com/86) HTTP requests.
|
||||
|
||||
## Exposing multiple relays at the same path or at the root
|
||||
|
||||
That's also possible, as long as you have a way of differentiating each HTTP request that comes at the middleware level and associating it with a `khatru.Relay` instance in the background.
|
||||
|
||||
See [dynamic](../cookbook/dynamic) for an example that does that using the subdomain. [`countries`](https://git.fiatjaf.com/countries) does it using the requester country implied from its IP address.
|
||||
99
khatru/docs/core/eventstore.md
Normal file
99
khatru/docs/core/eventstore.md
Normal file
@@ -0,0 +1,99 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# Event Storage
|
||||
|
||||
Khatru doesn't make any assumptions about how you'll want to store events. Any function can be plugged in to the `StoreEvent`, `DeleteEvent`, `ReplaceEvent` and `QueryEvents` hooks.
|
||||
|
||||
However the [`eventstore`](https://github.com/fiatjaf/eventstore) library has adapters that you can easily plug into `khatru`'s hooks.
|
||||
|
||||
# Using the `eventstore` library
|
||||
|
||||
The library includes many different adapters -- often called "backends" --, written by different people and with different levels of quality, reliability and speed.
|
||||
|
||||
For all of them you start by instantiating a struct containing some basic options and a pointer (a file path for local databases, a connection string for remote databases) to the data. Then you call `.Init()` and if all is well you're ready to start storing, querying and deleting events, so you can pass the respective functions to their `khatru` counterparts. These eventstores also expose a `.Close()` function that must be called if you're going to stop using that store and keep your application open.
|
||||
|
||||
Here's an example with the [Badger](https://pkg.go.dev/github.com/fiatjaf/eventstore/badger) adapter, made for the [Badger](https://github.com/dgraph-io/badger) embedded key-value database:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/fiatjaf/eventstore/badger"
|
||||
"github.com/fiatjaf/khatru"
|
||||
)
|
||||
|
||||
func main() {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
db := badger.BadgerBackend{Path: "/tmp/khatru-badger-tmp"}
|
||||
if err := db.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
relay.StoreEvent = append(relay.StoreEvent, db.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
|
||||
relay.CountEvents = append(relay.CountEvents, db.CountEvents)
|
||||
relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
|
||||
relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
|
||||
|
||||
fmt.Println("running on :3334")
|
||||
http.ListenAndServe(":3334", relay)
|
||||
}
|
||||
```
|
||||
|
||||
[LMDB](https://pkg.go.dev/github.com/fiatjaf/eventstore/lmdb) works the same way.
|
||||
|
||||
[SQLite](https://pkg.go.dev/github.com/fiatjaf/eventstore/sqlite3) also stores things locally so it only needs a `Path`.
|
||||
|
||||
[PostgreSQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/postgresql) and [MySQL](https://pkg.go.dev/github.com/fiatjaf/eventstore/mysql) use remote connections to database servers, so they take a `DatabaseURL` parameter, but after that it's the same.
|
||||
|
||||
## Using two at a time
|
||||
|
||||
If you want to use two different adapters at the same time that's easy. Just add both to the corresponding slices:
|
||||
|
||||
```go
|
||||
relay.StoreEvent = append(relay.StoreEvent, db1.SaveEvent, db2.SaveEvent)
|
||||
relay.QueryEvents = append(relay.QueryEvents, db1.QueryEvents, db2.QueryEvents)
|
||||
```
|
||||
|
||||
But that will duplicate events on both and then return duplicated events on each query.
|
||||
|
||||
## Sharding
|
||||
|
||||
You can do a kind of sharding by storing some events in one store and others in another.
|
||||
|
||||
For example, maybe you want kind 1 events in `db1` and kind 30023 events in `db30023`:
|
||||
|
||||
```go
|
||||
relay.StoreEvent = append(relay.StoreEvent, func (ctx context.Context, evt *nostr.Event) error {
|
||||
switch evt.Kind {
|
||||
case 1:
|
||||
return db1.StoreEvent(ctx, evt)
|
||||
case 30023:
|
||||
return db30023.StoreEvent(ctx, evt)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
})
|
||||
relay.QueryEvents = append(relay.QueryEvents, func (ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
|
||||
for _, kind := range filter.Kinds {
|
||||
switch kind {
|
||||
case 1:
|
||||
filter1 := filter
|
||||
filter1.Kinds = []int{1}
|
||||
return db1.QueryEvents(ctx, filter1)
|
||||
case 30023:
|
||||
filter30023 := filter
|
||||
filter30023.Kinds = []int{30023}
|
||||
return db30023.QueryEvents(ctx, filter30023)
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
})
|
||||
```
|
||||
85
khatru/docs/core/management.md
Normal file
85
khatru/docs/core/management.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# Management API
|
||||
|
||||
[NIP-86](https://nips.nostr.com/86) specifies a set of RPC methods for managing the boring aspects of relays, such as whitelisting or banning users, banning individual events, banning IPs and so on.
|
||||
|
||||
All [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) instances expose a field `ManagementAPI` with a [`RelayManagementAPI`](https://pkg.go.dev/github.com/fiatjaf/khatru#RelayManagementAPI) instance inside, which can be used for creating handlers for each of the RPC methods.
|
||||
|
||||
There is also a generic `RejectAPICall`, which is a slice of functions that will be called before any RPC method, if they exist, and if any of them returns true the request will be rejected.
|
||||
|
||||
The most basic implementation of a `RejectAPICall` handler would be one that checks the public key of the caller with a hardcoded public key of the relay owner:
|
||||
|
||||
```go
|
||||
var owner = "<my-own-pubkey>"
|
||||
var allowedPubkeys = make([]string, 0, 10)
|
||||
|
||||
func main () {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
relay.ManagementAPI.RejectAPICall = append(relay.ManagementAPI.RejectAPICall,
|
||||
func(ctx context.Context, mp nip86.MethodParams) (reject bool, msg string) {
|
||||
user := khatru.GetAuthed(ctx)
|
||||
if user != owner {
|
||||
return true, "go away, intruder"
|
||||
}
|
||||
return false, ""
|
||||
		},
|
||||
)
|
||||
|
||||
relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error {
|
||||
allowedPubkeys = append(allowedPubkeys, pubkey)
|
||||
return nil
|
||||
}
|
||||
relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error {
|
||||
idx := slices.Index(allowedPubkeys, pubkey)
|
||||
if idx == -1 {
|
||||
return fmt.Errorf("pubkey already not allowed")
|
||||
}
|
||||
		allowedPubkeys = slices.Delete(allowedPubkeys, idx, idx+1)
		return nil
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can also not provide any `RejectAPICall` handler and do the approval specifically on each RPC handler.
|
||||
|
||||
In the following example any current member can include any other pubkey, and anyone who was added before is able to remove any pubkey that was added afterwards (not a very good idea, but serves as an example).
|
||||
|
||||
```go
|
||||
var allowedPubkeys = []string{"<my-own-pubkey>"}
|
||||
|
||||
func main () {
|
||||
relay := khatru.NewRelay()
|
||||
|
||||
relay.ManagementAPI.AllowPubKey = func(ctx context.Context, pubkey string, reason string) error {
|
||||
caller := khatru.GetAuthed(ctx)
|
||||
|
||||
if slices.Contains(allowedPubkeys, caller) {
|
||||
allowedPubkeys = append(allowedPubkeys, pubkey)
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("you're not authorized")
|
||||
}
|
||||
relay.ManagementAPI.BanPubKey = func(ctx context.Context, pubkey string, reason string) error {
|
||||
caller := khatru.GetAuthed(ctx)
|
||||
|
||||
callerIdx := slices.Index(allowedPubkeys, caller)
|
||||
if callerIdx == -1 {
|
||||
return fmt.Errorf("you're not even allowed here")
|
||||
}
|
||||
|
||||
targetIdx := slices.Index(allowedPubkeys, pubkey)
|
||||
if targetIdx < callerIdx {
|
||||
// target is a bigger OG than the caller, so it has bigger influence and can't be removed
|
||||
return fmt.Errorf("you're less powerful than the pubkey you're trying to remove")
|
||||
}
|
||||
|
||||
// allow deletion since the target came after the caller
|
||||
allowedPubkeys = slices.Delete(allowedPubkeys, targetIdx, targetIdx+1)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
```
|
||||
63
khatru/docs/core/routing.md
Normal file
63
khatru/docs/core/routing.md
Normal file
@@ -0,0 +1,63 @@
|
||||
---
|
||||
outline: deep
|
||||
---
|
||||
|
||||
# Request Routing
|
||||
|
||||
If you have one (or more) set of policies that have to be executed in sequence (for example, first you check for the presence of a tag, then later in the next policies you use that tag without checking) and they only apply to some class of events, while you still want your relay to deal with other classes of events, that can lead to cumbersome sets of rules, always having to check if an event meets the requirements and so on. This is where routing can help you.
|
||||
|
||||
It also can be handy if you get a [`khatru.Relay`](https://pkg.go.dev/github.com/fiatjaf/khatru#Relay) from somewhere else, like a library such as [`relay29`](https://github.com/fiatjaf/relay29), and you want to combine it with other policies without some interfering with the others. As in the example below:
|
||||
|
||||
```go
|
||||
sk := os.Getenv("RELAY_SECRET_KEY")
|
||||
|
||||
// a relay for NIP-29 groups
|
||||
groupsStore := badger.BadgerBackend{}
|
||||
groupsStore.Init()
|
||||
groupsRelay, _ := khatru29.Init(relay29.Options{Domain: "example.com", DB: groupsStore, SecretKey: sk})
|
||||
// ...
|
||||
|
||||
// a relay for everything else
|
||||
publicStore := slicestore.SliceStore{}
|
||||
publicStore.Init()
|
||||
publicRelay := khatru.NewRelay()
|
||||
publicRelay.StoreEvent = append(publicRelay.StoreEvent, publicStore.SaveEvent)
|
||||
publicRelay.QueryEvents = append(publicRelay.QueryEvents, publicStore.QueryEvents)
|
||||
publicRelay.CountEvents = append(publicRelay.CountEvents, publicStore.CountEvents)
|
||||
publicRelay.DeleteEvent = append(publicRelay.DeleteEvent, publicStore.DeleteEvent)
|
||||
// ...
|
||||
|
||||
// a higher-level relay that just routes between the two above
|
||||
router := khatru.NewRouter()
|
||||
|
||||
// route requests and events to the groups relay
|
||||
router.Route().
|
||||
Req(func (filter nostr.Filter) bool {
|
||||
_, hasHTag := filter.Tags["h"]
|
||||
if hasHTag {
|
||||
return true
|
||||
}
|
||||
		return slices.ContainsFunc(filter.Kinds, func(k int) bool { return k == 39000 || k == 39001 || k == 39002 })
|
||||
}).
|
||||
Event(func (event *nostr.Event) bool {
|
||||
switch {
|
||||
case event.Kind <= 9021 && event.Kind >= 9000:
|
||||
return true
|
||||
case event.Kind <= 39010 && event.Kind >= 39000:
|
||||
return true
|
||||
case event.Kind <= 12 && event.Kind >= 9:
|
||||
return true
|
||||
case event.Tags.Find("h") != nil:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}).
|
||||
Relay(groupsRelay)
|
||||
|
||||
// route requests and events to the other
|
||||
router.Route().
|
||||
Req(func (filter nostr.Filter) bool { return true }).
|
||||
Event(func (event *nostr.Event) bool { return true }).
|
||||
Relay(publicRelay)
|
||||
```
|
||||
Reference in New Issue
Block a user