Compare commits

...

59 Commits

Author SHA1 Message Date
binwiederhier
e55d1cee6b Attachment fixes to address inconsistencies between DB and backend store 2026-03-24 17:37:50 -04:00
binwiederhier
dd760f16f7 Merge branch 'main' of github.com:binwiederhier/ntfy 2026-03-23 13:05:09 -04:00
binwiederhier
d00277107a Release notes 2026-03-23 13:04:47 -04:00
binwiederhier
b95efe8dd3 Tighten message IDs in attachments 2026-03-23 12:54:13 -04:00
binwiederhier
075f2ffa15 Refine sync() to properly update sizes 2026-03-23 12:44:40 -04:00
Philipp C. Heckel
2f6a044c34 Merge pull request #1656 from binwiederhier/attachment-s3
S3 attachment storage
2026-03-22 21:33:07 -04:00
binwiederhier
e6192c94bd Docs updates 2026-03-22 21:24:43 -04:00
binwiederhier
e87a3e62fe Fix workflows to not double run 2026-03-22 21:11:21 -04:00
binwiederhier
233ec0973d bump wait time 2026-03-22 21:01:33 -04:00
binwiederhier
69cc80ec1e Add comments about AWS S3 2026-03-22 20:52:25 -04:00
binwiederhier
4d07897d2d RWMutex 2026-03-22 16:20:45 -04:00
binwiederhier
a04128520d Run S3 tests in CI 2026-03-22 16:17:17 -04:00
binwiederhier
ef051afc09 Update base-url in examples 2026-03-22 16:06:57 -04:00
binwiederhier
f8397838e6 Update docs 2026-03-22 16:05:48 -04:00
binwiederhier
59ec76e8b2 Fix brittle tests, move delete batching into client package, run s3 tests against real bucket 2026-03-22 15:10:28 -04:00
binwiederhier
9b3408d4c4 Merge branch 'main' of github.com:binwiederhier/ntfy into attachment-s3 2026-03-22 08:43:45 -04:00
binwiederhier
536c6f5807 More consistent logging 2026-03-22 08:38:41 -04:00
binwiederhier
56b63c475c Remove MinIO mention 2026-03-22 08:28:18 -04:00
binwiederhier
fa33d63138 More tests 2026-03-22 08:26:18 -04:00
binwiederhier
f76135c5ab Large objects test 2026-03-22 08:23:46 -04:00
binwiederhier
f2d4575831 Use real S3 for tests 2026-03-22 08:15:23 -04:00
binwiederhier
ad501feab1 Rewrite tests 2026-03-21 21:59:59 -04:00
binwiederhier
b81218953a Allow streaming to S3 2026-03-21 21:14:49 -04:00
Philipp C. Heckel
6f47d77200 Merge pull request #1670 from nihalgonsalves/ng/fix-dockerfile-build
build: fix Dockerfile-build, bump EoL go/node
2026-03-21 20:56:42 -04:00
Nihal Gonsalves
d5466f30e5 perf: speed up deps using buildkit cache 2026-03-22 01:22:13 +01:00
Nihal Gonsalves
52cad203fa build: fix Dockerfile-build, bump EoL go/node 2026-03-22 00:50:47 +01:00
binwiederhier
b3a8f18019 Docs 2026-03-21 17:03:29 -04:00
binwiederhier
78d3138565 Fix flaky test 2026-03-21 16:54:16 -04:00
binwiederhier
6a820b5030 Tags 2026-03-21 16:29:58 -04:00
binwiederhier
1742302f83 More tests and human review 2026-03-21 16:27:41 -04:00
Philipp C. Heckel
f25f0e21f0 Merge pull request #1668 from nihalgonsalves/patch-1
Update web app FAQ to include `require-login`
2026-03-21 16:25:08 -04:00
Nihal Gonsalves
22225c5bd0 Update web app FAQ to include require-login 2026-03-21 21:20:15 +01:00
binwiederhier
393f730d11 More manual review and refinement 2026-03-21 16:12:46 -04:00
binwiederhier
02ea09ab0f Refine, manual review, re-org 2026-03-21 15:52:45 -04:00
binwiederhier
039ef19e6c Merge branch 'main' into attachment-s3 2026-03-19 22:44:30 -04:00
binwiederhier
f9974d8a2f Merge branch 'main' of https://hosted.weblate.org/git/ntfy/web 2026-03-19 22:44:15 -04:00
binwiederhier
0d162b984b Bump 2026-03-19 22:44:08 -04:00
binwiederhier
1f270b68e0 Simplify a little, manual review 2026-03-19 22:42:38 -04:00
binwiederhier
d86e20173c Move stuff around 2026-03-19 21:46:52 -04:00
binwiederhier
a1b403d23c Remove s3 config option, reduce size when removing files 2026-03-19 21:11:36 -04:00
Guillaume Petit
5ca0cf9912 Translated using Weblate (French)
Currently translated at 100.0% (407 of 407 strings)

Translation: ntfy/Web app
Translate-URL: https://hosted.weblate.org/projects/ntfy/web/fr/
2026-03-19 18:09:47 +01:00
binwiederhier
ef314960d0 Refactor 2026-03-17 20:53:41 -04:00
binwiederhier
cffa57950a Logs 2026-03-17 16:25:45 -04:00
binwiederhier
a47d692cbf Fix bug 2026-03-17 07:50:28 -04:00
binwiederhier
6b11bc7468 Merge branch 'main' into attachment-s3 2026-03-16 21:11:38 -04:00
binwiederhier
d9efe50848 Email validation 2026-03-16 21:03:33 -04:00
binwiederhier
2ad78edca1 Release notes 2026-03-16 20:13:39 -04:00
binwiederhier
86015e100c Multipart upload 2026-03-16 20:00:19 -04:00
binwiederhier
458fbad770 Merge branch 'main' into attachment-s3 2026-03-16 15:53:21 -04:00
binwiederhier
9b1a32ec56 Refine 2026-03-16 15:52:57 -04:00
binwiederhier
3d9ce69042 PG races 2026-03-16 15:48:36 -04:00
binwiederhier
59ce581ba2 Fix postgres primary/replica races 2026-03-16 11:21:21 -04:00
binwiederhier
df82fdf44c Add HTTP 413 to normal errors to not log 2026-03-16 10:27:23 -04:00
binwiederhier
3a37ea32f7 Webpush: Fix FK issue with Postgres 2026-03-16 10:24:16 -04:00
binwiederhier
790ba243c7 S3 WIP 2026-03-16 09:48:26 -04:00
binwiederhier
4487299a80 Merge branch 'main' into attachment-s3 2026-03-16 05:43:20 -04:00
binwiederhier
6b38acb23a Route authorization query to read-only database replica to reduce primary database load 2026-03-15 22:01:19 -04:00
binwiederhier
b4ec6fa8df AWS deps.. 2026-03-15 10:12:23 -04:00
binwiederhier
d517ce4a2a WIP: S3 2026-03-14 21:10:46 -04:00
55 changed files with 3505 additions and 747 deletions


@@ -1,5 +1,8 @@
name: build
on: [ push, pull_request ]
on:
push:
branches: [ main ]
pull_request:
jobs:
build:
runs-on: ubuntu-latest
@@ -9,11 +12,11 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.24.x'
go-version: '1.25.x'
- name: Install node
uses: actions/setup-node@v3
with:
node-version: '20'
node-version: '24'
cache: 'npm'
cache-dependency-path: './web/package-lock.json'
- name: Install dependencies


@@ -22,17 +22,18 @@ jobs:
--health-retries 5
env:
NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable"
NTFY_TEST_S3_URL: ${{ secrets.NTFY_TEST_S3_URL }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.24.x'
go-version: '1.25.x'
- name: Install node
uses: actions/setup-node@v3
with:
node-version: '20'
node-version: '24'
cache: 'npm'
cache-dependency-path: './web/package-lock.json'
- name: Docker login


@@ -1,5 +1,8 @@
name: test
on: [ push, pull_request ]
on:
push:
branches: [ main ]
pull_request:
jobs:
test:
runs-on: ubuntu-latest
@@ -19,17 +22,18 @@ jobs:
--health-retries 5
env:
NTFY_TEST_DATABASE_URL: "postgres://ntfy:ntfy@localhost:5432/ntfy_test?sslmode=disable"
NTFY_TEST_S3_URL: ${{ secrets.NTFY_TEST_S3_URL }}
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.24.x'
go-version: '1.25.x'
- name: Install node
uses: actions/setup-node@v3
with:
node-version: '20'
node-version: '24'
cache: 'npm'
cache-dependency-path: './web/package-lock.json'
- name: Install dependencies

1
.gitignore vendored

@@ -9,6 +9,7 @@ server/site/
tools/fbsend/fbsend
tools/pgimport/pgimport
tools/loadtest/loadtest
tools/s3cli/s3cli
playground/
secrets/
*.iml


@@ -1,8 +1,8 @@
FROM golang:1.24-bullseye as builder
FROM golang:1.25-bookworm AS builder
ARG VERSION=dev
ARG COMMIT=unknown
ARG NODE_MAJOR=18
ARG NODE_MAJOR=24
RUN apt-get update && apt-get install -y \
build-essential ca-certificates curl gnupg \
@@ -21,14 +21,14 @@ ADD Makefile .
# docs
ADD ./requirements.txt .
RUN make docs-deps
RUN --mount=type=cache,target=/root/.cache/pip make docs-deps
ADD ./mkdocs.yml .
ADD ./docs ./docs
RUN make docs-build
# web
ADD ./web/package.json ./web/package-lock.json ./web/
RUN make web-deps
RUN --mount=type=cache,target=/root/.npm make web-deps
ADD ./web ./web
RUN make web-build
@@ -41,7 +41,11 @@ ADD ./server ./server
ADD ./user ./user
ADD ./util ./util
ADD ./payments ./payments
RUN make VERSION=$VERSION COMMIT=$COMMIT cli-linux-server
ADD ./db ./db
ADD ./message ./message
ADD ./model ./model
ADD ./webpush ./webpush
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build make VERSION=$VERSION COMMIT=$COMMIT cli-linux-server
FROM alpine

23
attachment/backend.go Normal file

@@ -0,0 +1,23 @@
package attachment
import (
"io"
"time"
)
// object represents an object stored in a backend.
type object struct {
ID string
Size int64
LastModified time.Time
}
// backend is a minimal I/O interface for storing and retrieving attachment files.
// It has no knowledge of size tracking, limiting, or ID validation.
type backend interface {
Put(id string, reader io.Reader, untrustedLength int64) error
Get(id string) (io.ReadCloser, int64, error)
List() ([]object, error)
Delete(ids ...string) error
DeleteIncomplete(cutoff time.Time) error
}
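To make the contract concrete, here is a hypothetical in-memory implementation of the `backend` interface. This is an illustration only — `memBackend` and its behavior are assumptions, not part of this changeset:

``` go
package attachment

import (
	"bytes"
	"errors"
	"io"
	"sync"
	"time"
)

// memBackend is a hypothetical in-memory backend, shown only to
// illustrate the interface contract; it is not part of this changeset.
type memBackend struct {
	mu   sync.Mutex
	data map[string][]byte
	mod  map[string]time.Time
}

var _ backend = (*memBackend)(nil)

// Put reads the entire object into memory. untrustedLength is only a hint
// from the client; real backends may use it to pre-size or stream uploads.
func (b *memBackend) Put(id string, reader io.Reader, untrustedLength int64) error {
	buf, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.data == nil {
		b.data, b.mod = map[string][]byte{}, map[string]time.Time{}
	}
	b.data[id], b.mod[id] = buf, time.Now()
	return nil
}

func (b *memBackend) Get(id string) (io.ReadCloser, int64, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	buf, ok := b.data[id]
	if !ok {
		return nil, 0, errors.New("not found")
	}
	return io.NopCloser(bytes.NewReader(buf)), int64(len(buf)), nil
}

func (b *memBackend) List() ([]object, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	objects := make([]object, 0, len(b.data))
	for id, buf := range b.data {
		objects = append(objects, object{ID: id, Size: int64(len(buf)), LastModified: b.mod[id]})
	}
	return objects, nil
}

func (b *memBackend) Delete(ids ...string) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	for _, id := range ids {
		delete(b.data, id)
		delete(b.mod, id)
	}
	return nil
}

// DeleteIncomplete is a no-op: partial uploads cannot exist in memory.
func (b *memBackend) DeleteIncomplete(time.Time) error {
	return nil
}
```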


@@ -0,0 +1,94 @@
package attachment
import (
"fmt"
"io"
"os"
"path/filepath"
"time"
)
type fileBackend struct {
dir string
}
var _ backend = (*fileBackend)(nil)
func newFileBackend(dir string) (*fileBackend, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
return &fileBackend{dir: dir}, nil
}
func (b *fileBackend) Put(id string, reader io.Reader, untrustedLength int64) error {
if untrustedLength > 0 {
reader = io.LimitReader(reader, untrustedLength)
}
file := filepath.Join(b.dir, id)
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
n, err := io.Copy(f, reader)
if err != nil {
os.Remove(file)
return err
} else if untrustedLength > 0 && n != untrustedLength {
os.Remove(file)
return fmt.Errorf("content length mismatch: claimed %d, got %d", untrustedLength, n)
}
if err := f.Close(); err != nil {
os.Remove(file)
return err
}
return nil
}
func (b *fileBackend) List() ([]object, error) {
entries, err := os.ReadDir(b.dir)
if err != nil {
return nil, err
}
objects := make([]object, 0, len(entries))
for _, e := range entries {
info, err := e.Info()
if err != nil {
return nil, err
}
objects = append(objects, object{
ID: e.Name(),
Size: info.Size(),
LastModified: info.ModTime(),
})
}
return objects, nil
}
func (b *fileBackend) Get(id string) (io.ReadCloser, int64, error) {
file := filepath.Join(b.dir, id)
stat, err := os.Stat(file)
if err != nil {
return nil, 0, err
}
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
return f, stat.Size(), nil
}
func (b *fileBackend) Delete(ids ...string) error {
for _, id := range ids {
file := filepath.Join(b.dir, id)
if err := os.Remove(file); err != nil && !os.IsNotExist(err) {
return err
}
}
return nil
}
func (b *fileBackend) DeleteIncomplete(_ time.Time) error {
return nil
}

51
attachment/backend_s3.go Normal file

@@ -0,0 +1,51 @@
package attachment
import (
"context"
"io"
"time"
"heckel.io/ntfy/v2/s3"
)
type s3Backend struct {
client *s3.Client
}
var _ backend = (*s3Backend)(nil)
func newS3Backend(client *s3.Client) *s3Backend {
return &s3Backend{client: client}
}
func (b *s3Backend) Put(id string, reader io.Reader, untrustedLength int64) error {
return b.client.PutObject(context.Background(), id, reader, untrustedLength)
}
func (b *s3Backend) Get(id string) (io.ReadCloser, int64, error) {
return b.client.GetObject(context.Background(), id)
}
func (b *s3Backend) List() ([]object, error) {
objects, err := b.client.ListObjectsV2(context.Background())
if err != nil {
return nil, err
}
result := make([]object, 0, len(objects))
for _, obj := range objects {
result = append(result, object{
ID: obj.Key,
Size: obj.Size,
LastModified: obj.LastModified,
})
}
return result, nil
}
func (b *s3Backend) Delete(ids ...string) error {
return b.client.DeleteObjects(context.Background(), ids)
}
func (b *s3Backend) DeleteIncomplete(cutoff time.Time) error {
return b.client.AbortIncompleteUploads(context.Background(), cutoff)
}

234
attachment/store.go Normal file

@@ -0,0 +1,234 @@
package attachment
import (
"errors"
"fmt"
"io"
"sync"
"time"
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/model"
"heckel.io/ntfy/v2/s3"
"heckel.io/ntfy/v2/util"
)
const (
tagStore = "attachment_store"
syncInterval = 15 * time.Minute // How often to run the background sync loop
orphanGracePeriod = time.Hour // Don't delete orphaned objects younger than this to avoid races with in-flight uploads
)
var errInvalidFileID = errors.New("invalid file ID")
// Store manages attachment storage with shared logic for size tracking, limiting,
// ID validation, and background sync to reconcile storage with the database.
type Store struct {
backend backend
limit int64 // Defined limit of the store in bytes
size int64 // Current size of the store in bytes
sizes map[string]int64 // File ID -> size, for subtracting on Remove
attachmentsWithSizes func() (map[string]int64, error) // Returns file ID -> size for active attachments
closeChan chan struct{}
mu sync.RWMutex // Protects size and sizes
}
// NewFileStore creates a new file-system backed attachment cache
func NewFileStore(dir string, totalSizeLimit int64, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
b, err := newFileBackend(dir)
if err != nil {
return nil, err
}
return newStore(b, totalSizeLimit, attachmentsWithSizes)
}
// NewS3Store creates a new S3-backed attachment cache. The s3URL must be in the format:
//
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
func NewS3Store(s3URL string, totalSizeLimit int64, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
config, err := s3.ParseURL(s3URL)
if err != nil {
return nil, err
}
return newStore(newS3Backend(s3.New(config)), totalSizeLimit, attachmentsWithSizes)
}
func newStore(backend backend, totalSizeLimit int64, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
c := &Store{
backend: backend,
limit: totalSizeLimit,
sizes: make(map[string]int64),
attachmentsWithSizes: attachmentsWithSizes,
closeChan: make(chan struct{}),
}
// Hydrate sizes from the database immediately so that Size()/Remaining()/Remove()
// are accurate from the start, without waiting for the first sync() call.
if attachmentsWithSizes != nil {
attachments, err := attachmentsWithSizes()
if err != nil {
return nil, fmt.Errorf("attachment store: failed to load existing attachments: %w", err)
}
for id, size := range attachments {
c.sizes[id] = size
c.size += size
}
go c.syncLoop()
}
return c, nil
}
// Write stores an attachment file. The id is validated, and the write is subject to
// the total size limit and any additional limiters. The untrustedLength is a hint
// from the client's Content-Length header; backends may use it to optimize uploads (e.g.
// streaming directly to S3 without buffering).
func (c *Store) Write(id string, reader io.Reader, untrustedLength int64, limiters ...util.Limiter) (int64, error) {
if !model.ValidMessageID(id) {
return 0, errInvalidFileID
}
log.Tag(tagStore).Field("message_id", id).Debug("Writing attachment")
limiters = append(limiters, util.NewFixedLimiter(c.Remaining()))
countingReader := util.NewCountingReader(reader)
limitReader := util.NewLimitReader(countingReader, limiters...)
if err := c.backend.Put(id, limitReader, untrustedLength); err != nil {
c.backend.Delete(id) //nolint:errcheck
return 0, err
}
size := countingReader.Total()
c.mu.Lock()
c.size += size
c.sizes[id] = size
c.mu.Unlock()
return size, nil
}
// Read retrieves an attachment file by ID
func (c *Store) Read(id string) (io.ReadCloser, int64, error) {
if !model.ValidMessageID(id) {
return nil, 0, errInvalidFileID
}
return c.backend.Get(id)
}
// Remove deletes attachment files by ID and subtracts their known sizes from
// the total. Sizes for objects not tracked (e.g. written before this process
// started and before the first sync) are corrected by the next sync() call.
func (c *Store) Remove(ids ...string) error {
for _, id := range ids {
if !model.ValidMessageID(id) {
return errInvalidFileID
}
}
// Remove from backend
for _, id := range ids {
log.Tag(tagStore).Field("message_id", id).Debug("Removing attachment")
}
if err := c.backend.Delete(ids...); err != nil {
return err
}
// Update total cache size
c.mu.Lock()
for _, id := range ids {
if size, ok := c.sizes[id]; ok {
c.size -= size
delete(c.sizes, id)
}
}
if c.size < 0 {
c.size = 0
}
c.mu.Unlock()
return nil
}
// sync reconciles the backend storage with the database. It lists all objects,
// deletes orphans (not in the valid ID set and older than 1 hour), and recomputes
// the total size from the existing attachments in the database.
func (c *Store) sync() error {
if c.attachmentsWithSizes == nil {
return nil
}
attachmentsWithSizes, err := c.attachmentsWithSizes()
if err != nil {
return fmt.Errorf("attachment sync: failed to get existing attachments: %w", err)
}
remoteObjects, err := c.backend.List()
if err != nil {
return fmt.Errorf("attachment sync: failed to list objects: %w", err)
}
// Calculate total cache size and collect orphaned attachments, excluding objects younger
// than the grace period to account for races, and skipping objects with invalid IDs.
cutoff := time.Now().Add(-orphanGracePeriod)
var orphanIDs []string
var count, totalSize int64
sizes := make(map[string]int64, len(remoteObjects))
for _, obj := range remoteObjects {
if !model.ValidMessageID(obj.ID) {
continue
}
if _, ok := attachmentsWithSizes[obj.ID]; !ok && obj.LastModified.Before(cutoff) {
orphanIDs = append(orphanIDs, obj.ID)
} else {
count++
totalSize += attachmentsWithSizes[obj.ID]
sizes[obj.ID] = attachmentsWithSizes[obj.ID]
}
}
log.Tag(tagStore).Debug("Attachment store updated: %d attachment(s), %s", count, util.FormatSizeHuman(totalSize))
c.mu.Lock()
c.size = totalSize
c.sizes = sizes
c.mu.Unlock()
// Delete orphaned attachments
if len(orphanIDs) > 0 {
log.Tag(tagStore).Debug("Deleting %d orphaned attachment(s)", len(orphanIDs))
if err := c.backend.Delete(orphanIDs...); err != nil {
return fmt.Errorf("attachment sync: failed to delete orphaned objects: %w", err)
}
}
// Clean up incomplete uploads (S3 only)
if err := c.backend.DeleteIncomplete(cutoff); err != nil {
log.Tag(tagStore).Err(err).Warn("Failed to abort incomplete uploads from attachment cache")
}
return nil
}
// Size returns the current total size of all attachments
func (c *Store) Size() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
return c.size
}
// Remaining returns the remaining capacity for attachments
func (c *Store) Remaining() int64 {
c.mu.RLock()
defer c.mu.RUnlock()
remaining := c.limit - c.size
if remaining < 0 {
return 0
}
return remaining
}
// Close stops the background sync goroutine
func (c *Store) Close() {
close(c.closeChan)
}
func (c *Store) syncLoop() {
if err := c.sync(); err != nil {
log.Tag(tagStore).Err(err).Warn("Attachment sync failed")
}
ticker := time.NewTicker(syncInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := c.sync(); err != nil {
log.Tag(tagStore).Err(err).Warn("Attachment sync failed")
}
case <-c.closeChan:
return
}
}
}
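For orientation, here is a minimal usage sketch of the `Store` API defined above. The import path follows the `heckel.io/ntfy/v2/...` paths seen in this changeset; the directory, size limit, and message ID are placeholder values, and passing `nil` for `attachmentsWithSizes` disables the background sync loop, as the unit tests do:

``` go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"heckel.io/ntfy/v2/attachment"
)

func main() {
	// Placeholder directory and 5 MB total limit; nil disables background sync
	store, err := attachment.NewFileStore("/tmp/ntfy-attachments", 5*1024*1024, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// IDs must pass model.ValidMessageID; twelve characters, as in the tests
	if _, err := store.Write("abcdefghijkl", strings.NewReader("hello world"), 0); err != nil {
		log.Fatal(err)
	}
	reader, size, err := store.Read("abcdefghijkl")
	if err != nil {
		log.Fatal(err)
	}
	defer reader.Close()
	body, _ := io.ReadAll(reader)
	fmt.Printf("read %d bytes: %s\n", size, body)

	if err := store.Remove("abcdefghijkl"); err != nil {
		log.Fatal(err)
	}
}
```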


@@ -0,0 +1,16 @@
package attachment
import (
"testing"
"github.com/stretchr/testify/require"
)
func newTestFileStore(t *testing.T, totalSizeLimit int64) (dir string, cache *Store) {
t.Helper()
dir = t.TempDir()
cache, err := NewFileStore(dir, totalSizeLimit, nil)
require.Nil(t, err)
t.Cleanup(func() { cache.Close() })
return dir, cache
}

120
attachment/store_s3_test.go Normal file

@@ -0,0 +1,120 @@
package attachment
import (
"context"
"io"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"heckel.io/ntfy/v2/s3"
)
func TestS3Store_WriteWithPrefix(t *testing.T) {
s3URL := os.Getenv("NTFY_TEST_S3_URL")
if s3URL == "" {
t.Skip("NTFY_TEST_S3_URL not set")
}
cfg, err := s3.ParseURL(s3URL)
require.Nil(t, err)
cfg.Prefix = "test-prefix"
client := s3.New(cfg)
deleteAllObjects(t, client)
backend := newS3Backend(client)
cache, err := newStore(backend, 10*1024, nil)
require.Nil(t, err)
t.Cleanup(func() {
deleteAllObjects(t, client)
cache.Close()
})
size, err := cache.Write("abcdefghijkl", strings.NewReader("test"), 0)
require.Nil(t, err)
require.Equal(t, int64(4), size)
reader, _, err := cache.Read("abcdefghijkl")
require.Nil(t, err)
data, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "test", string(data))
}
// --- Helpers ---
func newTestRealS3Store(t *testing.T, totalSizeLimit int64) (*Store, *modTimeOverrideBackend) {
t.Helper()
s3URL := os.Getenv("NTFY_TEST_S3_URL")
if s3URL == "" {
t.Skip("NTFY_TEST_S3_URL not set")
}
cfg, err := s3.ParseURL(s3URL)
require.Nil(t, err)
if cfg.Prefix != "" {
cfg.Prefix = cfg.Prefix + "/testpkg-attachment"
} else {
cfg.Prefix = "testpkg-attachment"
}
client := s3.New(cfg)
inner := newS3Backend(client)
wrapper := &modTimeOverrideBackend{backend: inner, modTimes: make(map[string]time.Time)}
deleteAllObjects(t, client)
store, err := newStore(wrapper, totalSizeLimit, nil)
require.Nil(t, err)
t.Cleanup(func() {
deleteAllObjects(t, client)
store.Close()
})
return store, wrapper
}
func deleteAllObjects(t *testing.T, client *s3.Client) {
t.Helper()
for i := 0; i < 20; i++ {
objects, err := client.ListObjectsV2(context.Background())
require.Nil(t, err)
if len(objects) == 0 {
return
}
keys := make([]string, len(objects))
for j, obj := range objects {
keys[j] = obj.Key
}
require.Nil(t, client.DeleteObjects(context.Background(), keys))
time.Sleep(200 * time.Millisecond)
}
t.Fatal("timed out waiting for bucket to be empty")
}
// modTimeOverrideBackend wraps a backend and allows overriding LastModified times returned by List().
// This is used in tests to simulate old objects on backends (like real S3) where
// LastModified cannot be set directly.
type modTimeOverrideBackend struct {
backend
mu sync.Mutex
modTimes map[string]time.Time // object ID -> override time
}
func (b *modTimeOverrideBackend) List() ([]object, error) {
objects, err := b.backend.List()
if err != nil {
return nil, err
}
b.mu.Lock()
defer b.mu.Unlock()
for i, obj := range objects {
if t, ok := b.modTimes[obj.ID]; ok {
objects[i].LastModified = t
}
}
return objects, nil
}
func (b *modTimeOverrideBackend) setModTime(id string, t time.Time) {
b.mu.Lock()
b.modTimes[id] = t
b.mu.Unlock()
}

352
attachment/store_test.go Normal file

@@ -0,0 +1,352 @@
package attachment
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"heckel.io/ntfy/v2/util"
)
const testSizeLimit = 10 * 1024
func TestStore_WriteReadRemove(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Write
size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 0)
require.Nil(t, err)
require.Equal(t, int64(11), size)
require.Equal(t, int64(11), s.Size())
// Read back
reader, readSize, err := s.Read("abcdefghijkl")
require.Nil(t, err)
require.Equal(t, int64(11), readSize)
data, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello world", string(data))
// Remove
require.Nil(t, s.Remove("abcdefghijkl"))
require.Equal(t, int64(0), s.Size())
// Read after remove should fail
_, _, err = s.Read("abcdefghijkl")
require.Error(t, err)
})
}
func TestStore_WriteRemoveMultiple(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
for i := 0; i < 5; i++ {
_, err := s.Write(fmt.Sprintf("abcdefghijk%d", i), bytes.NewReader(make([]byte, 100)), 0)
require.Nil(t, err)
}
require.Equal(t, int64(500), s.Size())
require.Nil(t, s.Remove("abcdefghijk1", "abcdefghijk3"))
require.Equal(t, int64(300), s.Size())
// Removed files should not be readable
_, _, err := s.Read("abcdefghijk1")
require.Error(t, err)
_, _, err = s.Read("abcdefghijk3")
require.Error(t, err)
// Remaining files should still be readable
for _, id := range []string{"abcdefghijk0", "abcdefghijk2", "abcdefghijk4"} {
reader, _, err := s.Read(id)
require.Nil(t, err)
reader.Close()
}
})
}
func TestStore_WriteTotalSizeLimit(t *testing.T) {
forEachBackend(t, 100, func(t *testing.T, s *Store, _ func(string)) {
// First write fits
_, err := s.Write("abcdefghijk0", bytes.NewReader(make([]byte, 80)), 0)
require.Nil(t, err)
require.Equal(t, int64(80), s.Size())
require.Equal(t, int64(20), s.Remaining())
// Second write exceeds total limit
_, err = s.Write("abcdefghijk1", bytes.NewReader(make([]byte, 50)), 0)
require.ErrorIs(t, err, util.ErrLimitReached)
})
}
func TestStore_WriteAdditionalLimiter(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
_, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 200)), 0, util.NewFixedLimiter(100))
require.ErrorIs(t, err, util.ErrLimitReached)
// File should not be readable (was cleaned up)
_, _, err = s.Read("abcdefghijkl")
require.Error(t, err)
})
}
func TestStore_WriteWithLimiter(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
size, err := s.Write("abcdefghijkl", strings.NewReader("normal file"), 0, util.NewFixedLimiter(999))
require.Nil(t, err)
require.Equal(t, int64(11), size)
require.Equal(t, int64(11), s.Size())
})
}
func TestStore_WriteOverwriteSameID(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Write 100 bytes
_, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 100)), 0)
require.Nil(t, err)
require.Equal(t, int64(100), s.Size())
// Overwrite with 50 bytes
_, err = s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 50)), 0)
require.Nil(t, err)
require.Equal(t, int64(150), s.Size()) // Store tracks both writes
// Read back should return the latest content
reader, readSize, err := s.Read("abcdefghijkl")
require.Nil(t, err)
require.Equal(t, int64(50), readSize)
reader.Close()
})
}
func TestStore_WriteAfterFailure(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Failed write: limiter rejects it
_, err := s.Write("abcdefghijkl", bytes.NewReader(make([]byte, 200)), 0, util.NewFixedLimiter(100))
require.ErrorIs(t, err, util.ErrLimitReached)
require.Equal(t, int64(0), s.Size())
// Subsequent write with a different ID should succeed
size, err := s.Write("abcdefghijk2", strings.NewReader("hello"), 0)
require.Nil(t, err)
require.Equal(t, int64(5), size)
require.Equal(t, int64(5), s.Size())
// The failed ID should not be readable
_, _, err = s.Read("abcdefghijkl")
require.Error(t, err)
// The successful ID should be readable
reader, _, err := s.Read("abcdefghijk2")
require.Nil(t, err)
reader.Close()
})
}
func TestStore_SyncRecomputesSize(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, makeOld func(string)) {
// Write two files
_, err := s.Write("abcdefghijk0", bytes.NewReader(make([]byte, 100)), 0)
require.Nil(t, err)
_, err = s.Write("abcdefghijk1", bytes.NewReader(make([]byte, 200)), 0)
require.Nil(t, err)
require.Equal(t, int64(300), s.Size())
// Corrupt the in-memory size tracking
s.mu.Lock()
s.size = 999
s.mu.Unlock()
require.Equal(t, int64(999), s.Size())
// Set attachmentsWithSizes to include both files so nothing gets deleted
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{"abcdefghijk0": 100, "abcdefghijk1": 200}, nil
}
// Sync should recompute size from the backend
require.Nil(t, s.sync())
require.Equal(t, int64(300), s.Size())
})
}
func TestStore_ReadNotFound(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
_, _, err := s.Read("abcdefghijkl")
require.Error(t, err)
})
}
func TestStore_InvalidID(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
_, err := s.Write("bad", strings.NewReader("x"), 0)
require.Equal(t, errInvalidFileID, err)
_, _, err = s.Read("bad")
require.Equal(t, errInvalidFileID, err)
err = s.Remove("bad")
require.Equal(t, errInvalidFileID, err)
})
}
func TestStore_WriteLargeObjects(t *testing.T) {
sizes := map[string]int64{
"100B": 100,
"6MB": 6 * 1024 * 1024,
"12MB": 12 * 1024 * 1024,
}
for name, sz := range sizes {
t.Run(name, func(t *testing.T) {
forEachBackend(t, sz+1024, func(t *testing.T, s *Store, _ func(string)) {
data := make([]byte, sz)
for i := range data {
data[i] = byte(i % 251)
}
size, err := s.Write("abcdefghijkl", bytes.NewReader(data), 0)
require.Nil(t, err)
require.Equal(t, sz, size)
require.Equal(t, sz, s.Size())
reader, readSize, err := s.Read("abcdefghijkl")
require.Nil(t, err)
require.Equal(t, sz, readSize)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, data, got)
})
})
}
}
func TestStore_WriteUntrustedLengthExact(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 11)
require.Nil(t, err)
require.Equal(t, int64(11), size)
reader, _, err := s.Read("abcdefghijkl")
require.Nil(t, err)
data, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello world", string(data))
})
}
func TestStore_WriteUntrustedLengthBodyLonger(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Body has 11 bytes, but we claim 5 — only first 5 bytes should be stored
size, err := s.Write("abcdefghijkl", strings.NewReader("hello world"), 5)
require.Nil(t, err)
require.Equal(t, int64(5), size)
reader, _, err := s.Read("abcdefghijkl")
require.Nil(t, err)
data, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello", string(data))
})
}
func TestStore_WriteUntrustedLengthBodyShorter(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Body has 5 bytes, but we claim 100 — should fail
_, err := s.Write("abcdefghijkl", strings.NewReader("hello"), 100)
require.Error(t, err)
// File should not be readable (was cleaned up)
_, _, err = s.Read("abcdefghijkl")
require.Error(t, err)
})
}
func TestStore_Sync(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, makeOld func(string)) {
// Write some files
_, err := s.Write("abcdefghijk0", strings.NewReader("file0"), 0)
require.Nil(t, err)
_, err = s.Write("abcdefghijk1", strings.NewReader("file1"), 0)
require.Nil(t, err)
_, err = s.Write("abcdefghijk2", strings.NewReader("file2"), 0)
require.Nil(t, err)
require.Equal(t, int64(15), s.Size())
// Set the ID provider to only know about file 0 and 2
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{"abcdefghijk0": 5, "abcdefghijk2": 5}, nil
}
// Make file 1 old enough to be cleaned up
makeOld("abcdefghijk1")
// Run sync
require.Nil(t, s.sync())
// File 1 should be deleted (orphan, old enough)
_, _, err = s.Read("abcdefghijk1")
require.Error(t, err)
// Files 0 and 2 should still be readable
r, _, err := s.Read("abcdefghijk0")
require.Nil(t, err)
r.Close()
r, _, err = s.Read("abcdefghijk2")
require.Nil(t, err)
r.Close()
// Size should be updated
require.Equal(t, int64(10), s.Size())
})
}
func TestStore_Sync_SkipsRecentFiles(t *testing.T) {
forEachBackend(t, testSizeLimit, func(t *testing.T, s *Store, _ func(string)) {
// Write a file
_, err := s.Write("abcdefghijk0", strings.NewReader("file0"), 0)
require.Nil(t, err)
// Set the ID provider to return empty (no valid IDs)
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{}, nil
}
// File was just created, so it should NOT be deleted (< 1 hour old)
require.Nil(t, s.sync())
// File should still exist
reader, _, err := s.Read("abcdefghijk0")
require.Nil(t, err)
reader.Close()
})
}
// forEachBackend runs f against both the file and S3 backends. It also provides a makeOld
// callback that makes a specific object's timestamp old enough for orphan cleanup (> 1 hour).
// For the file backend, this uses os.Chtimes; for the S3 backend, it overrides the object's
// LastModified time via a modTimeOverrideBackend wrapper. Objects start with recent timestamps
// by default. The S3 subtest is skipped if NTFY_TEST_S3_URL is not set.
func forEachBackend(t *testing.T, totalSizeLimit int64, f func(t *testing.T, s *Store, makeOld func(string))) {
t.Run("file", func(t *testing.T) {
dir, s := newTestFileStore(t, totalSizeLimit)
makeOld := func(id string) {
oldTime := time.Unix(1, 0)
os.Chtimes(filepath.Join(dir, id), oldTime, oldTime)
}
f(t, s, makeOld)
})
t.Run("s3", func(t *testing.T) {
s, wrapper := newTestRealS3Store(t, totalSizeLimit)
makeOld := func(id string) {
wrapper.setModTime(id, time.Unix(1, 0))
}
f(t, s, makeOld)
})
}


@@ -52,7 +52,7 @@ var flagsServe = append(
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-users", Aliases: []string{"auth_users"}, EnvVars: []string{"NTFY_AUTH_USERS"}, Usage: "pre-provisioned declarative users"}),
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-access", Aliases: []string{"auth_access"}, EnvVars: []string{"NTFY_AUTH_ACCESS"}, Usage: "pre-provisioned declarative access control entries"}),
altsrc.NewStringSliceFlag(&cli.StringSliceFlag{Name: "auth-tokens", Aliases: []string{"auth_tokens"}, EnvVars: []string{"NTFY_AUTH_TOKENS"}, Usage: "pre-provisioned declarative access tokens"}),
altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-cache-dir", Aliases: []string{"attachment_cache_dir"}, EnvVars: []string{"NTFY_ATTACHMENT_CACHE_DIR"}, Usage: "cache directory for attached files"}),
altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-cache-dir", Aliases: []string{"attachment_cache_dir"}, EnvVars: []string{"NTFY_ATTACHMENT_CACHE_DIR"}, Usage: "cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT])"}),
altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-total-size-limit", Aliases: []string{"attachment_total_size_limit", "A"}, EnvVars: []string{"NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT"}, Value: util.FormatSize(server.DefaultAttachmentTotalSizeLimit), Usage: "limit of the on-disk attachment cache"}),
altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-file-size-limit", Aliases: []string{"attachment_file_size_limit", "Y"}, EnvVars: []string{"NTFY_ATTACHMENT_FILE_SIZE_LIMIT"}, Value: util.FormatSize(server.DefaultAttachmentFileSizeLimit), Usage: "per-file attachment size limit (e.g. 300k, 2M, 100M)"}),
altsrc.NewStringFlag(&cli.StringFlag{Name: "attachment-expiry-duration", Aliases: []string{"attachment_expiry_duration", "X"}, EnvVars: []string{"NTFY_ATTACHMENT_EXPIRY_DURATION"}, Value: util.FormatDuration(server.DefaultAttachmentExpiryDuration), Usage: "duration after which uploaded attachments will be deleted (e.g. 3h, 20h)"}),


@@ -11,6 +11,12 @@ type Beginner interface {
Begin() (*sql.Tx, error)
}
// Querier is an interface for types that can execute SQL queries.
// *sql.DB, *sql.Tx, and *DB all implement this.
type Querier interface {
Query(query string, args ...any) (*sql.Rows, error)
}
// Host pairs a *sql.DB with the host:port it was opened against.
type Host struct {
Addr string // "host:port"
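As a hypothetical illustration of why `Querier` is useful: because it is satisfied by `*sql.DB`, `*sql.Tx`, and `*DB` alike, a read helper can run unchanged against the pool, inside a transaction, or through the replica-aware wrapper. The `countMessages` helper and the `messages` table name below are assumptions, not part of this changeset:

``` go
// countMessages runs against any Querier: a plain *sql.DB, an open
// transaction, or the replica-aware *DB wrapper.
func countMessages(q Querier) (count int, err error) {
	rows, err := q.Query("SELECT COUNT(*) FROM messages") // table name is illustrative
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	if rows.Next() {
		if err := rows.Scan(&count); err != nil {
			return 0, err
		}
	}
	return count, rows.Err()
}
```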


@@ -489,30 +489,41 @@ Subscribers can retrieve cached messaging using the [`poll=1` parameter](subscri
## Attachments
If desired, you may allow users to upload and [attach files to notifications](publish.md#attachments). To enable
this feature, you have to simply configure an attachment cache directory and a base URL (`attachment-cache-dir`, `base-url`).
Once these options are set and the directory is writable by the server user, you can upload attachments via PUT.
this feature, you have to configure an attachment storage backend and a base URL (`base-url`). Attachments can be stored
either on the [local filesystem](#filesystem-storage) or in an [S3-compatible object store](#s3-storage), both using the `attachment-cache-dir` option.
Once configured, you can upload attachments via PUT.
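For example, a minimal Go client that attaches a file by PUT-ing the raw body to a topic might look like this sketch. The server URL, topic, and file name are placeholders; the `Filename` header naming the attachment is described in [publish.md](publish.md#attachments):

``` go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// Placeholder file; the raw body becomes the attachment
	f, err := os.Open("flower.jpg")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Placeholder server and topic
	req, err := http.NewRequest(http.MethodPut, "https://ntfy.example.com/mytopic", f)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Filename", "flower.jpg") // optional attachment name
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```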
By default, attachments are stored in the disk-cache **for only 3 hours**. The main reason for this is to avoid legal issues
when hosting user-controlled content. Typically, this is more than enough time for the user (or the auto-download
feature) to download the file. The following config options are relevant to attachments:
By default, attachments are stored **for only 3 hours**. The main reason for this is to avoid legal issues
when hosting user-controlled content. Typically, this is more than enough time for the user (or the auto-download
feature) to download the file. You can increase this time by [purchasing ntfy Pro](https://ntfy.sh/app) via the web app.
The following config options are relevant to attachments:
* `base-url` is the root URL for the ntfy server; this is needed for the generated attachment URLs
* `attachment-cache-dir` is the cache directory for attached files
* `attachment-total-size-limit` is the size limit of the on-disk attachment cache (default: 5G)
* `attachment-cache-dir` is the cache directory for attached files, or an S3 URL for object storage
* `attachment-total-size-limit` is the size limit of the attachment storage (default: 5G)
* `attachment-file-size-limit` is the per-file attachment size limit (e.g. 300k, 2M, 100M, default: 15M)
* `attachment-expiry-duration` is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h, default: 3h)
Here's an example config using mostly the defaults (except for the cache directory, which is empty by default):
!!! warning
ntfy takes full control over the attachment directory or S3 bucket. Files that match the message ID format but have no
corresponding entry in the message table will be deleted. **Do not use a directory or S3 bucket that is also used for something else.**
Please also refer to the [rate limiting](#rate-limiting) settings below, specifically `visitor-attachment-total-size-limit`
and `visitor-attachment-daily-bandwidth-limit`. Setting these conservatively is necessary to avoid abuse.
### Filesystem storage
Here's an example config using the local filesystem for attachment storage:
=== "/etc/ntfy/server.yml (minimal)"
``` yaml
base-url: "https://ntfy.sh"
base-url: "https://ntfy.example.com"
attachment-cache-dir: "/var/cache/ntfy/attachments"
```
=== "/etc/ntfy/server.yml (all options)"
``` yaml
base-url: "https://ntfy.sh"
base-url: "https://ntfy.example.com"
attachment-cache-dir: "/var/cache/ntfy/attachments"
attachment-total-size-limit: "5G"
attachment-file-size-limit: "15M"
@@ -521,8 +532,70 @@ Here's an example config using mostly the defaults (except for the cache directo
visitor-attachment-daily-bandwidth-limit: "500M"
```
Please also refer to the [rate limiting](#rate-limiting) settings below, specifically `visitor-attachment-total-size-limit`
and `visitor-attachment-daily-bandwidth-limit`. Setting these conservatively is necessary to avoid abuse.
### S3 storage
As an alternative to the local filesystem, you can store attachments in an S3-compatible object store (e.g. [AWS S3](https://aws.amazon.com/s3/),
[DigitalOcean Spaces](https://www.digitalocean.com/products/spaces)). This is useful for HA/cloud deployments where you don't want to rely on local disk storage.
To use S3-compatible storage for attachments, set `attachment-cache-dir` to an S3 URL with the following format:
```
s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
```
Here are a few examples:
=== "/etc/ntfy/server.yml (DigitalOcean Spaces)"
``` yaml
base-url: "https://ntfy.example.com"
attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com"
```
=== "/etc/ntfy/server.yml (AWS S3)"
``` yaml
base-url: "https://ntfy.example.com"
attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=us-east-1"
```
=== "/etc/ntfy/server.yml (custom endpoint)"
``` yaml
base-url: "https://ntfy.example.com"
attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=us-east-1&endpoint=https://s3.example.com"
```
Note that the access key and secret key may have to be URL-encoded. For instance, a secret key `YmxhY+mxhYmxhC` (note the `+`) should
be encoded as `YmxhY%2BmxhYmxhC` (note the `%2B`), so the URL would be `s3://ACCESS_KEY:YmxhY%2BmxhYmxhC@my-bucket/attachments...`.
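If in doubt, you can percent-encode the credentials programmatically. A small sketch — the `buildS3URL` helper and all values are illustrative, not part of this changeset:

``` go
package main

import (
	"fmt"
	"net/url"
)

// buildS3URL assembles an attachment-cache-dir value, percent-encoding the
// credentials so that characters like '+' survive URL parsing.
func buildS3URL(accessKey, secretKey, bucketAndPrefix, query string) string {
	return fmt.Sprintf("s3://%s:%s@%s?%s",
		url.QueryEscape(accessKey), url.QueryEscape(secretKey), bucketAndPrefix, query)
}

func main() {
	// A '+' in the secret key becomes %2B, as in the example above
	fmt.Println(buildS3URL("ACCESS_KEY", "YmxhY+mxhYmxhC", "my-bucket/attachments", "region=us-east-1"))
	// Output: s3://ACCESS_KEY:YmxhY%2BmxhYmxhC@my-bucket/attachments?region=us-east-1
}
```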
!!! info
ntfy.sh is hosted and sponsored by DigitalOcean. I can highly recommend their public cloud offering. It's been rock solid
for 4 years. They offer S3-compatible storage for $5/month, which includes 250 GB of storage and 1 TiB of bandwidth.
Also, if you **use [this referral link](https://m.do.co/c/442b929528db), you can get $200 credit**.
For AWS S3, the IAM user needs the following permissions on the bucket:
``` json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:ListBucket",
"s3:ListBucketMultipartUploads"
],
"Resource": "arn:aws:s3:::BUCKET_NAME"
},
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
"s3:AbortMultipartUpload"
],
"Resource": "arn:aws:s3:::BUCKET_NAME/*"
}
]
}
```
## Access control
By default, the ntfy server is open for everyone, meaning **everyone can read and write to any topic** (this is how
@@ -2094,80 +2167,80 @@ variable before running the `ntfy` command (e.g. `export NTFY_LISTEN_HTTP=:80`).
`cache_duration` and `cache-duration` are both supported. This is to support stricter YAML parsers that do
not support dashes.
| Config option | Env variable | Format | Default | Description |
|--------------------------------------------|-------------------------------------------------|-----------------------------------------------------|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `base-url` | `NTFY_BASE_URL` | *URL* | - | Public facing base URL of the service (e.g. `https://ntfy.sh`) |
| `listen-http` | `NTFY_LISTEN_HTTP` | `[host]:port` | `:80` | Listen address for the HTTP web server |
| `listen-https` | `NTFY_LISTEN_HTTPS` | `[host]:port` | - | Listen address for the HTTPS web server. If set, you also need to set `key-file` and `cert-file`. |
| `listen-unix` | `NTFY_LISTEN_UNIX` | *filename* | - | Path to a Unix socket to listen on |
| `listen-unix-mode` | `NTFY_LISTEN_UNIX_MODE` | *file mode* | *system default* | File mode of the Unix socket, e.g. 0700 or 0777 |
| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. |
| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. |
| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). |
| Config option | Env variable | Format | Default | Description |
|--------------------------------------------|-------------------------------------------------|-----------------------------------------------------|-------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `base-url` | `NTFY_BASE_URL` | *URL* | - | Public facing base URL of the service (e.g. `https://ntfy.sh`) |
| `listen-http` | `NTFY_LISTEN_HTTP` | `[host]:port` | `:80` | Listen address for the HTTP web server |
| `listen-https` | `NTFY_LISTEN_HTTPS` | `[host]:port` | - | Listen address for the HTTPS web server. If set, you also need to set `key-file` and `cert-file`. |
| `listen-unix` | `NTFY_LISTEN_UNIX` | *filename* | - | Path to a Unix socket to listen on |
| `listen-unix-mode` | `NTFY_LISTEN_UNIX_MODE` | *file mode* | *system default* | File mode of the Unix socket, e.g. 0700 or 0777 |
| `key-file` | `NTFY_KEY_FILE` | *filename* | - | HTTPS/TLS private key file, only used if `listen-https` is set. |
| `cert-file` | `NTFY_CERT_FILE` | *filename* | - | HTTPS/TLS certificate file, only used if `listen-https` is set. |
| `firebase-key-file` | `NTFY_FIREBASE_KEY_FILE` | *filename* | - | If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app. This is optional and only required to save battery when using the Android app. See [Firebase (FCM)](#firebase-fcm). |
| `database-url` | `NTFY_DATABASE_URL` | *string (connection URL)* | - | PostgreSQL connection string (e.g. `postgres://user:pass@host:5432/ntfy`). If set, uses PostgreSQL for all database-backed stores (message cache, user manager, web push) instead of SQLite. See [database options](#database-options). |
| `database-replica-urls` | `NTFY_DATABASE_REPLICA_URLS` | *list of strings (connection URLs)* | - | PostgreSQL read replica connection strings. Non-critical read-only queries are distributed across replicas (round-robin) with automatic fallback to primary. Requires `database-url`. See [read replicas](#read-replicas). |
| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). |
| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. |
| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) |
| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) |
| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) |
| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control (SQLite). If set, enables authentication and access control. Not required if `database-url` is set. See [access control](#access-control). |
| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. |
| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) |
| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) |
| `proxy-trusted-hosts` | `NTFY_PROXY_TRUSTED_HOSTS` | *comma-separated host/IP/CIDR list* | - | Comma-separated list of trusted IP addresses, hosts, or CIDRs to remove from forwarded header |
| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory* | - | Cache directory for attached files. To enable attachments, this has to be set. |
| `attachment-total-size-limit` | `NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 5G | Limit of the on-disk attachment cache directory. If the limits is exceeded, new attachments will be rejected. |
| `attachment-file-size-limit` | `NTFY_ATTACHMENT_FILE_SIZE_LIMIT` | *size* | 15M | Per-file attachment size limit (e.g. 300k, 2M, 100M). Larger attachment will be rejected. |
| `attachment-expiry-duration` | `NTFY_ATTACHMENT_EXPIRY_DURATION` | *duration* | 3h | Duration after which uploaded attachments will be deleted (e.g. 3h, 20h). Strongly affects `visitor-attachment-total-size-limit`. |
| `smtp-sender-addr` | `NTFY_SMTP_SENDER_ADDR` | `host:port` | - | SMTP server address to allow email sending |
| `smtp-sender-user` | `NTFY_SMTP_SENDER_USER` | *string* | - | SMTP user; only used if e-mail sending is enabled |
| `smtp-sender-pass` | `NTFY_SMTP_SENDER_PASS` | *string* | - | SMTP password; only used if e-mail sending is enabled |
| `smtp-sender-from` | `NTFY_SMTP_SENDER_FROM` | *e-mail address* | - | SMTP sender e-mail address; only used if e-mail sending is enabled |
| `smtp-server-listen` | `NTFY_SMTP_SERVER_LISTEN` | `[ip]:port` | - | Defines the IP address and port the SMTP server will listen on, e.g. `:25` or `1.2.3.4:25` |
| `smtp-server-domain` | `NTFY_SMTP_SERVER_DOMAIN` | *domain name* | - | SMTP server e-mail domain, e.g. `ntfy.sh` |
| `smtp-server-addr-prefix` | `NTFY_SMTP_SERVER_ADDR_PREFIX` | *string* | - | Optional prefix for the e-mail addresses to prevent spam, e.g. `ntfy-` |
| `twilio-account` | `NTFY_TWILIO_ACCOUNT` | *string* | - | Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586 |
| `twilio-auth-token` | `NTFY_TWILIO_AUTH_TOKEN` | *string* | - | Twilio auth token, e.g. affebeef258625862586258625862586 |
| `twilio-phone-number` | `NTFY_TWILIO_PHONE_NUMBER` | *string* | - | Twilio outgoing phone number, e.g. +18775132586 |
| `twilio-verify-service` | `NTFY_TWILIO_VERIFY_SERVICE` | *string* | - | Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586 |
| `keepalive-interval` | `NTFY_KEEPALIVE_INTERVAL` | *duration* | 45s | Interval in which keepalive messages are sent to the client. This is to prevent intermediaries closing the connection for inactivity. Note that the Android app has a hardcoded timeout at 77s, so it should be less than that. |
| `manager-interval` | `NTFY_MANAGER_INTERVAL` | *duration* | 1m | Interval in which the manager prunes old messages, deletes topics and prints the stats. |
| `message-size-limit` | `NTFY_MESSAGE_SIZE_LIMIT` | *size* | 4K | The size limit for the message body. Please note that this is largely untested, and that FCM/APNS have limits around 4KB. If you increase this size limit, FCM and APNS will NOT work for large messages. |
| `message-delay-limit` | `NTFY_MESSAGE_DELAY_LIMIT` | *duration* | 3d | Amount of time a message can be [scheduled](publish.md#scheduled-delivery) into the future when using the `Delay` header |
| `global-topic-limit` | `NTFY_GLOBAL_TOPIC_LIMIT` | *number* | 15,000 | Rate limiting: Total number of topics before the server rejects new topics. |
| `upstream-base-url` | `NTFY_UPSTREAM_BASE_URL` | *URL* | `https://ntfy.sh` | Forward poll request to an upstream server, this is needed for iOS push notifications for self-hosted servers |
| `upstream-access-token` | `NTFY_UPSTREAM_ACCESS_TOKEN` | *string* | `tk_zyYLYj...` | Access token to use for the upstream server; needed only if upstream rate limits are exceeded or upstream server requires auth |
| `visitor-attachment-total-size-limit` | `NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 100M | Rate limiting: Total storage limit used for attachments per visitor, for all attachments combined. Storage is freed after attachments expire. See `attachment-expiry-duration`. |
| `visitor-attachment-daily-bandwidth-limit` | `NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT` | *size* | 500M | Rate limiting: Total daily attachment download/upload traffic limit per visitor. This is to protect your bandwidth costs from exploding. |
| `visitor-email-limit-burst` | `NTFY_VISITOR_EMAIL_LIMIT_BURST` | *number* | 16 | Rate limiting: Initial limit of e-mails per visitor |
| `visitor-email-limit-replenish` | `NTFY_VISITOR_EMAIL_LIMIT_REPLENISH` | *duration* | 1h | Rate limiting: Strongly related to `visitor-email-limit-burst`: The rate at which the bucket is refilled |
| `visitor-message-daily-limit` | `NTFY_VISITOR_MESSAGE_DAILY_LIMIT` | *number* | - | Rate limiting: Allowed number of messages per day per visitor, reset every day at midnight (UTC). By default, this value is unset. |
| `visitor-request-limit-burst` | `NTFY_VISITOR_REQUEST_LIMIT_BURST` | *number* | 60 | Rate limiting: Allowed GET/PUT/POST requests per second, per visitor. This setting is the initial bucket of requests each visitor has |
| `visitor-request-limit-replenish` | `NTFY_VISITOR_REQUEST_LIMIT_REPLENISH` | *duration* | 5s | Rate limiting: Strongly related to `visitor-request-limit-burst`: The rate at which the bucket is refilled |
| `visitor-request-limit-exempt-hosts` | `NTFY_VISITOR_REQUEST_LIMIT_EXEMPT_HOSTS` | *comma-separated host/IP/CIDR list* | - | Rate limiting: List of hostnames and IPs to be exempt from request rate limiting |
| `visitor-subscription-limit` | `NTFY_VISITOR_SUBSCRIPTION_LIMIT` | *number* | 30 | Rate limiting: Number of subscriptions per visitor (IP address) |
| `visitor-subscriber-rate-limiting` | `NTFY_VISITOR_SUBSCRIBER_RATE_LIMITING` | *bool* | `false` | Rate limiting: Enables subscriber-based rate limiting |
| `visitor-prefix-bits-ipv4` | `NTFY_VISITOR_PREFIX_BITS_IPV4` | *number* | 32 | Rate limiting: Number of bits to use for IPv4 visitor prefix, e.g. 24 for /24 |
| `visitor-prefix-bits-ipv6` | `NTFY_VISITOR_PREFIX_BITS_IPV6` | *number* | 64 | Rate limiting: Number of bits to use for IPv6 visitor prefix, e.g. 48 for /48 |
| `web-root` | `NTFY_WEB_ROOT` | *path*, e.g. `/` or `/app`, or `disable` | `/` | Sets root of the web app (e.g. /, or /app), or disables it entirely (disable) |
| `enable-signup` | `NTFY_ENABLE_SIGNUP` | *boolean* (`true` or `false`) | `false` | Allows users to sign up via the web app, or API |
| `enable-login` | `NTFY_ENABLE_LOGIN` | *boolean* (`true` or `false`) | `false` | Allows users to log in via the web app, or API |
| `enable-reservations` | `NTFY_ENABLE_RESERVATIONS` | *boolean* (`true` or `false`) | `false` | Allows users to reserve topics (if their tier allows it) |
| `require-login` | `NTFY_REQUIRE_LOGIN` | *boolean* (`true` or `false`) | `false` | All actions via the web app require a login |
| `stripe-secret-key` | `NTFY_STRIPE_SECRET_KEY` | *string* | - | Payments: Key used for the Stripe API communication, this enables payments |
| `stripe-webhook-key` | `NTFY_STRIPE_WEBHOOK_KEY` | *string* | - | Payments: Key required to validate the authenticity of incoming webhooks from Stripe |
| `billing-contact` | `NTFY_BILLING_CONTACT` | *email address* or *website* | - | Payments: Email or website displayed in Upgrade dialog as a billing contact |
| `web-push-public-key` | `NTFY_WEB_PUSH_PUBLIC_KEY` | *string* | - | Web Push: Public Key. Run `ntfy webpush keys` to generate |
| `web-push-private-key` | `NTFY_WEB_PUSH_PRIVATE_KEY` | *string* | - | Web Push: Private Key. Run `ntfy webpush keys` to generate |
| `web-push-file` | `NTFY_WEB_PUSH_FILE` | *string* | - | Web Push: Database file that stores subscriptions |
| `web-push-email-address` | `NTFY_WEB_PUSH_EMAIL_ADDRESS` | *string* | - | Web Push: Sender email address |
| `web-push-startup-queries` | `NTFY_WEB_PUSH_STARTUP_QUERIES` | *string* | - | Web Push: SQL queries to run against subscription database at startup |
| `web-push-expiry-duration` | `NTFY_WEB_PUSH_EXPIRY_DURATION` | *duration* | 60d | Web Push: Duration after which a subscription is considered stale and will be deleted. This is to prevent stale subscriptions. |
| `web-push-expiry-warning-duration` | `NTFY_WEB_PUSH_EXPIRY_WARNING_DURATION` | *duration* | 55d | Web Push: Duration after which a warning is sent to subscribers that their subscription will expire soon. This is to prevent stale subscriptions. |
| `log-format` | `NTFY_LOG_FORMAT` | *string* | `text` | Defines the output format, can be text or json |
| `log-file` | `NTFY_LOG_FILE` | *string* | - | Defines the filename to write logs to. If this is not set, ntfy logs to stderr |
| `log-level` | `NTFY_LOG_LEVEL` | *string* | `info` | Defines the default log level, can be one of trace, debug, info, warn or error |
| `database-replica-urls` | `NTFY_DATABASE_REPLICA_URLS` | *list of strings (connection URLs)* | - | PostgreSQL read replica connection strings. Non-critical read-only queries are distributed across replicas (round-robin) with automatic fallback to primary. Requires `database-url`. |
| `cache-file` | `NTFY_CACHE_FILE` | *filename* | - | If set, messages are cached in a local SQLite database instead of only in-memory. This allows for service restarts without losing messages in support of the since= parameter. See [message cache](#message-cache). |
| `cache-duration` | `NTFY_CACHE_DURATION` | *duration* | 12h | Duration for which messages will be buffered before they are deleted. This is required to support the `since=...` and `poll=1` parameter. Set this to `0` to disable the cache entirely. |
| `cache-startup-queries` | `NTFY_CACHE_STARTUP_QUERIES` | *string (SQL queries)* | - | SQL queries to run during database startup; this is useful for tuning and [enabling WAL mode](#message-cache) |
| `cache-batch-size` | `NTFY_CACHE_BATCH_SIZE` | *int* | 0 | Max size of messages to batch together when writing to message cache (if zero, writes are synchronous) |
| `cache-batch-timeout` | `NTFY_CACHE_BATCH_TIMEOUT` | *duration* | 0s | Timeout for batched async writes to the message cache (if zero, writes are synchronous) |
| `auth-file` | `NTFY_AUTH_FILE` | *filename* | - | Auth database file used for access control (SQLite). If set, enables authentication and access control. Not required if `database-url` is set. See [access control](#access-control). |
| `auth-default-access` | `NTFY_AUTH_DEFAULT_ACCESS` | `read-write`, `read-only`, `write-only`, `deny-all` | `read-write` | Default permissions if no matching entries in the auth database are found. Default is `read-write`. |
| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use the forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine the visitor IP address (for rate limiting) |
| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) |
| `proxy-trusted-hosts` | `NTFY_PROXY_TRUSTED_HOSTS` | *comma-separated host/IP/CIDR list* | - | Comma-separated list of trusted IP addresses, hosts, or CIDRs to remove from forwarded header |
| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory or S3 URL* | - | Cache directory for attached files, or S3 URL for object storage (format: `s3://KEY:SECRET@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]`). |
| `attachment-total-size-limit` | `NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 5G | Limit of the on-disk attachment cache directory. If the limit is exceeded, new attachments will be rejected. |
| `attachment-file-size-limit` | `NTFY_ATTACHMENT_FILE_SIZE_LIMIT` | *size* | 15M | Per-file attachment size limit (e.g. 300k, 2M, 100M). Larger attachments will be rejected. |
| `attachment-expiry-duration` | `NTFY_ATTACHMENT_EXPIRY_DURATION` | *duration* | 3h | Duration after which uploaded attachments will be deleted (e.g. 3h, 20h). Strongly affects `visitor-attachment-total-size-limit`. |
| `smtp-sender-addr` | `NTFY_SMTP_SENDER_ADDR` | `host:port` | - | SMTP server address to allow email sending |
| `smtp-sender-user` | `NTFY_SMTP_SENDER_USER` | *string* | - | SMTP user; only used if e-mail sending is enabled |
| `smtp-sender-pass` | `NTFY_SMTP_SENDER_PASS` | *string* | - | SMTP password; only used if e-mail sending is enabled |
| `smtp-sender-from` | `NTFY_SMTP_SENDER_FROM` | *e-mail address* | - | SMTP sender e-mail address; only used if e-mail sending is enabled |
| `smtp-server-listen` | `NTFY_SMTP_SERVER_LISTEN` | `[ip]:port` | - | Defines the IP address and port the SMTP server will listen on, e.g. `:25` or `1.2.3.4:25` |
| `smtp-server-domain` | `NTFY_SMTP_SERVER_DOMAIN` | *domain name* | - | SMTP server e-mail domain, e.g. `ntfy.sh` |
| `smtp-server-addr-prefix` | `NTFY_SMTP_SERVER_ADDR_PREFIX` | *string* | - | Optional prefix for the e-mail addresses to prevent spam, e.g. `ntfy-` |
| `twilio-account` | `NTFY_TWILIO_ACCOUNT` | *string* | - | Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586 |
| `twilio-auth-token` | `NTFY_TWILIO_AUTH_TOKEN` | *string* | - | Twilio auth token, e.g. affebeef258625862586258625862586 |
| `twilio-phone-number` | `NTFY_TWILIO_PHONE_NUMBER` | *string* | - | Twilio outgoing phone number, e.g. +18775132586 |
| `twilio-verify-service` | `NTFY_TWILIO_VERIFY_SERVICE` | *string* | - | Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586 |
| `keepalive-interval` | `NTFY_KEEPALIVE_INTERVAL` | *duration* | 45s | Interval in which keepalive messages are sent to the client. This is to prevent intermediaries closing the connection for inactivity. Note that the Android app has a hardcoded timeout at 77s, so it should be less than that. |
| `manager-interval` | `NTFY_MANAGER_INTERVAL` | *duration* | 1m | Interval in which the manager prunes old messages, deletes topics and prints the stats. |
| `message-size-limit` | `NTFY_MESSAGE_SIZE_LIMIT` | *size* | 4K | The size limit for the message body. Please note that this is largely untested, and that FCM/APNS have limits around 4KB. If you increase this size limit, FCM and APNS will NOT work for large messages. |
| `message-delay-limit` | `NTFY_MESSAGE_DELAY_LIMIT` | *duration* | 3d | Amount of time a message can be [scheduled](publish.md#scheduled-delivery) into the future when using the `Delay` header |
| `global-topic-limit` | `NTFY_GLOBAL_TOPIC_LIMIT` | *number* | 15,000 | Rate limiting: Total number of topics before the server rejects new topics. |
| `upstream-base-url` | `NTFY_UPSTREAM_BASE_URL` | *URL* | `https://ntfy.sh` | Forward poll requests to an upstream server; this is needed for iOS push notifications for self-hosted servers |
| `upstream-access-token` | `NTFY_UPSTREAM_ACCESS_TOKEN` | *string* | `tk_zyYLYj...` | Access token to use for the upstream server; needed only if upstream rate limits are exceeded or upstream server requires auth |
| `visitor-attachment-total-size-limit` | `NTFY_VISITOR_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 100M | Rate limiting: Total storage limit used for attachments per visitor, for all attachments combined. Storage is freed after attachments expire. See `attachment-expiry-duration`. |
| `visitor-attachment-daily-bandwidth-limit` | `NTFY_VISITOR_ATTACHMENT_DAILY_BANDWIDTH_LIMIT` | *size* | 500M | Rate limiting: Total daily attachment download/upload traffic limit per visitor. This is to protect your bandwidth costs from exploding. |
| `visitor-email-limit-burst` | `NTFY_VISITOR_EMAIL_LIMIT_BURST` | *number* | 16 | Rate limiting: Initial limit of e-mails per visitor |
| `visitor-email-limit-replenish` | `NTFY_VISITOR_EMAIL_LIMIT_REPLENISH` | *duration* | 1h | Rate limiting: Strongly related to `visitor-email-limit-burst`: The rate at which the bucket is refilled |
| `visitor-message-daily-limit` | `NTFY_VISITOR_MESSAGE_DAILY_LIMIT` | *number* | - | Rate limiting: Allowed number of messages per day per visitor, reset every day at midnight (UTC). By default, this value is unset. |
| `visitor-request-limit-burst` | `NTFY_VISITOR_REQUEST_LIMIT_BURST` | *number* | 60 | Rate limiting: Allowed GET/PUT/POST requests per second, per visitor. This setting is the initial bucket of requests each visitor has |
| `visitor-request-limit-replenish` | `NTFY_VISITOR_REQUEST_LIMIT_REPLENISH` | *duration* | 5s | Rate limiting: Strongly related to `visitor-request-limit-burst`: The rate at which the bucket is refilled |
| `visitor-request-limit-exempt-hosts` | `NTFY_VISITOR_REQUEST_LIMIT_EXEMPT_HOSTS` | *comma-separated host/IP/CIDR list* | - | Rate limiting: List of hostnames and IPs to be exempt from request rate limiting |
| `visitor-subscription-limit` | `NTFY_VISITOR_SUBSCRIPTION_LIMIT` | *number* | 30 | Rate limiting: Number of subscriptions per visitor (IP address) |
| `visitor-subscriber-rate-limiting` | `NTFY_VISITOR_SUBSCRIBER_RATE_LIMITING` | *bool* | `false` | Rate limiting: Enables subscriber-based rate limiting |
| `visitor-prefix-bits-ipv4` | `NTFY_VISITOR_PREFIX_BITS_IPV4` | *number* | 32 | Rate limiting: Number of bits to use for IPv4 visitor prefix, e.g. 24 for /24 |
| `visitor-prefix-bits-ipv6` | `NTFY_VISITOR_PREFIX_BITS_IPV6` | *number* | 64 | Rate limiting: Number of bits to use for IPv6 visitor prefix, e.g. 48 for /48 |
| `web-root` | `NTFY_WEB_ROOT` | *path*, e.g. `/` or `/app`, or `disable` | `/` | Sets root of the web app (e.g. /, or /app), or disables it entirely (disable) |
| `enable-signup` | `NTFY_ENABLE_SIGNUP` | *boolean* (`true` or `false`) | `false` | Allows users to sign up via the web app, or API |
| `enable-login` | `NTFY_ENABLE_LOGIN` | *boolean* (`true` or `false`) | `false` | Allows users to log in via the web app, or API |
| `enable-reservations` | `NTFY_ENABLE_RESERVATIONS` | *boolean* (`true` or `false`) | `false` | Allows users to reserve topics (if their tier allows it) |
| `require-login` | `NTFY_REQUIRE_LOGIN` | *boolean* (`true` or `false`) | `false` | All actions via the web app require a login |
| `stripe-secret-key` | `NTFY_STRIPE_SECRET_KEY` | *string* | - | Payments: Key used for the Stripe API communication, this enables payments |
| `stripe-webhook-key` | `NTFY_STRIPE_WEBHOOK_KEY` | *string* | - | Payments: Key required to validate the authenticity of incoming webhooks from Stripe |
| `billing-contact` | `NTFY_BILLING_CONTACT` | *email address* or *website* | - | Payments: Email or website displayed in Upgrade dialog as a billing contact |
| `web-push-public-key` | `NTFY_WEB_PUSH_PUBLIC_KEY` | *string* | - | Web Push: Public Key. Run `ntfy webpush keys` to generate |
| `web-push-private-key` | `NTFY_WEB_PUSH_PRIVATE_KEY` | *string* | - | Web Push: Private Key. Run `ntfy webpush keys` to generate |
| `web-push-file` | `NTFY_WEB_PUSH_FILE` | *string* | - | Web Push: Database file that stores subscriptions |
| `web-push-email-address` | `NTFY_WEB_PUSH_EMAIL_ADDRESS` | *string* | - | Web Push: Sender email address |
| `web-push-startup-queries` | `NTFY_WEB_PUSH_STARTUP_QUERIES` | *string* | - | Web Push: SQL queries to run against subscription database at startup |
| `web-push-expiry-duration` | `NTFY_WEB_PUSH_EXPIRY_DURATION` | *duration* | 60d | Web Push: Duration after which a subscription is considered stale and will be deleted. This is to prevent stale subscriptions. |
| `web-push-expiry-warning-duration` | `NTFY_WEB_PUSH_EXPIRY_WARNING_DURATION` | *duration* | 55d | Web Push: Duration after which a warning is sent to subscribers that their subscription will expire soon. This is to prevent stale subscriptions. |
| `log-format` | `NTFY_LOG_FORMAT` | *string* | `text` | Defines the output format, can be text or json |
| `log-file` | `NTFY_LOG_FILE` | *string* | - | Defines the filename to write logs to. If this is not set, ntfy logs to stderr |
| `log-level` | `NTFY_LOG_LEVEL` | *string* | `info` | Defines the default log level, can be one of trace, debug, info, warn or error |
The format for a *duration* is: `<number>(smhd)`, e.g. 30s, 20m, 1h or 3d.
The format for a *size* is: `<number>(GMK)`, e.g. 1G, 200M or 4000k.
@@ -2218,7 +2291,7 @@ OPTIONS:
--auth-file value, --auth_file value, -H value auth database file used for access control [$NTFY_AUTH_FILE]
--auth-startup-queries value, --auth_startup_queries value queries run when the auth database is initialized [$NTFY_AUTH_STARTUP_QUERIES]
--auth-default-access value, --auth_default_access value, -p value default permissions if no matching entries in the auth database are found (default: "read-write") [$NTFY_AUTH_DEFAULT_ACCESS]
--attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files [$NTFY_ATTACHMENT_CACHE_DIR]
--attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]) [$NTFY_ATTACHMENT_CACHE_DIR]
--attachment-total-size-limit value, --attachment_total_size_limit value, -A value limit of the on-disk attachment cache (default: "5G") [$NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT]
--attachment-file-size-limit value, --attachment_file_size_limit value, -Y value per-file attachment size limit (e.g. 300k, 2M, 100M) (default: "15M") [$NTFY_ATTACHMENT_FILE_SIZE_LIMIT]
--attachment-expiry-duration value, --attachment_expiry_duration value, -X value duration after which uploaded attachments will be deleted (e.g. 3h, 20h) (default: "3h") [$NTFY_ATTACHMENT_EXPIRY_DURATION]

View File

@@ -71,7 +71,8 @@ The web app is a static website without a backend (other than the ntfy API). All
cache and local storage. That means it does not need to be protected with a login screen, and it poses no additional
security risk. So technically, it does not need to be disabled.
However, if you still want to disable it, you can do so with the `web-root: disable` option in the `server.yml` file.
However, if you still want to, you can require login with the `require-login: true` option,
or disable it with the `web-root: disable` option in the `server.yml` file.
Think of the ntfy web app like an Android/iOS app. It is freely available and accessible to anyone, yet useless without
a proper backend. So as long as you secure your backend with ACLs, exposing the ntfy web app to the Internet is harmless.

View File

@@ -30,37 +30,37 @@ deb/rpm packages.
=== "x86_64/amd64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_amd64.tar.gz
tar zxvf ntfy_2.19.1_linux_amd64.tar.gz
sudo cp -a ntfy_2.19.1_linux_amd64/ntfy /usr/local/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.1_linux_amd64/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.tar.gz
tar zxvf ntfy_2.19.2_linux_amd64.tar.gz
sudo cp -a ntfy_2.19.2_linux_amd64/ntfy /usr/local/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_amd64/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "armv6"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv6.tar.gz
tar zxvf ntfy_2.19.1_linux_armv6.tar.gz
sudo cp -a ntfy_2.19.1_linux_armv6/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.1_linux_armv6/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.tar.gz
tar zxvf ntfy_2.19.2_linux_armv6.tar.gz
sudo cp -a ntfy_2.19.2_linux_armv6/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_armv6/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "armv7/armhf"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv7.tar.gz
tar zxvf ntfy_2.19.1_linux_armv7.tar.gz
sudo cp -a ntfy_2.19.1_linux_armv7/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.1_linux_armv7/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.tar.gz
tar zxvf ntfy_2.19.2_linux_armv7.tar.gz
sudo cp -a ntfy_2.19.2_linux_armv7/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_armv7/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "arm64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_arm64.tar.gz
tar zxvf ntfy_2.19.1_linux_arm64.tar.gz
sudo cp -a ntfy_2.19.1_linux_arm64/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.1_linux_arm64/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.tar.gz
tar zxvf ntfy_2.19.2_linux_arm64.tar.gz
sudo cp -a ntfy_2.19.2_linux_arm64/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_arm64/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
@@ -116,7 +116,7 @@ Manually installing the .deb file:
=== "x86_64/amd64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_amd64.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -124,7 +124,7 @@ Manually installing the .deb file:
=== "armv6"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv6.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -132,7 +132,7 @@ Manually installing the .deb file:
=== "armv7/armhf"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv7.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -140,7 +140,7 @@ Manually installing the .deb file:
=== "arm64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_arm64.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -150,28 +150,28 @@ Manually installing the .deb file:
=== "x86_64/amd64"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_amd64.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "armv6"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv6.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "armv7/armhf"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_armv7.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "arm64"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_linux_arm64.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
@@ -213,18 +213,18 @@ pkg install go-ntfy
## macOS
The [ntfy CLI](subscribe/cli.md) (`ntfy publish` and `ntfy subscribe` only) is supported on macOS as well.
To install, please [download the tarball](https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_darwin_all.tar.gz),
To install, please [download the tarball](https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_darwin_all.tar.gz),
extract it and place it somewhere in your `PATH` (e.g. `/usr/local/bin/ntfy`).
If run as `root`, ntfy will look for its config at `/etc/ntfy/client.yml`. For all other users, it'll look for it at
`~/Library/Application Support/ntfy/client.yml` (sample included in the tarball).
```bash
curl -L https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_darwin_all.tar.gz > ntfy_2.19.1_darwin_all.tar.gz
tar zxvf ntfy_2.19.1_darwin_all.tar.gz
sudo cp -a ntfy_2.19.1_darwin_all/ntfy /usr/local/bin/ntfy
curl -L https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_darwin_all.tar.gz > ntfy_2.19.2_darwin_all.tar.gz
tar zxvf ntfy_2.19.2_darwin_all.tar.gz
sudo cp -a ntfy_2.19.2_darwin_all/ntfy /usr/local/bin/ntfy
mkdir ~/Library/Application\ Support/ntfy
cp ntfy_2.19.1_darwin_all/client/client.yml ~/Library/Application\ Support/ntfy/client.yml
cp ntfy_2.19.2_darwin_all/client/client.yml ~/Library/Application\ Support/ntfy/client.yml
ntfy --help
```
@@ -245,7 +245,7 @@ brew install ntfy
The ntfy server and CLI are fully supported on Windows. You can run the ntfy server directly or as a Windows service.
To install, you can either
* [Download the latest ZIP](https://github.com/binwiederhier/ntfy/releases/download/v2.19.1/ntfy_2.19.1_windows_amd64.zip),
* [Download the latest ZIP](https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_windows_amd64.zip),
extract it and place the `ntfy.exe` binary somewhere in your `%Path%`.
* Or install ntfy from the [Scoop](https://scoop.sh) main repository via `scoop install ntfy`

View File

@@ -6,12 +6,23 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release
| Component | Version | Release date |
|------------------|---------|--------------|
| ntfy server | v2.19.1 | Mar 15, 2026 |
| ntfy server | v2.19.2 | Mar 16, 2026 |
| ntfy Android app | v1.24.0 | Mar 5, 2026 |
| ntfy iOS app | v1.3 | Nov 26, 2023 |
Please check out the release notes for [upcoming releases](#not-released-yet) below.
## ntfy server v2.19.2
Released March 16, 2026
This is another small bugfix release for PostgreSQL, avoiding races between the primary and read replicas, and
further reducing primary load.
**Bug fixes + maintenance:**
* Fix race condition in web push subscription causing FK constraint violation when concurrent requests hit the same endpoint
* Route authorization query to read-only database replica to reduce primary database load
## ntfy server v2.19.1
Released March 15, 2026
@@ -1787,4 +1798,25 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release
## Not released yet
Nothing to see here.
### ntfy server v2.20.x (UNRELEASED)
This release is another step towards scaling ntfy up and out 🔥! With this release, you can store
attachments in an S3-compatible object store as an alternative to the local cache directory. See [attachment store](config.md#attachments)
for details.
!!! warning
With this release, ntfy takes full control over the attachment directory or S3 bucket. Files/objects in the configured `attachment-cache-dir`
that match the message ID format (12 characters, matching `^[A-Za-z0-9]{12}$`) and have no entry in the message database will be deleted.
**Do not point `attachment-cache-dir` at a directory or S3 bucket that is also used for anything else.**
This is a small behavioral change that was necessary because the old logic often left attachments behind and never cleaned them
up. Unless you have re-used the attachment directory for something else (which hopefully nobody does), this should not affect
you at all.
**Features:**
* Add S3-compatible object storage as an alternative [attachment store](config.md#attachments) via `attachment-cache-dir` config option
**Bug fixes + maintenance:**
* Reject invalid e-mail addresses (e.g. multiple comma-separated recipients) with HTTP 400

18
go.mod
View File

@@ -4,13 +4,13 @@ go 1.25.0
require (
cloud.google.com/go/firestore v1.21.0 // indirect
cloud.google.com/go/storage v1.61.1 // indirect
cloud.google.com/go/storage v1.61.3 // indirect
github.com/BurntSushi/toml v1.6.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
github.com/emersion/go-smtp v0.18.0
github.com/gabriel-vasile/mimetype v1.4.13
github.com/gorilla/websocket v1.5.3
github.com/mattn/go-sqlite3 v1.14.34
github.com/mattn/go-sqlite3 v1.14.37
github.com/olebedev/when v1.1.0
github.com/stretchr/testify v1.11.1
github.com/urfave/cli/v2 v2.27.7
@@ -19,7 +19,7 @@ require (
golang.org/x/sync v0.20.0
golang.org/x/term v0.41.0
golang.org/x/time v0.15.0
google.golang.org/api v0.271.0
google.golang.org/api v0.272.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -30,7 +30,7 @@ require github.com/pkg/errors v0.9.1 // indirect
require (
firebase.google.com/go/v4 v4.19.0
github.com/SherClockHolmes/webpush-go v1.4.0
github.com/jackc/pgx/v5 v5.8.0
github.com/jackc/pgx/v5 v5.9.0
github.com/microcosm-cc/bluemonday v1.0.27
github.com/prometheus/client_golang v1.23.2
github.com/stripe/stripe-go/v74 v74.30.0
@@ -70,7 +70,7 @@ require (
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect
github.com/googleapis/gax-go/v2 v2.18.0 // indirect
github.com/googleapis/gax-go/v2 v2.19.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -97,10 +97,10 @@ require (
go.yaml.in/yaml/v2 v2.4.4 // indirect
golang.org/x/net v0.52.0 // indirect
google.golang.org/appengine/v2 v2.0.6 // indirect
google.golang.org/genproto v0.0.0-20260311181403-84a4fc48630c // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260311181403-84a4fc48630c // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c // indirect
google.golang.org/grpc v1.79.2 // indirect
google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 // indirect
google.golang.org/grpc v1.79.3 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

36
go.sum
View File

@@ -18,8 +18,8 @@ cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7
cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk=
cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE=
cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI=
cloud.google.com/go/storage v1.61.1 h1:VELCSvZKiSw0AS1k3so5mKGy3CB7bTCYD8EHhTF42bY=
cloud.google.com/go/storage v1.61.1/go.mod h1:k30/hwYfd0M8aULYbPkQLgNf+SFcdjlRHvLMXggw18E=
cloud.google.com/go/storage v1.61.3 h1:VS//ZfBuPGDvakfD9xyPW1RGF1Vy3BWUoVZXgW1KMOg=
cloud.google.com/go/storage v1.61.3/go.mod h1:JtqK8BBB7TWv0HVGHubtUdzYYrakOQIsMLffZ2Z/HWk=
cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U=
cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s=
firebase.google.com/go/v4 v4.19.0 h1:f5NMlC2YHFsncz00c2+ecBr+ZYlRMhKIhj1z8Iz0lD8=
@@ -98,8 +98,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8=
github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg=
github.com/googleapis/gax-go/v2 v2.18.0 h1:jxP5Uuo3bxm3M6gGtV94P4lliVetoCB4Wk2x8QA86LI=
github.com/googleapis/gax-go/v2 v2.18.0/go.mod h1:uSzZN4a356eRG985CzJ3WfbFSpqkLTjsnhWGJR6EwrE=
github.com/googleapis/gax-go/v2 v2.19.0 h1:fYQaUOiGwll0cGj7jmHT/0nPlcrZDFPrZRhTsoCr8hE=
github.com/googleapis/gax-go/v2 v2.19.0/go.mod h1:w2ROXVdfGEVFXzmlciUU4EdjHgWvB5h2n6x/8XSTTJA=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
@@ -108,8 +108,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
github.com/jackc/pgx/v5 v5.9.0 h1:T/dI+2TvmI2H8s/KH1/lXIbz1CUFk3gn5oTjr0/mBsE=
github.com/jackc/pgx/v5 v5.9.0/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -120,8 +120,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-sqlite3 v1.14.34 h1:3NtcvcUnFBPsuRcno8pUtupspG/GM+9nZ88zgJcp6Zk=
github.com/mattn/go-sqlite3 v1.14.34/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mattn/go-sqlite3 v1.14.37 h1:3DOZp4cXis1cUIpCfXLtmlGolNLp2VEqhiB/PARNBIg=
github.com/mattn/go-sqlite3 v1.14.37/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
@@ -272,18 +272,18 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.271.0 h1:cIPN4qcUc61jlh7oXu6pwOQqbJW2GqYh5PS6rB2C/JY=
google.golang.org/api v0.271.0/go.mod h1:CGT29bhwkbF+i11qkRUJb2KMKqcJ1hdFceEIRd9u64Q=
google.golang.org/api v0.272.0 h1:eLUQZGnAS3OHn31URRf9sAmRk3w2JjMx37d2k8AjJmA=
google.golang.org/api v0.272.0/go.mod h1:wKjowi5LNJc5qarNvDCvNQBn3rVK8nSy6jg2SwRwzIA=
google.golang.org/appengine/v2 v2.0.6 h1:LvPZLGuchSBslPBp+LAhihBeGSiRh1myRoYK4NtuBIw=
google.golang.org/appengine/v2 v2.0.6/go.mod h1:WoEXGoXNfa0mLvaH5sV3ZSGXwVmy8yf7Z1JKf3J3wLI=
google.golang.org/genproto v0.0.0-20260311181403-84a4fc48630c h1:ZhFDeBMmFc/4g8/GwxnJ4rzB3O4GwQVNr+8Mh7Y5z4g=
google.golang.org/genproto v0.0.0-20260311181403-84a4fc48630c/go.mod h1:hf4r/rBuzaTkLUWRO03771Xvcs6P5hwdQK3UUEJjqo0=
google.golang.org/genproto/googleapis/api v0.0.0-20260311181403-84a4fc48630c h1:OyQPd6I3pN/9gDxz6L13kYGJgqkpdrAohJRBeXyxlgI=
google.golang.org/genproto/googleapis/api v0.0.0-20260311181403-84a4fc48630c/go.mod h1:X2gu9Qwng7Nn009s/r3RUxqkzQNqOrAy79bluY7ojIg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c h1:xgCzyF2LFIO/0X2UAoVRiXKU5Xg6VjToG4i2/ecSswk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260311181403-84a4fc48630c/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.79.2 h1:fRMD94s2tITpyJGtBBn7MkMseNpOZU8ZxgC3MMBaXRU=
google.golang.org/grpc v1.79.2/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0=
google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:L43LFes82YgSonw6iTXTxXUX1OlULt4AQtkik4ULL/I=
google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7 h1:41r6JMbpzBMen0R/4TZeeAmGXSJC7DftGINUodzTkPI=
google.golang.org/genproto/googleapis/api v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:EIQZ5bFCfRQDV4MhRle7+OgjNtZ6P1PiZBgAKuxXu/Y=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7 h1:ndE4FoJqsIceKP2oYSnUZqhTdYufCYYkqwtFzfrhI7w=
google.golang.org/genproto/googleapis/rpc v0.0.0-20260319201613-d00831a3d3e7/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=
google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=
google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=

View File

@@ -43,6 +43,7 @@ type queries struct {
selectAttachmentsExpired string
selectAttachmentsSizeBySender string
selectAttachmentsSizeByUserID string
selectAttachmentsWithSizes string
selectStats string
updateStats string
updateMessageTime string
@@ -245,25 +246,14 @@ func (c *Cache) MessagesDue() ([]*model.Message, error) {
return readMessages(rows)
}
// MessagesExpired returns a list of IDs for messages that have expired (should be deleted)
// MessagesExpired returns a list of message IDs that have expired and should be deleted
func (c *Cache) MessagesExpired() ([]string, error) {
rows, err := c.db.Query(c.queries.selectMessagesExpired, time.Now().Unix())
if err != nil {
return nil, err
}
defer rows.Close()
ids := make([]string, 0)
for rows.Next() {
var id string
if err := rows.Scan(&id); err != nil {
return nil, err
}
ids = append(ids, id)
}
if err := rows.Err(); err != nil {
return nil, err
}
return ids, nil
return readStrings(rows)
}
// Message returns the message with the given ID, or ErrMessageNotFound if not found
@@ -272,10 +262,10 @@ func (c *Cache) Message(id string) (*model.Message, error) {
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, model.ErrMessageNotFound
}
defer rows.Close()
return readMessage(rows)
}
@@ -319,18 +309,7 @@ func (c *Cache) Topics() ([]string, error) {
return nil, err
}
defer rows.Close()
topics := make([]string, 0)
for rows.Next() {
var id string
if err := rows.Scan(&id); err != nil {
return nil, err
}
topics = append(topics, id)
}
if err := rows.Err(); err != nil {
return nil, err
}
return topics, nil
return readStrings(rows)
}
// DeleteMessages deletes the messages with the given IDs
@@ -358,15 +337,8 @@ func (c *Cache) DeleteScheduledBySequenceID(topic, sequenceID string) ([]string,
return nil, err
}
defer rows.Close()
ids := make([]string, 0)
for rows.Next() {
var id string
if err := rows.Scan(&id); err != nil {
return nil, err
}
ids = append(ids, id)
}
if err := rows.Err(); err != nil {
ids, err := readStrings(rows)
if err != nil {
return nil, err
}
rows.Close() // Close rows before executing delete in same transaction
@@ -398,18 +370,7 @@ func (c *Cache) AttachmentsExpired() ([]string, error) {
return nil, err
}
defer rows.Close()
ids := make([]string, 0)
for rows.Next() {
var id string
if err := rows.Scan(&id); err != nil {
return nil, err
}
ids = append(ids, id)
}
if err := rows.Err(); err != nil {
return nil, err
}
return ids, nil
return readStrings(rows)
}
// MarkAttachmentsDeleted marks the attachments for the given message IDs as deleted
@@ -444,6 +405,30 @@ func (c *Cache) AttachmentBytesUsedByUser(userID string) (int64, error) {
return c.readAttachmentBytesUsed(rows)
}
// AttachmentsWithSizes returns a map of message ID to attachment size for all active
// (non-expired, non-deleted) attachments. This is used to hydrate the attachment store's
// size tracking on startup and during periodic sync.
func (c *Cache) AttachmentsWithSizes() (map[string]int64, error) {
rows, err := c.db.ReadOnly().Query(c.queries.selectAttachmentsWithSizes, time.Now().Unix())
if err != nil {
return nil, err
}
defer rows.Close()
attachments := make(map[string]int64)
for rows.Next() {
var id string
var size int64
if err := rows.Scan(&id, &size); err != nil {
return nil, err
}
attachments[id] = size
}
if err := rows.Err(); err != nil {
return nil, err
}
return attachments, nil
}
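// Example (illustrative, not part of this diff): hydrating an attachment store's size
// tracking at startup. AttachmentsWithSizes has the signature shown above; the variable
// names here are hypothetical.
//
//	sizes, err := cache.AttachmentsWithSizes()
//	if err != nil {
//		return err
//	}
//	var total int64
//	for _, size := range sizes {
//		total += size
//	}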
func (c *Cache) readAttachmentBytesUsed(rows *sql.Rows) (int64, error) {
defer rows.Close()
var size int64
@@ -590,3 +575,18 @@ func readMessage(rows *sql.Rows) (*model.Message, error) {
Encoding: encoding,
}, nil
}
func readStrings(rows *sql.Rows) ([]string, error) {
strs := make([]string, 0)
for rows.Next() {
var s string
if err := rows.Scan(&s); err != nil {
return nil, err
}
strs = append(strs, s)
}
if err := rows.Err(); err != nil {
return nil, err
}
return strs, nil
}

View File

@@ -70,6 +70,7 @@ const (
postgresSelectAttachmentsExpiredQuery = `SELECT mid FROM message WHERE attachment_expires > 0 AND attachment_expires <= $1 AND attachment_deleted = FALSE`
postgresSelectAttachmentsSizeBySenderQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = '' AND sender = $1 AND attachment_expires >= $2`
postgresSelectAttachmentsSizeByUserIDQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = $1 AND attachment_expires >= $2`
postgresSelectAttachmentsWithSizesQuery = `SELECT mid, attachment_size FROM message WHERE attachment_expires > $1 AND attachment_deleted = FALSE`
postgresSelectStatsQuery = `SELECT value FROM message_stats WHERE key = 'messages'`
postgresUpdateStatsQuery = `UPDATE message_stats SET value = $1 WHERE key = 'messages'`
@@ -97,6 +98,7 @@ var postgresQueries = queries{
selectAttachmentsExpired: postgresSelectAttachmentsExpiredQuery,
selectAttachmentsSizeBySender: postgresSelectAttachmentsSizeBySenderQuery,
selectAttachmentsSizeByUserID: postgresSelectAttachmentsSizeByUserIDQuery,
selectAttachmentsWithSizes: postgresSelectAttachmentsWithSizesQuery,
selectStats: postgresSelectStatsQuery,
updateStats: postgresUpdateStatsQuery,
updateMessageTime: postgresUpdateMessageTimeQuery,

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"heckel.io/ntfy/v2/db"
"heckel.io/ntfy/v2/log"
)
// Initial PostgreSQL schema
@@ -41,6 +42,7 @@ const (
CREATE INDEX IF NOT EXISTS idx_message_sequence_id ON message (sequence_id);
CREATE INDEX IF NOT EXISTS idx_message_topic_published_time ON message (topic, published, time, id);
CREATE INDEX IF NOT EXISTS idx_message_published_expires ON message (published, expires);
CREATE INDEX IF NOT EXISTS idx_message_attachment_expires ON message (attachment_expires) WHERE attachment_deleted = FALSE;
CREATE INDEX IF NOT EXISTS idx_message_sender_attachment_expires ON message (sender, attachment_expires) WHERE user_id = '';
CREATE INDEX IF NOT EXISTS idx_message_user_id_attachment_expires ON message (user_id, attachment_expires);
CREATE TABLE IF NOT EXISTS message_stats (
@@ -57,21 +59,57 @@ const (
// PostgreSQL schema management queries
const (
postgresCurrentSchemaVersion = 14
postgresCurrentSchemaVersion = 15
postgresInsertSchemaVersionQuery = `INSERT INTO schema_version (store, version) VALUES ('message', $1)`
postgresUpdateSchemaVersionQuery = `UPDATE schema_version SET version = $1 WHERE store = 'message'`
postgresSelectSchemaVersionQuery = `SELECT version FROM schema_version WHERE store = 'message'`
)
func setupPostgres(db *sql.DB) error {
// PostgreSQL schema migrations
const (
// 14 -> 15
postgresMigrate14To15CreateIndexQuery = `
CREATE INDEX IF NOT EXISTS idx_message_attachment_expires ON message (attachment_expires) WHERE attachment_deleted = FALSE;
`
)
var postgresMigrations = map[int]func(db *sql.DB) error{
14: postgresMigrateFrom14,
}
func setupPostgres(sqlDB *sql.DB) error {
var schemaVersion int
if err := db.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion); err != nil {
return setupNewPostgresDB(db)
if err := sqlDB.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion); err != nil {
return setupNewPostgresDB(sqlDB)
} else if schemaVersion == postgresCurrentSchemaVersion {
return nil
} else if schemaVersion > postgresCurrentSchemaVersion {
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, postgresCurrentSchemaVersion)
}
for i := schemaVersion; i < postgresCurrentSchemaVersion; i++ {
fn, ok := postgresMigrations[i]
if !ok {
return fmt.Errorf("cannot find migration step from schema version %d to %d", i, i+1)
} else if err := fn(sqlDB); err != nil {
return err
}
}
return nil
}
func postgresMigrateFrom14(sqlDB *sql.DB) error {
log.Tag(tagMessageCache).Info("Migrating message cache database schema: from 14 to 15")
return db.ExecTx(sqlDB, func(tx *sql.Tx) error {
if _, err := tx.Exec(postgresMigrate14To15CreateIndexQuery); err != nil {
return err
}
if _, err := tx.Exec(postgresUpdateSchemaVersionQuery, 15); err != nil {
return err
}
return nil
})
}
func setupNewPostgresDB(sqlDB *sql.DB) error {
return db.ExecTx(sqlDB, func(tx *sql.Tx) error {
if _, err := tx.Exec(postgresCreateTablesQuery); err != nil {

View File

@@ -73,6 +73,7 @@ const (
sqliteSelectAttachmentsExpiredQuery = `SELECT mid FROM messages WHERE attachment_expires > 0 AND attachment_expires <= ? AND attachment_deleted = 0`
sqliteSelectAttachmentsSizeBySenderQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = '' AND sender = ? AND attachment_expires >= ?`
sqliteSelectAttachmentsSizeByUserIDQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = ? AND attachment_expires >= ?`
sqliteSelectAttachmentsWithSizesQuery = `SELECT mid, attachment_size FROM messages WHERE attachment_expires > ? AND attachment_deleted = 0`
sqliteSelectStatsQuery = `SELECT value FROM stats WHERE key = 'messages'`
sqliteUpdateStatsQuery = `UPDATE stats SET value = ? WHERE key = 'messages'`
@@ -100,6 +101,7 @@ var sqliteQueries = queries{
selectAttachmentsExpired: sqliteSelectAttachmentsExpiredQuery,
selectAttachmentsSizeBySender: sqliteSelectAttachmentsSizeBySenderQuery,
selectAttachmentsSizeByUserID: sqliteSelectAttachmentsSizeByUserIDQuery,
selectAttachmentsWithSizes: sqliteSelectAttachmentsWithSizesQuery,
selectStats: sqliteSelectStatsQuery,
updateStats: sqliteUpdateStatsQuery,
updateMessageTime: sqliteUpdateMessageTimeQuery,

View File

@@ -57,7 +57,7 @@ const (
// Schema version management for SQLite
const (
sqliteCurrentSchemaVersion = 14
sqliteCurrentSchemaVersion = 15
sqliteCreateSchemaVersionTableQuery = `
CREATE TABLE IF NOT EXISTS schemaVersion (
id INT PRIMARY KEY,
@@ -208,6 +208,7 @@ var (
11: sqliteMigrateFrom11,
12: sqliteMigrateFrom12,
13: sqliteMigrateFrom13,
14: sqliteMigrateFrom14,
}
)
@@ -451,3 +452,15 @@ func sqliteMigrateFrom13(sqlDB *sql.DB, _ time.Duration) error {
return nil
})
}
// sqliteMigrateFrom14 only bumps the schema version; the corresponding Postgres migration adds
// idx_message_attachment_expires, which SQLite already has from the initial schema.
func sqliteMigrateFrom14(sqlDB *sql.DB, _ time.Duration) error {
log.Tag(tagMessageCache).Info("Migrating cache database schema: from 14 to 15")
return db.ExecTx(sqlDB, func(tx *sql.Tx) error {
if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 15); err != nil {
return err
}
return nil
})
}

View File

@@ -19,8 +19,8 @@ const (
PollRequestEvent = "poll_request"
)
// MessageIDLength is the length of a randomly generated message ID
const MessageIDLength = 12
// messageIDLength is the length of a randomly generated message ID
const messageIDLength = 12
// Errors for message operations
var (
@@ -133,10 +133,20 @@ func NewAction() *Action {
}
}
// GenerateMessageID creates a new random message ID
func GenerateMessageID() string {
return util.RandomString(messageIDLength)
}
// ValidMessageID returns true if the given string is a valid message ID
func ValidMessageID(s string) bool {
return util.ValidRandomString(s, messageIDLength)
}
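// Example (illustrative, not part of this diff): generated IDs are 12 alphanumeric
// characters, i.e. they match the `^[A-Za-z0-9]{12}$` format mentioned in the release notes:
//
//	id := GenerateMessageID()   // e.g. "AbCdEfGhIjKl" (hypothetical value)
//	ValidMessageID(id)          // true
//	ValidMessageID("too-short") // false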
// NewMessage creates a new message with the current timestamp
func NewMessage(event, topic, msg string) *Message {
return &Message{
ID: util.RandomString(MessageIDLength),
ID: GenerateMessageID(),
Time: time.Now().Unix(),
Event: event,
Topic: topic,
@@ -173,11 +183,6 @@ func NewPollRequestMessage(topic, pollID string) *Message {
return m
}
// ValidMessageID returns true if the given string is a valid message ID
func ValidMessageID(s string) bool {
return util.ValidRandomString(s, MessageIDLength)
}
// SinceMarker represents a point in time or message ID from which to retrieve messages
type SinceMarker struct {
time time.Time

302
s3/client.go Normal file
View File

@@ -0,0 +1,302 @@
package s3
import (
"bytes"
"context"
"crypto/md5" //nolint:gosec // MD5 is required by the S3 protocol for Content-MD5 headers
"encoding/base64"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"
"heckel.io/ntfy/v2/log"
)
const (
tagS3Client = "s3_client"
)
// Client is a minimal S3-compatible client. It supports PutObject, GetObject, DeleteObjects,
// and ListObjectsV2 operations using AWS Signature V4 signing. The bucket and optional key prefix
// are fixed at construction time. All operations target the same bucket and prefix.
//
// The following IAM policy is required for AWS S3:
//
// {
// "Version": "2012-10-17",
// "Statement": [
// {
// "Effect": "Allow",
// "Action": [
// "s3:ListBucket",
// "s3:ListBucketMultipartUploads"
// ],
// "Resource": "arn:aws:s3:::BUCKET_NAME"
// },
// {
// "Effect": "Allow",
// "Action": [
// "s3:GetObject",
// "s3:PutObject",
// "s3:DeleteObject",
// "s3:AbortMultipartUpload"
// ],
// "Resource": "arn:aws:s3:::BUCKET_NAME/*"
// }
// ]
// }
//
// Fields must not be modified after the Client is passed to any method or goroutine.
type Client struct {
config *Config
http *http.Client
}
// New creates a new S3 client from the given Config.
func New(config *Config) *Client {
httpClient := config.HTTPClient
if httpClient == nil {
httpClient = http.DefaultClient
}
return &Client{
config: config,
http: httpClient,
}
}
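// Example (illustrative, not part of this diff): constructing a client. Region, AccessKey,
// SecretKey and HTTPClient are Config fields visible in this diff; Bucket, Endpoint and
// Prefix are assumptions based on how BucketURL/ObjectURL/ListPrefix are used.
//
//	client := New(&Config{
//		Endpoint:  "s3.us-east-1.amazonaws.com", // assumed field
//		Region:    "us-east-1",
//		Bucket:    "ntfy-attachments",           // assumed field
//		AccessKey: "AKIA...",
//		SecretKey: "...",
//	})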
// PutObject uploads body to the given key. The key is automatically prefixed with the client's
// configured prefix.
//
// If untrustedLength is between 1 and 5 GB, the body is streamed directly to S3 via a
// single PUT request without buffering. The read is limited to untrustedLength bytes;
// any extra data in the body is ignored. If the body is shorter than claimed, the upload fails.
//
// Otherwise (untrustedLength <= 0 or > 5 GB), the first 5 MB are buffered to decide
// between a simple PUT and multipart upload.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
// and https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html
func (c *Client) PutObject(ctx context.Context, key string, body io.Reader, untrustedLength int64) error {
if untrustedLength > 0 && untrustedLength <= maxSinglePutSize {
// Stream directly: Content-Length is known (but untrusted). LimitReader ensures we send at most
// untrustedLength bytes, and any extra data in body is ignored.
return c.putObject(ctx, key, io.LimitReader(body, untrustedLength), untrustedLength)
}
// Buffered path: read first 5 MB to decide simple vs multipart
first := make([]byte, partSize)
n, err := io.ReadFull(body, first)
if errors.Is(err, io.ErrUnexpectedEOF) || err == io.EOF {
return c.putObject(ctx, key, bytes.NewReader(first[:n]), int64(n))
} else if err != nil {
return fmt.Errorf("error reading object %s from client: %w", key, err)
}
return c.putObjectMultipart(ctx, key, io.MultiReader(bytes.NewReader(first), body))
}
// putObject uploads a body with known size using a simple PUT with UNSIGNED-PAYLOAD.
func (c *Client) putObject(ctx context.Context, key string, body io.Reader, size int64) error {
log.Tag(tagS3Client).Debug("Uploading object %s (%d bytes)", key, size)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, c.config.ObjectURL(key), body)
if err != nil {
return fmt.Errorf("creating upload request object %s failed: %w", key, err)
}
req.ContentLength = size
c.signV4(req, unsignedPayload)
resp, err := c.http.Do(req)
if err != nil {
return fmt.Errorf("uploading object %s failed: %w", key, err)
}
defer resp.Body.Close()
if !isHTTPSuccess(resp) {
return parseError(resp)
}
return nil
}
// GetObject downloads an object. The key is automatically prefixed with the client's configured
// prefix. The caller must close the returned ReadCloser.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
func (c *Client) GetObject(ctx context.Context, key string) (io.ReadCloser, int64, error) {
log.Tag(tagS3Client).Debug("Fetching object %s", key)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.config.ObjectURL(key), nil)
if err != nil {
return nil, 0, fmt.Errorf("error creating HTTP GET request for %s: %w", key, err)
}
c.signV4(req, emptyPayloadHash)
resp, err := c.http.Do(req)
if err != nil {
return nil, 0, fmt.Errorf("error fetching object %s: %w", key, err)
} else if !isHTTPSuccess(resp) {
err := parseError(resp)
resp.Body.Close()
return nil, 0, err
}
return resp.Body, resp.ContentLength, nil
}
// ListObjectsV2 returns all objects under the client's configured prefix by paginating through
// ListObjectsV2 results automatically. Keys in the returned objects have the prefix stripped,
// so they match the keys used with PutObject/GetObject/DeleteObjects. It stops after 10,000
// pages as a safety valve.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
func (c *Client) ListObjectsV2(ctx context.Context) ([]*Object, error) {
var all []*Object
var token string
for page := 0; page < maxPages; page++ {
result, err := c.listObjectsV2(ctx, token)
if err != nil {
return nil, err
}
for _, obj := range result.Contents {
var lastModified time.Time
if obj.LastModified != "" {
lastModified, _ = time.Parse(time.RFC3339, obj.LastModified)
}
all = append(all, &Object{
Key: c.config.StripPrefix(obj.Key),
Size: obj.Size,
LastModified: lastModified,
})
}
if !result.IsTruncated {
return all, nil
}
token = result.NextContinuationToken
}
return nil, fmt.Errorf("listing objects exceeded %d pages", maxPages)
}
// listObjectsV2 performs a single ListObjectsV2 request using the client's configured prefix.
func (c *Client) listObjectsV2(ctx context.Context, continuationToken string) (*listObjectsV2Result, error) {
if continuationToken == "" {
log.Tag(tagS3Client).Debug("Listing remote objects")
} else {
log.Tag(tagS3Client).Debug("Listing remote objects, continuing with token '%s'", continuationToken)
}
query := url.Values{"list-type": {"2"}}
if prefix := c.config.ListPrefix(); prefix != "" {
query.Set("prefix", prefix)
}
if continuationToken != "" {
query.Set("continuation-token", continuationToken)
}
respBody, err := c.do(ctx, "ListObjects", http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil)
if err != nil {
return nil, err
}
var result listObjectsV2Result
if err := xml.Unmarshal(respBody, &result); err != nil {
return nil, fmt.Errorf("failed to unmarshal list object response: %w", err)
}
return &result, nil
}
// DeleteObjects removes multiple objects in batched requests. Keys are automatically
// prefixed with the client's configured prefix. S3 supports at most 1000 keys per call,
// so larger key lists are split into multiple requests internally.
//
// Even when S3 returns HTTP 200, individual keys may fail. If any per-key errors are present
// in the response, they are returned as a combined error.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
func (c *Client) DeleteObjects(ctx context.Context, keys []string) error {
// S3 DeleteObjects supports up to 1000 keys per call
for i := 0; i < len(keys); i += maxDeleteBatchSize {
end := i + maxDeleteBatchSize
if end > len(keys) {
end = len(keys)
}
if err := c.deleteObjects(ctx, keys[i:end]); err != nil {
return err
}
}
return nil
}
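// Example (illustrative, not part of this diff): callers may pass any number of keys;
// splitting into chunks of at most 1000 happens internally.
//
//	if err := client.DeleteObjects(ctx, keys); err != nil {
//		// err combines any per-key failures reported by S3
//	}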
func (c *Client) deleteObjects(ctx context.Context, keys []string) error {
log.Tag(tagS3Client).Debug("Deleting %d object(s)", len(keys))
req := &deleteObjectsRequest{
Quiet: true,
}
for _, key := range keys {
req.Objects = append(req.Objects, &deleteObject{Key: c.config.ObjectKey(key)})
}
body, err := xml.Marshal(req)
if err != nil {
return fmt.Errorf("error marshalling XML for deleting objects: %w", err)
}
// Content-MD5 is required by the S3 protocol for DeleteObjects requests.
md5Sum := md5.Sum(body) //nolint:gosec
headers := map[string]string{
"Content-MD5": base64.StdEncoding.EncodeToString(md5Sum[:]),
}
reqURL := c.config.BucketURL() + "?delete"
respBody, err := c.do(ctx, "DeleteObjects", http.MethodPost, reqURL, body, headers)
if err != nil {
return fmt.Errorf("error deleting objects: %w", err)
}
// S3 may return HTTP 200 with per-key errors in the response body
var result deleteObjectsResult
if err := xml.Unmarshal(respBody, &result); err != nil {
return nil // If we can't parse, assume success (Quiet mode returns empty body on success)
}
if len(result.Errors) > 0 {
var msgs []string
for _, e := range result.Errors {
msgs = append(msgs, fmt.Sprintf("%s: %s", e.Key, e.Message))
}
return fmt.Errorf("error deleting objects, partial failure: %s", strings.Join(msgs, "; "))
}
return nil
}
// do creates a signed request, executes it, reads the response body, and checks for errors.
// If body is nil, the request is sent with an empty payload. If body is non-nil, it is sent
// with a computed SHA-256 payload hash and Content-Type: application/xml.
func (c *Client) do(ctx context.Context, op, method, reqURL string, body []byte, headers map[string]string) ([]byte, error) {
log.Tag(tagS3Client).Trace("Performing request %s %s %s (body: %d bytes)", op, method, reqURL, len(body))
var reader io.Reader
var hash string
if body != nil {
reader = bytes.NewReader(body)
hash = sha256Hex(body)
} else {
hash = emptyPayloadHash
}
req, err := http.NewRequestWithContext(ctx, method, reqURL, reader)
if err != nil {
return nil, fmt.Errorf("s3: %s request: %w", op, err)
}
if body != nil {
req.ContentLength = int64(len(body))
req.Header.Set("Content-Type", "application/xml")
} else {
req.ContentLength = 0
}
for k, v := range headers {
req.Header.Set(k, v)
}
c.signV4(req, hash)
resp, err := c.http.Do(req)
if err != nil {
return nil, fmt.Errorf("s3: %s: %w", op, err)
}
respBody, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBytes))
resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("s3: %s read: %w", op, err)
}
if !isHTTPSuccess(resp) {
return nil, parseErrorFromBytes(resp.StatusCode, respBody)
}
return respBody, nil
}

71
s3/client_auth.go Normal file
View File

@@ -0,0 +1,71 @@
package s3
import (
"encoding/hex"
"fmt"
"net/http"
"sort"
"strings"
"time"
)
// signV4 signs req in place using AWS Signature V4. hash is the hex-encoded SHA-256
// of the request body, or the literal string "UNSIGNED-PAYLOAD" for streaming uploads.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
func (c *Client) signV4(req *http.Request, hash string) {
now := time.Now().UTC()
datestamp := now.Format("20060102")
amzDate := now.Format("20060102T150405Z")
// Required headers
req.Header.Set("Host", c.config.HostHeader())
req.Header.Set("X-Amz-Date", amzDate)
req.Header.Set("X-Amz-Content-Sha256", hash)
// Canonical headers (all headers we set, sorted by lowercase key)
signedKeys := make([]string, 0, len(req.Header))
canonHeaders := make(map[string]string, len(req.Header))
for k := range req.Header {
lk := strings.ToLower(k)
signedKeys = append(signedKeys, lk)
canonHeaders[lk] = strings.TrimSpace(req.Header.Get(k))
}
sort.Strings(signedKeys)
signedHeadersStr := strings.Join(signedKeys, ";")
var chBuf strings.Builder
for _, k := range signedKeys {
chBuf.WriteString(k)
chBuf.WriteByte(':')
chBuf.WriteString(canonHeaders[k])
chBuf.WriteByte('\n')
}
// Canonical request
canonicalRequest := strings.Join([]string{
req.Method,
canonicalURI(req.URL),
canonicalQueryString(req.URL.Query()),
chBuf.String(),
signedHeadersStr,
hash,
}, "\n")
// String to sign
credentialScope := datestamp + "/" + c.config.Region + "/s3/aws4_request"
stringToSign := "AWS4-HMAC-SHA256\n" + amzDate + "\n" + credentialScope + "\n" + sha256Hex([]byte(canonicalRequest))
// Signing key
signingKey := hmacSHA256(hmacSHA256(hmacSHA256(hmacSHA256(
[]byte("AWS4"+c.config.SecretKey), []byte(datestamp)),
[]byte(c.config.Region)),
[]byte("s3")),
[]byte("aws4_request"))
signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign)))
header := fmt.Sprintf(
"AWS4-HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s",
c.config.AccessKey, credentialScope, signedHeadersStr, signature,
)
req.Header.Set("Authorization", header)
}
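// The hmacSHA256 and sha256Hex helpers used above are not part of this diff; a minimal
// sketch of what they plausibly look like, using only the standard library
// (crypto/hmac, crypto/sha256, encoding/hex):
//
//	func hmacSHA256(key, data []byte) []byte {
//		mac := hmac.New(sha256.New, key)
//		mac.Write(data)
//		return mac.Sum(nil)
//	}
//
//	func sha256Hex(data []byte) string {
//		sum := sha256.Sum256(data)
//		return hex.EncodeToString(sum[:])
//	}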

187
s3/client_multipart.go Normal file
View File

@@ -0,0 +1,187 @@
package s3
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"time"
"heckel.io/ntfy/v2/log"
)
// AbortIncompleteUploads lists all in-progress multipart uploads and aborts those initiated
// before the given cutoff time. This cleans up orphaned upload parts from interrupted uploads.
//
// See https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
// and https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html
func (c *Client) AbortIncompleteUploads(ctx context.Context, cutoff time.Time) error {
uploads, err := c.listMultipartUploads(ctx)
if err != nil {
return err
}
for _, u := range uploads {
if !u.Initiated.IsZero() && u.Initiated.Before(cutoff) {
c.abortMultipartUpload(ctx, u.Key, u.UploadID)
}
}
return nil
}
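// Example (illustrative, not part of this diff): a periodic janitor could abort uploads
// older than, say, one day:
//
//	_ = c.AbortIncompleteUploads(ctx, time.Now().Add(-24*time.Hour))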
// listMultipartUploads returns in-progress multipart uploads for the client's prefix.
// It paginates automatically, stopping after 10,000 pages as a safety valve.
func (c *Client) listMultipartUploads(ctx context.Context) ([]*multipartUpload, error) {
var all []*multipartUpload
var keyMarker, uploadIDMarker string
for page := 0; page < maxPages; page++ {
query := url.Values{"uploads": {""}}
if prefix := c.config.ListPrefix(); prefix != "" {
query.Set("prefix", prefix)
}
if keyMarker != "" {
query.Set("key-marker", keyMarker)
query.Set("upload-id-marker", uploadIDMarker)
}
respBody, err := c.do(ctx, "ListMultipartUploads", http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil)
if err != nil {
return nil, err
}
var result listMultipartUploadsResult
if err := xml.Unmarshal(respBody, &result); err != nil {
return nil, fmt.Errorf("error unmarshalling multipart upload result: %w", err)
}
for _, u := range result.Uploads {
var initiated time.Time
if u.Initiated != "" {
initiated, _ = time.Parse(time.RFC3339, u.Initiated)
}
all = append(all, &multipartUpload{
Key: u.Key,
UploadID: u.UploadID,
Initiated: initiated,
})
}
if !result.IsTruncated {
return all, nil
}
keyMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
}
return nil, fmt.Errorf("error listing multipart uploads, exceeded %d pages", maxPages)
}
// abortMultipartUpload cancels an in-progress multipart upload. Called on error to clean up.
func (c *Client) abortMultipartUpload(ctx context.Context, key, uploadID string) {
log.Tag(tagS3Client).Info("Aborting multipart upload for object %s", key)
reqURL := fmt.Sprintf("%s?uploadId=%s", c.config.ObjectURL(key), url.QueryEscape(uploadID))
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, reqURL, nil)
if err != nil {
return
}
c.signV4(req, emptyPayloadHash)
resp, err := c.http.Do(req)
if err != nil {
return
}
resp.Body.Close()
}
// putObjectMultipart uploads body using S3 multipart upload. It reads the body in partSize
// chunks, uploading each as a separate part. This allows uploading without knowing the total
// body size in advance.
func (c *Client) putObjectMultipart(ctx context.Context, key string, body io.Reader) error {
log.Tag(tagS3Client).Debug("Uploading multipart object %s", key)
// Step 1: Initiate multipart upload
uploadID, err := c.initiateMultipartUpload(ctx, key)
if err != nil {
return err
}
// Step 2: Upload parts
partNumber := 1
buf := make([]byte, partSize)
var parts []*completedPart
for {
n, err := io.ReadFull(body, buf)
if n > 0 {
etag, uploadErr := c.uploadPart(ctx, key, uploadID, partNumber, buf[:n])
if uploadErr != nil {
c.abortMultipartUpload(ctx, key, uploadID)
return uploadErr
}
parts = append(parts, &completedPart{
PartNumber: partNumber,
ETag: etag,
})
partNumber++
}
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
break
} else if err != nil {
c.abortMultipartUpload(ctx, key, uploadID)
return fmt.Errorf("error uploading object %s, reading from client failed: %w", key, err)
}
}
// Step 3: Complete multipart upload
return c.completeMultipartUpload(ctx, key, uploadID, parts)
}
// initiateMultipartUpload starts a new multipart upload and returns the upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, key string) (string, error) {
respBody, err := c.do(ctx, "InitiateMultipartUpload", http.MethodPost, c.config.ObjectURL(key)+"?uploads", nil, nil)
if err != nil {
return "", err
}
var result initiateMultipartUploadResult
if err := xml.Unmarshal(respBody, &result); err != nil {
return "", fmt.Errorf("error unmarshalling initiate multipart upload response: %w", err)
}
return result.UploadID, nil
}
// uploadPart uploads a single part of a multipart upload and returns the ETag.
func (c *Client) uploadPart(ctx context.Context, key, uploadID string, partNumber int, data []byte) (string, error) {
log.Tag(tagS3Client).Debug("Uploading multipart part for object %s, part %d, size %d", key, partNumber, len(data))
reqURL := fmt.Sprintf("%s?partNumber=%d&uploadId=%s", c.config.ObjectURL(key), partNumber, url.QueryEscape(uploadID))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, reqURL, bytes.NewReader(data))
if err != nil {
return "", fmt.Errorf("error creating multipart upload part request for object %s: %w", key, err)
}
req.ContentLength = int64(len(data))
c.signV4(req, unsignedPayload)
resp, err := c.http.Do(req)
if err != nil {
return "", fmt.Errorf("error uploading multipart part for object %s: %w", key, err)
}
defer resp.Body.Close()
if !isHTTPSuccess(resp) {
return "", parseError(resp)
}
return resp.Header.Get("ETag"), nil
}
// completeMultipartUpload finalizes a multipart upload with the given parts.
func (c *Client) completeMultipartUpload(ctx context.Context, key, uploadID string, parts []*completedPart) error {
log.Tag(tagS3Client).Debug("Completing multipart upload for object %s, %d parts", key, len(parts))
bodyBytes, err := xml.Marshal(&completeMultipartUploadRequest{Parts: parts})
if err != nil {
return fmt.Errorf("error marshalling complete multipart upload request: %w", err)
}
reqURL := fmt.Sprintf("%s?uploadId=%s", c.config.ObjectURL(key), url.QueryEscape(uploadID))
respBody, err := c.do(ctx, "CompleteMultipartUpload", http.MethodPost, reqURL, bodyBytes, nil)
if err != nil {
return err
}
// Check if the response contains an error (S3 can return 200 with an error body)
var errResp errorResponse
if xml.Unmarshal(respBody, &errResp) == nil && errResp.Code != "" {
return &errResp
}
return nil
}
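PutObject selects putObjectMultipart automatically once a body outgrows a single part, so callers never drive the three steps directly. A hedged usage sketch, assuming code living alongside this s3 package (imports context, io, and time); the URL, object key, and 24-hour cutoff are placeholder choices:

// uploadAndCleanup streams a body of unknown length, then aborts multipart
// uploads orphaned by interrupted requests. Illustrative only.
func uploadAndCleanup(ctx context.Context, body io.Reader) error {
	cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/attachments?region=us-east-1")
	if err != nil {
		return err
	}
	client := New(cfg)
	// untrustedLength == 0: PutObject reads partSize chunks and takes the
	// multipart path above once the body exceeds a single part.
	if err := client.PutObject(ctx, "big-object", body, 0); err != nil {
		return err
	}
	return client.AbortIncompleteUploads(ctx, time.Now().Add(-24*time.Hour))
}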

s3/client_test.go Normal file
View File

@@ -0,0 +1,414 @@
package s3
import (
"bytes"
"context"
"fmt"
"io"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestParseURL_Success(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/attachments?region=us-east-1")
require.Nil(t, err)
require.Equal(t, "my-bucket", cfg.Bucket)
require.Equal(t, "attachments", cfg.Prefix)
require.Equal(t, "us-east-1", cfg.Region)
require.Equal(t, "AKID", cfg.AccessKey)
require.Equal(t, "SECRET", cfg.SecretKey)
require.Equal(t, "s3.us-east-1.amazonaws.com", cfg.Endpoint)
require.False(t, cfg.PathStyle)
}
func TestParseURL_NoPrefix(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1")
require.Nil(t, err)
require.Equal(t, "my-bucket", cfg.Bucket)
require.Equal(t, "", cfg.Prefix)
}
func TestParseURL_WithEndpoint(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/prefix?region=us-east-1&endpoint=https://s3.example.com")
require.Nil(t, err)
require.Equal(t, "my-bucket", cfg.Bucket)
require.Equal(t, "prefix", cfg.Prefix)
require.Equal(t, "s3.example.com", cfg.Endpoint)
require.True(t, cfg.PathStyle)
}
func TestParseURL_EndpointHTTP(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1&endpoint=http://localhost:9000")
require.Nil(t, err)
require.Equal(t, "localhost:9000", cfg.Endpoint)
require.True(t, cfg.PathStyle)
}
func TestParseURL_EndpointTrailingSlash(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1&endpoint=https://s3.example.com/")
require.Nil(t, err)
require.Equal(t, "s3.example.com", cfg.Endpoint)
}
func TestParseURL_NestedPrefix(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket/a/b/c?region=us-east-1")
require.Nil(t, err)
require.Equal(t, "my-bucket", cfg.Bucket)
require.Equal(t, "a/b/c", cfg.Prefix)
}
func TestParseURL_MissingRegion(t *testing.T) {
_, err := ParseURL("s3://AKID:SECRET@my-bucket")
require.Error(t, err)
require.Contains(t, err.Error(), "region")
}
func TestParseURL_MissingCredentials(t *testing.T) {
_, err := ParseURL("s3://my-bucket?region=us-east-1")
require.Error(t, err)
require.Contains(t, err.Error(), "access key")
}
func TestParseURL_MissingSecretKey(t *testing.T) {
_, err := ParseURL("s3://AKID@my-bucket?region=us-east-1")
require.Error(t, err)
require.Contains(t, err.Error(), "secret key")
}
func TestParseURL_WrongScheme(t *testing.T) {
_, err := ParseURL("http://AKID:SECRET@my-bucket?region=us-east-1")
require.Error(t, err)
require.Contains(t, err.Error(), "scheme")
}
func TestParseURL_EmptyBucket(t *testing.T) {
_, err := ParseURL("s3://AKID:SECRET@?region=us-east-1")
require.Error(t, err)
require.Contains(t, err.Error(), "bucket")
}
// --- Unit tests: URL construction ---
func TestConfig_BucketURL_PathStyle(t *testing.T) {
c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true}
require.Equal(t, "https://s3.example.com/my-bucket", c.BucketURL())
}
func TestConfig_BucketURL_VirtualHosted(t *testing.T) {
c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", PathStyle: false}
require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com", c.BucketURL())
}
func TestConfig_ObjectURL_PathStyle(t *testing.T) {
c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: true}
require.Equal(t, "https://s3.example.com/my-bucket/prefix/obj", c.ObjectURL("obj"))
}
func TestConfig_ObjectURL_VirtualHosted(t *testing.T) {
c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: false}
require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com/prefix/obj", c.ObjectURL("obj"))
}
func TestConfig_HostHeader_PathStyle(t *testing.T) {
c := &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true}
require.Equal(t, "s3.example.com", c.HostHeader())
}
func TestConfig_HostHeader_VirtualHosted(t *testing.T) {
c := &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", PathStyle: false}
require.Equal(t, "my-bucket.s3.us-east-1.amazonaws.com", c.HostHeader())
}
func TestConfig_ObjectKey(t *testing.T) {
c := &Config{Prefix: "attachments"}
require.Equal(t, "attachments/file123", c.ObjectKey("file123"))
c2 := &Config{Prefix: ""}
require.Equal(t, "file123", c2.ObjectKey("file123"))
}
func TestConfig_ListPrefix(t *testing.T) {
c := &Config{Prefix: "attachments"}
require.Equal(t, "attachments/", c.ListPrefix())
c2 := &Config{Prefix: ""}
require.Equal(t, "", c2.ListPrefix())
}
// --- Integration tests using real S3 ---
func TestClient_PutGetObject(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Put
err := client.PutObject(ctx, "test-key", strings.NewReader("hello world"), 0)
require.Nil(t, err)
// Get
reader, size, err := client.GetObject(ctx, "test-key")
require.Nil(t, err)
require.Equal(t, int64(11), size)
data, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello world", string(data))
}
func TestClient_GetObject_NotFound(t *testing.T) {
client := newTestClient(t)
_, _, err := client.GetObject(context.Background(), "nonexistent")
require.Error(t, err)
}
func TestClient_DeleteObjects(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Put several objects
for i := 0; i < 5; i++ {
err := client.PutObject(ctx, fmt.Sprintf("del-%d", i), bytes.NewReader([]byte("data")), 0)
require.Nil(t, err)
}
waitForCount(t, client, 5)
// Delete some
err := client.DeleteObjects(ctx, []string{"del-1", "del-3"})
require.Nil(t, err)
waitForCount(t, client, 3)
// Verify deleted ones are gone
_, _, err = client.GetObject(ctx, "del-1")
require.Error(t, err)
_, _, err = client.GetObject(ctx, "del-3")
require.Error(t, err)
// Verify remaining ones are still there
for _, key := range []string{"del-0", "del-2", "del-4"} {
reader, _, err := client.GetObject(ctx, key)
require.Nil(t, err)
reader.Close()
}
}
func TestClient_ListObjects(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
for i := 0; i < 3; i++ {
err := client.PutObject(ctx, fmt.Sprintf("list-%d", i), bytes.NewReader([]byte("x")), 0)
require.Nil(t, err)
}
waitForCount(t, client, 3)
}
func TestClient_ListObjects_Pagination(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Create 1010 objects in parallel (5 goroutines)
const total = 1010
const workers = 5
var wg sync.WaitGroup
errs := make(chan error, total)
for w := 0; w < workers; w++ {
wg.Add(1)
go func(start int) {
defer wg.Done()
for i := start; i < total; i += workers {
if err := client.PutObject(ctx, fmt.Sprintf("pg-%04d", i), bytes.NewReader([]byte("x")), 0); err != nil {
errs <- err
return
}
}
}(w)
}
wg.Wait()
close(errs)
for err := range errs {
require.Nil(t, err)
}
waitForCount(t, client, total)
}
func TestClient_PutObject_LargeBody(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// 1 MB object
data := make([]byte, 1024*1024)
for i := range data {
data[i] = byte(i % 256)
}
err := client.PutObject(ctx, "large", bytes.NewReader(data), 0)
require.Nil(t, err)
reader, size, err := client.GetObject(ctx, "large")
require.Nil(t, err)
require.Equal(t, int64(1024*1024), size)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, data, got)
}
func TestClient_PutObject_ChunkedUpload(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// 12 MB object, exceeds 5 MB partSize, triggers multipart upload path
data := make([]byte, 12*1024*1024)
for i := range data {
data[i] = byte(i % 256)
}
err := client.PutObject(ctx, "multipart", bytes.NewReader(data), 0)
require.Nil(t, err)
reader, size, err := client.GetObject(ctx, "multipart")
require.Nil(t, err)
require.Equal(t, int64(12*1024*1024), size)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, data, got)
}
func TestClient_PutObject_ExactPartSize(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Exactly 5 MB (partSize), should use the simple put path (ReadFull succeeds fully)
data := make([]byte, 5*1024*1024)
for i := range data {
data[i] = byte(i % 256)
}
err := client.PutObject(ctx, "exact", bytes.NewReader(data), 0)
require.Nil(t, err)
reader, size, err := client.GetObject(ctx, "exact")
require.Nil(t, err)
require.Equal(t, int64(5*1024*1024), size)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, data, got)
}
func TestClient_PutObject_StreamingExactLength(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// untrustedLength matches body exactly — streams directly via putObject
err := client.PutObject(ctx, "stream-exact", strings.NewReader("hello world"), 11)
require.Nil(t, err)
reader, size, err := client.GetObject(ctx, "stream-exact")
require.Nil(t, err)
require.Equal(t, int64(11), size)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello world", string(got))
}
func TestClient_PutObject_StreamingBodyLongerThanClaimed(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Body has 11 bytes, but we claim 5 — only first 5 bytes should be stored
err := client.PutObject(ctx, "stream-long", strings.NewReader("hello world"), 5)
require.Nil(t, err)
reader, size, err := client.GetObject(ctx, "stream-long")
require.Nil(t, err)
require.Equal(t, int64(5), size)
got, err := io.ReadAll(reader)
reader.Close()
require.Nil(t, err)
require.Equal(t, "hello", string(got))
}
func TestClient_PutObject_StreamingBodyShorterThanClaimed(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
// Body has 5 bytes, but we claim 100 — should fail
err := client.PutObject(ctx, "stream-short", strings.NewReader("hello"), 100)
require.Error(t, err)
// Object should not exist
_, _, err = client.GetObject(ctx, "stream-short")
require.Error(t, err)
}
func TestClient_PutObject_NestedKey(t *testing.T) {
client := newTestClient(t)
ctx := context.Background()
err := client.PutObject(ctx, "deep/nested/prefix/file.txt", strings.NewReader("nested"), 0)
require.Nil(t, err)
reader, _, err := client.GetObject(ctx, "deep/nested/prefix/file.txt")
require.Nil(t, err)
data, _ := io.ReadAll(reader)
reader.Close()
require.Equal(t, "nested", string(data))
}
func newTestClient(t *testing.T) *Client {
t.Helper()
s3URL := os.Getenv("NTFY_TEST_S3_URL")
if s3URL == "" {
t.Skip("NTFY_TEST_S3_URL not set")
}
cfg, err := ParseURL(s3URL)
require.Nil(t, err)
// Use per-test prefix to isolate objects between tests
if cfg.Prefix != "" {
cfg.Prefix = cfg.Prefix + "/testpkg-s3/" + t.Name()
} else {
cfg.Prefix = "testpkg-s3/" + t.Name()
}
client := New(cfg)
deleteAllObjects(t, client)
t.Cleanup(func() { deleteAllObjects(t, client) })
return client
}
func deleteAllObjects(t *testing.T, client *Client) {
t.Helper()
for i := 0; i < 60; i++ {
objects, err := client.ListObjectsV2(context.Background())
require.Nil(t, err)
if len(objects) == 0 {
return
}
keys := make([]string, len(objects))
for j, obj := range objects {
keys[j] = obj.Key
}
require.Nil(t, client.DeleteObjects(context.Background(), keys))
time.Sleep(500 * time.Millisecond)
}
t.Fatal("timed out waiting for bucket to be empty")
}
func waitForCount(t *testing.T, client *Client, expected int) {
t.Helper()
for i := 0; i < 60; i++ {
objects, err := client.ListObjectsV2(context.Background())
require.Nil(t, err)
if len(objects) == expected {
return
}
time.Sleep(500 * time.Millisecond)
}
objects, _ := client.ListObjectsV2(context.Background())
t.Fatalf("timed out waiting for %d objects, got %d", expected, len(objects))
}
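The integration tests above run against a real bucket: newTestClient skips unless NTFY_TEST_S3_URL is set, so locally they would presumably be invoked along the lines of NTFY_TEST_S3_URL="s3://AKID:SECRET@bucket?region=..." go test ./s3, with each test isolated under its own testpkg-s3/<TestName> prefix and cleaned up by deleteAllObjects before and after.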

s3/types.go Normal file
View File

@@ -0,0 +1,165 @@
package s3
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strings"
"time"
)
// Config holds the parsed fields from an S3 URL. Use ParseURL to create one from a URL string.
type Config struct {
Endpoint string // host[:port] only, e.g. "s3.us-east-1.amazonaws.com"
PathStyle bool
Bucket string
Prefix string
Region string
AccessKey string
SecretKey string
HTTPClient *http.Client // if nil, http.DefaultClient is used
}
// BucketURL returns the base URL for bucket-level operations.
func (c *Config) BucketURL() string {
if c.PathStyle {
return fmt.Sprintf("https://%s/%s", c.Endpoint, c.Bucket)
}
return fmt.Sprintf("https://%s.%s", c.Bucket, c.Endpoint)
}
// HostHeader returns the value for the Host header.
func (c *Config) HostHeader() string {
if c.PathStyle {
return c.Endpoint
}
return c.Bucket + "." + c.Endpoint
}
// ListPrefix returns the prefix to use in ListObjectsV2 requests,
// with a trailing slash so that only objects under the prefix directory are returned.
func (c *Config) ListPrefix() string {
if c.Prefix != "" {
return c.Prefix + "/"
}
return ""
}
// StripPrefix removes the configured prefix from a key returned by ListObjectsV2,
// so keys match what was passed to PutObject/GetObject/DeleteObjects.
func (c *Config) StripPrefix(key string) string {
if c.Prefix != "" {
return strings.TrimPrefix(key, c.Prefix+"/")
}
return key
}
// ObjectKey prepends the configured prefix to the given key.
func (c *Config) ObjectKey(key string) string {
if c.Prefix != "" {
return c.Prefix + "/" + key
}
return key
}
// ObjectURL returns the full URL for an object, automatically prepending the configured prefix.
func (c *Config) ObjectURL(key string) string {
u, _ := url.JoinPath(c.BucketURL(), c.ObjectKey(key))
return u
}
// Object represents an S3 object returned by list operations.
type Object struct {
Key string
Size int64
LastModified time.Time
}
// errorResponse is returned when S3 responds with a non-2xx status code.
type errorResponse struct {
StatusCode int
Code string `xml:"Code"`
Message string `xml:"Message"`
Body string `xml:"-"` // raw response body
}
func (e *errorResponse) Error() string {
if e.Code != "" {
return fmt.Sprintf("s3: %s (HTTP %d): %s", e.Code, e.StatusCode, e.Message)
}
return fmt.Sprintf("s3: HTTP %d: %s", e.StatusCode, e.Body)
}
// listObjectsV2Result is the XML response from S3 ListObjectsV2
type listObjectsV2Result struct {
Contents []*listObject `xml:"Contents"`
IsTruncated bool `xml:"IsTruncated"`
NextContinuationToken string `xml:"NextContinuationToken"`
}
type listObject struct {
Key string `xml:"Key"`
Size int64 `xml:"Size"`
LastModified string `xml:"LastModified"`
}
// deleteObjectsRequest is the XML request body for S3 DeleteObjects
type deleteObjectsRequest struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool `xml:"Quiet"`
Objects []*deleteObject `xml:"Object"`
}
type deleteObject struct {
Key string `xml:"Key"`
}
// deleteObjectsResult is the XML response from S3 DeleteObjects
type deleteObjectsResult struct {
Errors []*deleteError `xml:"Error"`
}
type deleteError struct {
Key string `xml:"Key"`
Code string `xml:"Code"`
Message string `xml:"Message"`
}
// listMultipartUploadsResult is the XML response from S3 ListMultipartUploads
type listMultipartUploadsResult struct {
Uploads []*listUpload `xml:"Upload"`
IsTruncated bool `xml:"IsTruncated"`
NextKeyMarker string `xml:"NextKeyMarker"`
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
}
type listUpload struct {
Key string `xml:"Key"`
UploadID string `xml:"UploadId"`
Initiated string `xml:"Initiated"`
}
// multipartUpload represents an in-progress multipart upload returned by listMultipartUploads.
type multipartUpload struct {
Key string
UploadID string
Initiated time.Time
}
// initiateMultipartUploadResult is the XML response from S3 InitiateMultipartUpload
type initiateMultipartUploadResult struct {
UploadID string `xml:"UploadId"`
}
// completeMultipartUploadRequest is the XML request body for S3 CompleteMultipartUpload
type completeMultipartUploadRequest struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts []*completedPart `xml:"Part"`
}
// completedPart represents a successfully uploaded part for CompleteMultipartUpload
type completedPart struct {
PartNumber int `xml:"PartNumber"`
ETag string `xml:"ETag"`
}
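For orientation, the request types above marshal with encoding/xml into exactly the shapes S3 expects. A small fragment for deleteObjectsRequest (the keys are placeholders):

// Marshalling a two-key delete request.
b, _ := xml.Marshal(&deleteObjectsRequest{
	Quiet:   true,
	Objects: []*deleteObject{{Key: "a"}, {Key: "b"}},
})
// string(b) == "<Delete><Quiet>true</Quiet><Object><Key>a</Key></Object><Object><Key>b</Key></Object></Delete>"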

s3/util.go Normal file
View File

@@ -0,0 +1,180 @@
package s3
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strings"
)
const (
// SHA-256 hash of the empty string, used as the payload hash for bodiless requests
emptyPayloadHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
// Sent as the payload hash for streaming uploads where the body is not buffered in memory
unsignedPayload = "UNSIGNED-PAYLOAD"
// maxResponseBytes caps the size of S3 response bodies we read into memory
maxResponseBytes = 2 * 1024 * 1024
// partSize is the size of each part for multipart uploads (5 MB). This is also the threshold
// above which PutObject switches from a simple PUT to multipart upload. S3 requires a minimum
// part size of 5 MB for all parts except the last.
partSize = 5 * 1024 * 1024
// maxSinglePutSize is the maximum size for a single PUT upload (5 GB).
// Objects larger than this must use multipart upload.
maxSinglePutSize = 5 * 1024 * 1024 * 1024
// maxPages is the max number of pages to iterate through when listing objects
maxPages = 500
// maxDeleteBatchSize is the maximum number of keys per S3 DeleteObjects call
maxDeleteBatchSize = 1000
)
// ParseURL parses an S3 URL of the form:
//
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
//
// When endpoint is specified, path-style addressing is enabled automatically.
func ParseURL(s3URL string) (*Config, error) {
u, err := url.Parse(s3URL)
if err != nil {
return nil, fmt.Errorf("s3: invalid URL: %w", err)
}
if u.Scheme != "s3" {
return nil, fmt.Errorf("s3: URL scheme must be 's3', got '%s'", u.Scheme)
}
if u.Host == "" {
return nil, fmt.Errorf("s3: bucket name must be specified as host")
}
bucket := u.Host
prefix := strings.TrimPrefix(u.Path, "/")
accessKey := u.User.Username()
secretKey, _ := u.User.Password()
if accessKey == "" || secretKey == "" {
return nil, fmt.Errorf("s3: access key and secret key must be specified in URL")
}
region := u.Query().Get("region")
if region == "" {
return nil, fmt.Errorf("s3: region query parameter is required")
}
endpointParam := u.Query().Get("endpoint")
var endpoint string
var pathStyle bool
if endpointParam != "" {
// Custom endpoint: strip scheme prefix to extract host[:port]
ep := strings.TrimRight(endpointParam, "/")
ep = strings.TrimPrefix(ep, "https://")
ep = strings.TrimPrefix(ep, "http://")
endpoint = ep
pathStyle = true
} else {
endpoint = fmt.Sprintf("s3.%s.amazonaws.com", region)
pathStyle = false
}
return &Config{
Endpoint: endpoint,
PathStyle: pathStyle,
Bucket: bucket,
Prefix: prefix,
Region: region,
AccessKey: accessKey,
SecretKey: secretKey,
}, nil
}
// parseError reads an S3 error response and returns an *errorResponse.
func parseError(resp *http.Response) error {
body, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBytes))
if err != nil {
return fmt.Errorf("error reading S3 error response: %w", err)
}
return parseErrorFromBytes(resp.StatusCode, body)
}
func parseErrorFromBytes(statusCode int, body []byte) error {
errResp := &errorResponse{
StatusCode: statusCode,
Body: string(body),
}
// Try to parse XML error; if it fails, we still have StatusCode and Body
_ = xml.Unmarshal(body, errResp)
return errResp
}
// canonicalURI returns the URI-encoded path for the canonical request. Each path segment is
// percent-encoded per RFC 3986; forward slashes are preserved.
func canonicalURI(u *url.URL) string {
p := u.Path
if p == "" {
return "/"
}
segments := strings.Split(p, "/")
for i, seg := range segments {
segments[i] = uriEncode(seg)
}
return strings.Join(segments, "/")
}
// canonicalQueryString builds the query string for the canonical request. Keys and values
// are URI-encoded per RFC 3986 (using %20, not +) and sorted lexically by key.
func canonicalQueryString(values url.Values) string {
if len(values) == 0 {
return ""
}
keys := make([]string, 0, len(values))
for k := range values {
keys = append(keys, k)
}
sort.Strings(keys)
var pairs []string
for _, k := range keys {
ek := uriEncode(k)
vs := make([]string, len(values[k]))
copy(vs, values[k])
sort.Strings(vs)
for _, v := range vs {
pairs = append(pairs, ek+"="+uriEncode(v))
}
}
return strings.Join(pairs, "&")
}
// uriEncode percent-encodes a string per RFC 3986, encoding everything except unreserved
// characters (A-Z a-z 0-9 - _ . ~).
func uriEncode(s string) string {
var buf strings.Builder
for i := 0; i < len(s); i++ {
b := s[i]
if (b >= 'A' && b <= 'Z') || (b >= 'a' && b <= 'z') || (b >= '0' && b <= '9') ||
b == '-' || b == '_' || b == '.' || b == '~' {
buf.WriteByte(b)
} else {
fmt.Fprintf(&buf, "%%%02X", b)
}
}
return buf.String()
}
func isHTTPSuccess(resp *http.Response) bool {
return resp.StatusCode/100 == 2
}
func sha256Hex(data []byte) string {
h := sha256.Sum256(data)
return hex.EncodeToString(h[:])
}
func hmacSHA256(key, data []byte) []byte {
h := hmac.New(sha256.New, key)
h.Write(data)
return h.Sum(nil)
}
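For intuition on partSize: a 12 MB body (see TestClient_PutObject_ChunkedUpload above) uploads as three parts of 5, 5, and 2 MB. An illustrative helper, not part of this diff:

// numParts returns how many multipart parts a body of n bytes needs,
// given the 5 MB partSize constant above; the last part may be smaller.
func numParts(n int64) int64 {
	return (n + partSize - 1) / partSize // e.g. 12 MB -> 3 parts (5 + 5 + 2 MB)
}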

s3/util_test.go Normal file
View File

@@ -0,0 +1,181 @@
package s3
import (
"net/http"
"net/url"
"testing"
"github.com/stretchr/testify/require"
)
func TestURIEncode(t *testing.T) {
// Unreserved characters are not encoded
require.Equal(t, "abcdefghijklmnopqrstuvwxyz", uriEncode("abcdefghijklmnopqrstuvwxyz"))
require.Equal(t, "ABCDEFGHIJKLMNOPQRSTUVWXYZ", uriEncode("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
require.Equal(t, "0123456789", uriEncode("0123456789"))
require.Equal(t, "-_.~", uriEncode("-_.~"))
// Spaces use %20, not +
require.Equal(t, "hello%20world", uriEncode("hello world"))
// Slashes are encoded (canonicalURI handles slash splitting separately)
require.Equal(t, "a%2Fb", uriEncode("a/b"))
// Special characters
require.Equal(t, "%2B", uriEncode("+"))
require.Equal(t, "%3D", uriEncode("="))
require.Equal(t, "%26", uriEncode("&"))
require.Equal(t, "%40", uriEncode("@"))
require.Equal(t, "%23", uriEncode("#"))
// Mixed
require.Equal(t, "test~file-name_1.txt", uriEncode("test~file-name_1.txt"))
require.Equal(t, "key%20with%20spaces%2Fand%2Fslashes", uriEncode("key with spaces/and/slashes"))
// Empty string
require.Equal(t, "", uriEncode(""))
}
func TestCanonicalURI(t *testing.T) {
// Simple path
u, _ := url.Parse("https://example.com/bucket/key")
require.Equal(t, "/bucket/key", canonicalURI(u))
// Root path
u, _ = url.Parse("https://example.com/")
require.Equal(t, "/", canonicalURI(u))
// Empty path
u, _ = url.Parse("https://example.com")
require.Equal(t, "/", canonicalURI(u))
// Path with special characters
u, _ = url.Parse("https://example.com/bucket/key%20with%20spaces")
require.Equal(t, "/bucket/key%20with%20spaces", canonicalURI(u))
// Nested path
u, _ = url.Parse("https://example.com/bucket/a/b/c/file.txt")
require.Equal(t, "/bucket/a/b/c/file.txt", canonicalURI(u))
}
func TestCanonicalQueryString(t *testing.T) {
// Multiple keys sorted alphabetically
vals := url.Values{
"prefix": {"test/"},
"list-type": {"2"},
}
require.Equal(t, "list-type=2&prefix=test%2F", canonicalQueryString(vals))
// Empty values
require.Equal(t, "", canonicalQueryString(url.Values{}))
// Single key
require.Equal(t, "key=value", canonicalQueryString(url.Values{"key": {"value"}}))
// Key with multiple values (sorted)
vals = url.Values{"key": {"b", "a"}}
require.Equal(t, "key=a&key=b", canonicalQueryString(vals))
// Keys requiring encoding
vals = url.Values{"continuation-token": {"abc+def"}}
require.Equal(t, "continuation-token=abc%2Bdef", canonicalQueryString(vals))
}
func TestSHA256Hex(t *testing.T) {
// SHA-256 of empty string
require.Equal(t, emptyPayloadHash, sha256Hex([]byte("")))
// SHA-256 of known value
require.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", sha256Hex([]byte("hello")))
}
func TestHmacSHA256(t *testing.T) {
// Known test vector: HMAC-SHA256("key", "message")
result := hmacSHA256([]byte("key"), []byte("message"))
require.Len(t, result, 32) // SHA-256 produces 32 bytes
require.NotEqual(t, make([]byte, 32), result)
// Same inputs should produce same output
result2 := hmacSHA256([]byte("key"), []byte("message"))
require.Equal(t, result, result2)
// Different inputs should produce different output
result3 := hmacSHA256([]byte("different-key"), []byte("message"))
require.NotEqual(t, result, result3)
}
func TestSignV4_SetsRequiredHeaders(t *testing.T) {
c := &Client{config: &Config{
AccessKey: "AKID",
SecretKey: "SECRET",
Region: "us-east-1",
Endpoint: "s3.us-east-1.amazonaws.com",
Bucket: "my-bucket",
}}
req, _ := http.NewRequest(http.MethodGet, "https://my-bucket.s3.us-east-1.amazonaws.com/test-key", nil)
c.signV4(req, emptyPayloadHash)
// All required SigV4 headers must be set
require.NotEmpty(t, req.Header.Get("Host"))
require.NotEmpty(t, req.Header.Get("X-Amz-Date"))
require.Equal(t, emptyPayloadHash, req.Header.Get("X-Amz-Content-Sha256"))
// Authorization header must have correct format
auth := req.Header.Get("Authorization")
require.Contains(t, auth, "AWS4-HMAC-SHA256")
require.Contains(t, auth, "Credential=AKID/")
require.Contains(t, auth, "/us-east-1/s3/aws4_request")
require.Contains(t, auth, "SignedHeaders=")
require.Contains(t, auth, "Signature=")
}
func TestSignV4_UnsignedPayload(t *testing.T) {
c := &Client{config: &Config{
AccessKey: "AKID",
SecretKey: "SECRET",
Region: "us-east-1",
Endpoint: "s3.us-east-1.amazonaws.com",
Bucket: "my-bucket",
}}
req, _ := http.NewRequest(http.MethodPut, "https://my-bucket.s3.us-east-1.amazonaws.com/test-key", nil)
c.signV4(req, unsignedPayload)
require.Equal(t, unsignedPayload, req.Header.Get("X-Amz-Content-Sha256"))
}
func TestSignV4_DifferentRegions(t *testing.T) {
c1 := &Client{config: &Config{AccessKey: "AKID", SecretKey: "SECRET", Region: "us-east-1", Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "b"}}
c2 := &Client{config: &Config{AccessKey: "AKID", SecretKey: "SECRET", Region: "eu-west-1", Endpoint: "s3.eu-west-1.amazonaws.com", Bucket: "b"}}
req1, _ := http.NewRequest(http.MethodGet, "https://b.s3.us-east-1.amazonaws.com/key", nil)
c1.signV4(req1, emptyPayloadHash)
req2, _ := http.NewRequest(http.MethodGet, "https://b.s3.eu-west-1.amazonaws.com/key", nil)
c2.signV4(req2, emptyPayloadHash)
// Different regions should produce different signatures
require.NotEqual(t, req1.Header.Get("Authorization"), req2.Header.Get("Authorization"))
}
func TestParseError_XMLResponse(t *testing.T) {
xmlBody := []byte(`<?xml version="1.0" encoding="UTF-8"?><Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message></Error>`)
err := parseErrorFromBytes(404, xmlBody)
var errResp *errorResponse
require.ErrorAs(t, err, &errResp)
require.Equal(t, 404, errResp.StatusCode)
require.Equal(t, "NoSuchKey", errResp.Code)
require.Equal(t, "The specified key does not exist.", errResp.Message)
}
func TestParseError_NonXMLResponse(t *testing.T) {
err := parseErrorFromBytes(500, []byte("internal server error"))
var errResp *errorResponse
require.ErrorAs(t, err, &errResp)
require.Equal(t, 500, errResp.StatusCode)
require.Equal(t, "", errResp.Code) // XML parsing failed, no code
require.Contains(t, errResp.Body, "internal server error")
}

View File

@@ -142,6 +142,7 @@ var (
errHTTPBadRequestTemplateFileNotFound = &errHTTP{40047, http.StatusBadRequest, "invalid request: template file not found", "https://ntfy.sh/docs/publish/#message-templating", nil}
errHTTPBadRequestTemplateFileInvalid = &errHTTP{40048, http.StatusBadRequest, "invalid request: template file invalid", "https://ntfy.sh/docs/publish/#message-templating", nil}
errHTTPBadRequestSequenceIDInvalid = &errHTTP{40049, http.StatusBadRequest, "invalid request: sequence ID invalid", "https://ntfy.sh/docs/publish/#updating-deleting-notifications", nil}
errHTTPBadRequestEmailAddressInvalid = &errHTTP{40050, http.StatusBadRequest, "invalid request: invalid e-mail address", "https://ntfy.sh/docs/publish/#e-mail-notifications", nil}
errHTTPNotFound = &errHTTP{40401, http.StatusNotFound, "page not found", "", nil}
errHTTPUnauthorized = &errHTTP{40101, http.StatusUnauthorized, "unauthorized", "https://ntfy.sh/docs/publish/#authentication", nil}
errHTTPForbidden = &errHTTP{40301, http.StatusForbidden, "forbidden", "https://ntfy.sh/docs/publish/#authentication", nil}

View File

@@ -1,128 +0,0 @@
package server
import (
"errors"
"fmt"
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/model"
"heckel.io/ntfy/v2/util"
"io"
"os"
"path/filepath"
"regexp"
"sync"
)
var (
fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, model.MessageIDLength))
errInvalidFileID = errors.New("invalid file ID")
errFileExists = errors.New("file exists")
)
type fileCache struct {
dir string
totalSizeCurrent int64
totalSizeLimit int64
mu sync.Mutex
}
func newFileCache(dir string, totalSizeLimit int64) (*fileCache, error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
size, err := dirSize(dir)
if err != nil {
return nil, err
}
return &fileCache{
dir: dir,
totalSizeCurrent: size,
totalSizeLimit: totalSizeLimit,
}, nil
}
func (c *fileCache) Write(id string, in io.Reader, limiters ...util.Limiter) (int64, error) {
if !fileIDRegex.MatchString(id) {
return 0, errInvalidFileID
}
log.Tag(tagFileCache).Field("message_id", id).Debug("Writing attachment")
file := filepath.Join(c.dir, id)
if _, err := os.Stat(file); err == nil {
return 0, errFileExists
}
f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
return 0, err
}
defer f.Close()
limiters = append(limiters, util.NewFixedLimiter(c.Remaining()))
limitWriter := util.NewLimitWriter(f, limiters...)
size, err := io.Copy(limitWriter, in)
if err != nil {
os.Remove(file)
return 0, err
}
if err := f.Close(); err != nil {
os.Remove(file)
return 0, err
}
c.mu.Lock()
c.totalSizeCurrent += size
mset(metricAttachmentsTotalSize, c.totalSizeCurrent)
c.mu.Unlock()
return size, nil
}
func (c *fileCache) Remove(ids ...string) error {
for _, id := range ids {
if !fileIDRegex.MatchString(id) {
return errInvalidFileID
}
log.Tag(tagFileCache).Field("message_id", id).Debug("Deleting attachment")
file := filepath.Join(c.dir, id)
if err := os.Remove(file); err != nil {
log.Tag(tagFileCache).Field("message_id", id).Err(err).Debug("Error deleting attachment")
}
}
size, err := dirSize(c.dir)
if err != nil {
return err
}
c.mu.Lock()
c.totalSizeCurrent = size
c.mu.Unlock()
mset(metricAttachmentsTotalSize, size)
return nil
}
func (c *fileCache) Size() int64 {
c.mu.Lock()
defer c.mu.Unlock()
return c.totalSizeCurrent
}
func (c *fileCache) Remaining() int64 {
c.mu.Lock()
defer c.mu.Unlock()
remaining := c.totalSizeLimit - c.totalSizeCurrent
if remaining < 0 {
return 0
}
return remaining
}
func dirSize(dir string) (int64, error) {
entries, err := os.ReadDir(dir)
if err != nil {
return 0, err
}
var size int64
for _, e := range entries {
info, err := e.Info()
if err != nil {
return 0, err
}
size += info.Size()
}
return size, nil
}

View File

@@ -1,76 +0,0 @@
package server
import (
"bytes"
"fmt"
"github.com/stretchr/testify/require"
"heckel.io/ntfy/v2/util"
"os"
"strings"
"testing"
)
var (
oneKilobyteArray = make([]byte, 1024)
)
func TestFileCache_Write_Success(t *testing.T) {
dir, c := newTestFileCache(t)
size, err := c.Write("abcdefghijkl", strings.NewReader("normal file"), util.NewFixedLimiter(999))
require.Nil(t, err)
require.Equal(t, int64(11), size)
require.Equal(t, "normal file", readFile(t, dir+"/abcdefghijkl"))
require.Equal(t, int64(11), c.Size())
require.Equal(t, int64(10229), c.Remaining())
}
func TestFileCache_Write_Remove_Success(t *testing.T) {
dir, c := newTestFileCache(t) // max = 10k (10240), each = 1k (1024)
for i := 0; i < 10; i++ { // 10x999 = 9990
size, err := c.Write(fmt.Sprintf("abcdefghijk%d", i), bytes.NewReader(make([]byte, 999)))
require.Nil(t, err)
require.Equal(t, int64(999), size)
}
require.Equal(t, int64(9990), c.Size())
require.Equal(t, int64(250), c.Remaining())
require.FileExists(t, dir+"/abcdefghijk1")
require.FileExists(t, dir+"/abcdefghijk5")
require.Nil(t, c.Remove("abcdefghijk1", "abcdefghijk5"))
require.NoFileExists(t, dir+"/abcdefghijk1")
require.NoFileExists(t, dir+"/abcdefghijk5")
require.Equal(t, int64(7992), c.Size())
require.Equal(t, int64(2248), c.Remaining())
}
func TestFileCache_Write_FailedTotalSizeLimit(t *testing.T) {
dir, c := newTestFileCache(t)
for i := 0; i < 10; i++ {
size, err := c.Write(fmt.Sprintf("abcdefghijk%d", i), bytes.NewReader(oneKilobyteArray))
require.Nil(t, err)
require.Equal(t, int64(1024), size)
}
_, err := c.Write("abcdefghijkX", bytes.NewReader(oneKilobyteArray))
require.Equal(t, util.ErrLimitReached, err)
require.NoFileExists(t, dir+"/abcdefghijkX")
}
func TestFileCache_Write_FailedAdditionalLimiter(t *testing.T) {
dir, c := newTestFileCache(t)
_, err := c.Write("abcdefghijkl", bytes.NewReader(make([]byte, 1001)), util.NewFixedLimiter(1000))
require.Equal(t, util.ErrLimitReached, err)
require.NoFileExists(t, dir+"/abcdefghijkl")
}
func newTestFileCache(t *testing.T) (dir string, cache *fileCache) {
dir = t.TempDir()
cache, err := newFileCache(dir, 10*1024)
require.Nil(t, err)
return dir, cache
}
func readFile(t *testing.T, f string) string {
b, err := os.ReadFile(f)
require.Nil(t, err)
return string(b)
}

View File

@@ -24,7 +24,6 @@ const (
tagSMTP = "smtp" // Receive email
tagEmail = "email" // Send email
tagTwilio = "twilio"
tagFileCache = "file_cache"
tagMessageCache = "message_cache"
tagStripe = "stripe"
tagAccount = "account"
@@ -36,7 +35,7 @@ const (
)
var (
normalErrorCodes = []int{http.StatusNotFound, http.StatusBadRequest, http.StatusTooManyRequests, http.StatusUnauthorized, http.StatusForbidden, http.StatusInsufficientStorage}
normalErrorCodes = []int{http.StatusNotFound, http.StatusBadRequest, http.StatusTooManyRequests, http.StatusUnauthorized, http.StatusForbidden, http.StatusInsufficientStorage, http.StatusRequestEntityTooLarge}
rateLimitingErrorCodes = []int{http.StatusTooManyRequests, http.StatusRequestEntityTooLarge}
)

View File

@@ -32,6 +32,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v2"
"heckel.io/ntfy/v2/attachment"
"heckel.io/ntfy/v2/db"
"heckel.io/ntfy/v2/db/pg"
"heckel.io/ntfy/v2/log"
@@ -64,7 +65,7 @@ type Server struct {
userManager *user.Manager // Might be nil!
messageCache *message.Cache // Database that stores the messages
webPush *webpush.Store // Database that stores web push subscriptions
fileCache *fileCache // File system based cache that stores attachments
attachment *attachment.Store // Attachment store (file system or S3)
stripe stripeAPI // Stripe API, can be replaced with a mock
priceCache *util.LookupCache[map[string]int64] // Stripe price ID -> price as cents (USD implied!)
metricsHandler http.Handler // Handles /metrics if enable-metrics set, and listen-metrics-http not set
@@ -122,6 +123,7 @@ var (
fileRegex = regexp.MustCompile(`^/file/([-_A-Za-z0-9]{1,64})(?:\.[A-Za-z0-9]{1,16})?$`)
urlRegex = regexp.MustCompile(`^https?://`)
phoneNumberRegex = regexp.MustCompile(`^\+\d{1,100}$`)
emailAddressRegex = regexp.MustCompile(`^[^\s,;]+@[^\s,;]+$`)
//go:embed site
webFs embed.FS
@@ -227,12 +229,9 @@ func New(conf *Config) (*Server, error) {
if err != nil {
return nil, err
}
var fileCache *fileCache
if conf.AttachmentCacheDir != "" {
fileCache, err = newFileCache(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit)
if err != nil {
return nil, err
}
attachmentStore, err := createAttachmentStore(conf, messageCache)
if err != nil {
return nil, err
}
var userManager *user.Manager
if conf.AuthFile != "" || pool != nil {
@@ -276,7 +275,7 @@ func New(conf *Config) (*Server, error) {
db: pool,
messageCache: messageCache,
webPush: wp,
fileCache: fileCache,
attachment: attachmentStore,
firebaseClient: firebaseClient,
smtpSender: mailer,
topics: topics,
@@ -301,6 +300,15 @@ func createMessageCache(conf *Config, pool *db.DB) (*message.Cache, error) {
return message.NewMemStore()
}
func createAttachmentStore(conf *Config, messageCache *message.Cache) (*attachment.Store, error) {
if strings.HasPrefix(conf.AttachmentCacheDir, "s3://") {
return attachment.NewS3Store(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, messageCache.AttachmentsWithSizes)
} else if conf.AttachmentCacheDir != "" {
return attachment.NewFileStore(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, messageCache.AttachmentsWithSizes)
}
return nil, nil
}
// Run executes the main server. It listens on HTTP (+ HTTPS, if configured), and starts
// a manager go routine to print stats and prune messages.
func (s *Server) Run() error {
@@ -421,6 +429,9 @@ func (s *Server) Stop() {
if s.smtpServer != nil {
s.smtpServer.Close()
}
if s.attachment != nil {
s.attachment.Close()
}
s.closeDatabases()
close(s.closeChan)
}
@@ -595,7 +606,7 @@ func (s *Server) handleInternal(w http.ResponseWriter, r *http.Request, v *visit
return s.ensureWebEnabled(s.handleStatic)(w, r, v)
} else if r.Method == http.MethodGet && docsRegex.MatchString(r.URL.Path) {
return s.ensureWebEnabled(s.handleDocs)(w, r, v)
} else if (r.Method == http.MethodGet || r.Method == http.MethodHead) && fileRegex.MatchString(r.URL.Path) && s.config.AttachmentCacheDir != "" {
} else if (r.Method == http.MethodGet || r.Method == http.MethodHead) && fileRegex.MatchString(r.URL.Path) && s.attachment != nil {
return s.limitRequests(s.handleFile)(w, r, v)
} else if r.Method == http.MethodOptions {
return s.limitRequests(s.handleOptions)(w, r, v) // Should work even if the web app is not enabled, see #598
@@ -752,7 +763,7 @@ func (s *Server) handleStats(w http.ResponseWriter, _ *http.Request, _ *visitor)
// Before streaming the file to a client, it locates uploader (m.Sender or m.User) in the message cache, so it
// can associate the download bandwidth with the uploader.
func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor) error {
if s.config.AttachmentCacheDir == "" {
if s.attachment == nil {
return errHTTPInternalError
}
matches := fileRegex.FindStringSubmatch(r.URL.Path)
@@ -760,16 +771,16 @@ func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor)
return errHTTPInternalErrorInvalidPath
}
messageID := matches[1]
file := filepath.Join(s.config.AttachmentCacheDir, messageID)
stat, err := os.Stat(file)
reader, size, err := s.attachment.Read(messageID)
if err != nil {
return errHTTPNotFound.Fields(log.Context{
"message_id": messageID,
"error_context": "filesystem",
"error_context": "attachment_store",
})
}
defer reader.Close()
w.Header().Set("Access-Control-Allow-Origin", s.config.AccessControlAllowOrigin) // CORS, allow cross-origin requests
w.Header().Set("Content-Length", fmt.Sprintf("%d", stat.Size()))
w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
if r.Method == http.MethodHead {
return nil
}
@@ -805,19 +816,14 @@ func (s *Server) handleFile(w http.ResponseWriter, r *http.Request, v *visitor)
} else if m.Sender.IsValid() {
bandwidthVisitor = s.visitor(m.Sender, nil)
}
if !bandwidthVisitor.BandwidthAllowed(stat.Size()) {
if !bandwidthVisitor.BandwidthAllowed(size) {
return errHTTPTooManyRequestsLimitAttachmentBandwidth.With(m)
}
// Actually send file
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
if m.Attachment.Name != "" {
w.Header().Set("Content-Disposition", "attachment; filename="+strconv.Quote(m.Attachment.Name))
}
_, err = io.Copy(util.NewContentTypeWriter(w, r.URL.Path), f)
_, err = io.Copy(util.NewContentTypeWriter(w, r.URL.Path), reader)
return err
}
@@ -926,8 +932,8 @@ func (s *Server) handlePublishInternal(r *http.Request, v *visitor) (*model.Mess
return nil, err
}
// Delete attachment files for deleted scheduled messages
if s.fileCache != nil && len(deletedIDs) > 0 {
if err := s.fileCache.Remove(deletedIDs...); err != nil {
if s.attachment != nil && len(deletedIDs) > 0 {
if err := s.attachment.Remove(deletedIDs...); err != nil {
logvrm(v, r, m).Tag(tagPublish).Err(err).Warn("Error removing attachments for deleted scheduled messages")
}
}
@@ -1033,8 +1039,8 @@ func (s *Server) handleActionMessage(w http.ResponseWriter, r *http.Request, v *
return err
}
// Delete attachment files for deleted scheduled messages
if s.fileCache != nil && len(deletedIDs) > 0 {
if err := s.fileCache.Remove(deletedIDs...); err != nil {
if s.attachment != nil && len(deletedIDs) > 0 {
if err := s.attachment.Remove(deletedIDs...); err != nil {
logvrm(v, r, m).Tag(tagPublish).Err(err).Warn("Error removing attachments for deleted scheduled messages")
}
}
@@ -1163,6 +1169,9 @@ func (s *Server) parsePublishParams(r *http.Request, m *model.Message) (cache bo
m.Icon = icon
}
email = readParam(r, "x-email", "x-e-mail", "email", "e-mail", "mail", "e")
if email != "" && !emailAddressRegex.MatchString(email) {
return false, false, "", "", "", false, "", errHTTPBadRequestEmailAddressInvalid
}
if s.smtpSender == nil && email != "" {
return false, false, "", "", "", false, "", errHTTPBadRequestEmailDisabled
}
@@ -1409,7 +1418,7 @@ func (s *Server) renderTemplate(name, tpl, source string) (string, error) {
}
func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Message, body *util.PeekedReadCloser) error {
if s.fileCache == nil || s.config.BaseURL == "" || s.config.AttachmentCacheDir == "" {
if s.attachment == nil || s.config.BaseURL == "" {
return errHTTPBadRequestAttachmentsDisallowed.With(m)
}
vinfo, err := v.Info()
@@ -1417,19 +1426,19 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Me
return err
}
attachmentExpiry := time.Now().Add(vinfo.Limits.AttachmentExpiryDuration).Unix()
if m.Expires > 0 && attachmentExpiry > m.Expires {
attachmentExpiry = m.Expires // Attachment must never outlive the message
}
if m.Time > attachmentExpiry {
return errHTTPBadRequestAttachmentsExpiryBeforeDelivery.With(m)
}
contentLengthStr := r.Header.Get("Content-Length")
if contentLengthStr != "" { // Early "do-not-trust" check, hard limit see below
contentLength, err := strconv.ParseInt(contentLengthStr, 10, 64)
if err == nil && (contentLength > vinfo.Stats.AttachmentTotalSizeRemaining || contentLength > vinfo.Limits.AttachmentFileSizeLimit) {
return errHTTPEntityTooLargeAttachment.With(m).Fields(log.Context{
"message_content_length": contentLength,
"attachment_total_size_remaining": vinfo.Stats.AttachmentTotalSizeRemaining,
"attachment_file_size_limit": vinfo.Limits.AttachmentFileSizeLimit,
})
}
// Early "do-not-trust" check, hard limit see below
if r.ContentLength > 0 && (r.ContentLength > vinfo.Stats.AttachmentTotalSizeRemaining || r.ContentLength > vinfo.Limits.AttachmentFileSizeLimit) {
return errHTTPEntityTooLargeAttachment.With(m).Fields(log.Context{
"message_content_length": r.ContentLength,
"attachment_total_size_remaining": vinfo.Stats.AttachmentTotalSizeRemaining,
"attachment_file_size_limit": vinfo.Limits.AttachmentFileSizeLimit,
})
}
if m.Attachment == nil {
m.Attachment = &model.Attachment{}
@@ -1449,7 +1458,7 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Me
util.NewFixedLimiter(vinfo.Limits.AttachmentFileSizeLimit),
util.NewFixedLimiter(vinfo.Stats.AttachmentTotalSizeRemaining),
}
m.Attachment.Size, err = s.fileCache.Write(m.ID, body, limiters...)
m.Attachment.Size, err = s.attachment.Write(m.ID, body, r.ContentLength, limiters...)
if errors.Is(err, util.ErrLimitReached) {
return errHTTPEntityTooLargeAttachment.With(m)
} else if err != nil {

View File

@@ -153,7 +153,8 @@
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
# are "attachment-cache-dir" and "base-url".
#
# - attachment-cache-dir is the cache directory for attached files
# - attachment-cache-dir is the cache directory for attached files, or an S3 URL for object storage
# e.g. /var/cache/ntfy/attachments, or s3://ACCESS_KEY:SECRET_KEY@bucket/prefix?region=us-east-1&endpoint=https://...
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
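#
# A hypothetical S3 configuration (bucket, credentials, and limits below are
# placeholder values, not defaults):
#
#   attachment-cache-dir: "s3://AKID:SECRET@ntfy-attachments/attachments?region=us-east-1"
#   attachment-total-size-limit: "5G"
#   attachment-file-size-limit: "15M"
#   attachment-expiry-duration: "3h"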

View File

@@ -3,14 +3,15 @@ package server
import (
"encoding/json"
"errors"
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/model"
"heckel.io/ntfy/v2/user"
"heckel.io/ntfy/v2/util"
"net/http"
"net/netip"
"strings"
"time"
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/model"
"heckel.io/ntfy/v2/user"
"heckel.io/ntfy/v2/util"
)
const (
@@ -455,21 +456,8 @@ func (s *Server) handleAccountReservationAdd(w http.ResponseWriter, r *http.Requ
return errHTTPUnauthorized
} else if err := s.userManager.AllowReservation(u.Name, req.Topic); err != nil {
return errHTTPConflictTopicReserved
} else if u.IsUser() {
hasReservation, err := s.userManager.HasReservation(u.Name, req.Topic)
if err != nil {
return err
}
if !hasReservation {
reservations, err := s.userManager.ReservationsCount(u.Name)
if err != nil {
return err
} else if reservations >= u.Tier.ReservationLimit {
return errHTTPTooManyRequestsLimitReservations
}
}
}
// Actually add the reservation
// Actually add the reservation (with limit check inside the transaction to avoid races)
logvr(v, r).
Tag(tagAccount).
Fields(log.Context{
@@ -477,7 +465,14 @@ func (s *Server) handleAccountReservationAdd(w http.ResponseWriter, r *http.Requ
"everyone": everyone.String(),
}).
Debug("Adding topic reservation")
if err := s.userManager.AddReservation(u.Name, req.Topic, everyone); err != nil {
var limit int64
if u.IsUser() && u.Tier != nil {
limit = u.Tier.ReservationLimit
}
if err := s.userManager.AddReservation(u.Name, req.Topic, everyone, limit); err != nil {
if errors.Is(err, user.ErrTooManyReservations) {
return errHTTPTooManyRequestsLimitReservations
}
return err
}
// Kill existing subscribers
@@ -530,22 +525,15 @@ func (s *Server) handleAccountReservationDelete(w http.ResponseWriter, r *http.R
// and marks associated messages for the topics as deleted. This also eventually deletes attachments.
// The process relies on the manager to perform the actual deletions (see runManager).
func (s *Server) maybeRemoveMessagesAndExcessReservations(r *http.Request, v *visitor, u *user.User, reservationsLimit int64) error {
reservations, err := s.userManager.Reservations(u.Name)
removedTopics, err := s.userManager.RemoveExcessReservations(u.Name, reservationsLimit)
if err != nil {
return err
} else if int64(len(reservations)) <= reservationsLimit {
} else if len(removedTopics) == 0 {
logvr(v, r).Tag(tagAccount).Debug("No excess reservations to remove")
return nil
}
topics := make([]string, 0)
for i := int64(len(reservations)) - 1; i >= reservationsLimit; i-- {
topics = append(topics, reservations[i].Topic)
}
logvr(v, r).Tag(tagAccount).Info("Removing excess reservations for topics %s", strings.Join(topics, ", "))
if err := s.userManager.RemoveReservations(u.Name, topics...); err != nil {
return err
}
if err := s.messageCache.ExpireMessages(topics...); err != nil {
logvr(v, r).Tag(tagAccount).Info("Removed excess topic reservations, now removing messages for topics %s", strings.Join(removedTopics, ", "))
if err := s.messageCache.ExpireMessages(removedTopics...); err != nil {
return err
}
go s.pruneMessages()

View File

@@ -503,7 +503,7 @@ func TestAccount_Reservation_AddAdminSuccess(t *testing.T) {
}))
require.Nil(t, s.userManager.AddUser("noadmin1", "pass", user.RoleUser, false))
require.Nil(t, s.userManager.ChangeTier("noadmin1", "pro"))
require.Nil(t, s.userManager.AddReservation("noadmin1", "mytopic", user.PermissionDenyAll))
require.Nil(t, s.userManager.AddReservation("noadmin1", "mytopic", user.PermissionDenyAll, 0))
require.Nil(t, s.userManager.AddUser("noadmin2", "pass", user.RoleUser, false))
require.Nil(t, s.userManager.ChangeTier("noadmin2", "pro"))

View File

@@ -3,7 +3,6 @@ package server
import (
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/util"
"strings"
)
func (s *Server) execManager() {
@@ -99,6 +98,9 @@ func (s *Server) execManager() {
mset(metricUsers, usersCount)
mset(metricSubscribers, subscribers)
mset(metricTopics, topicsCount)
if s.attachment != nil {
mset(metricAttachmentsTotalSize, s.attachment.Size())
}
}
func (s *Server) pruneVisitors() {
@@ -137,7 +139,7 @@ func (s *Server) pruneTokens() {
}
func (s *Server) pruneAttachments() {
if s.fileCache == nil {
if s.attachment == nil {
return
}
log.
@@ -148,11 +150,11 @@ func (s *Server) pruneAttachments() {
log.Tag(tagManager).Err(err).Warn("Error retrieving expired attachments")
} else if len(ids) > 0 {
if log.Tag(tagManager).IsDebug() {
log.Tag(tagManager).Debug("Deleting attachments %s", strings.Join(ids, ", "))
}
if err := s.fileCache.Remove(ids...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error deleting attachments")
log.Tag(tagManager).Debug("Marking %d expired attachment(s) as deleted", len(ids))
}
// Only mark as deleted in DB. The actual storage files are cleaned up
// by the attachment store's sync() loop, which periodically reconciles
// storage with the database and removes orphaned files.
if err := s.messageCache.MarkAttachmentsDeleted(ids...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error marking attachments deleted")
}
@@ -171,13 +173,11 @@ func (s *Server) pruneMessages() {
if err != nil {
log.Tag(tagManager).Err(err).Warn("Error retrieving expired messages")
} else if len(expiredMessageIDs) > 0 {
if s.fileCache != nil {
if err := s.fileCache.Remove(expiredMessageIDs...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error deleting attachments for expired messages")
}
}
// Only delete DB rows. Attachment storage files are cleaned up by the
// attachment store's sync() loop, which periodically reconciles storage
// with the database and removes orphaned files.
if err := s.messageCache.DeleteMessages(expiredMessageIDs...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error marking attachments deleted")
log.Tag(tagManager).Err(err).Warn("Error deleting expired messages")
}
} else {
log.Tag(tagManager).Debug("No expired messages to delete")

View File

@@ -478,8 +478,8 @@ func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(
}))
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
require.Nil(t, s.userManager.ChangeTier("phil", "pro"))
require.Nil(t, s.userManager.AddReservation("phil", "atopic", user.PermissionDenyAll))
require.Nil(t, s.userManager.AddReservation("phil", "ztopic", user.PermissionDenyAll))
require.Nil(t, s.userManager.AddReservation("phil", "atopic", user.PermissionDenyAll, 0))
require.Nil(t, s.userManager.AddReservation("phil", "ztopic", user.PermissionDenyAll, 0))
// Add billing details
u, err := s.userManager.User("phil")
@@ -589,7 +589,7 @@ func TestPayments_Webhook_Subscription_Deleted(t *testing.T) {
}))
require.Nil(t, s.userManager.AddUser("phil", "phil", user.RoleUser, false))
require.Nil(t, s.userManager.ChangeTier("phil", "pro"))
require.Nil(t, s.userManager.AddReservation("phil", "atopic", user.PermissionDenyAll))
require.Nil(t, s.userManager.AddReservation("phil", "atopic", user.PermissionDenyAll, 0))
// Add billing details
u, err := s.userManager.User("phil")

View File

@@ -1543,6 +1543,30 @@ func TestServer_PublishEmailNoMailer_Fail(t *testing.T) {
})
}
func TestServer_PublishEmailAddressInvalid(t *testing.T) {
forEachBackend(t, func(t *testing.T, databaseURL string) {
s := newTestServer(t, newTestConfig(t, databaseURL))
s.smtpSender = &testMailer{}
addresses := []string{
"test@example.com, other@example.com",
"invalidaddress",
"@nope",
"nope@",
}
for _, email := range addresses {
response := request(t, s, "PUT", "/mytopic", "fail", map[string]string{
"E-Mail": email,
})
require.Equal(t, 400, response.Code, "expected 400 for email: %s", email)
}
// Valid address should succeed
response := request(t, s, "PUT", "/mytopic", "success", map[string]string{
"E-Mail": "test@example.com",
})
require.Equal(t, 200, response.Code)
})
}
func TestServer_PublishAndExpungeTopicAfter16Hours(t *testing.T) {
forEachBackend(t, func(t *testing.T, databaseURL string) {
t.Parallel()
@@ -2121,7 +2145,7 @@ func TestServer_PublishAttachmentShortWithFilename(t *testing.T) {
require.Equal(t, "myfile.txt", msg.Attachment.Name)
require.Equal(t, "text/plain; charset=utf-8", msg.Attachment.Type)
require.Equal(t, int64(21), msg.Attachment.Size)
require.GreaterOrEqual(t, msg.Attachment.Expires, time.Now().Add(3*time.Hour).Unix())
require.GreaterOrEqual(t, msg.Attachment.Expires, time.Now().Add(3*time.Hour).Unix()-1)
require.Contains(t, msg.Attachment.URL, "http://127.0.0.1:12345/file/")
require.Equal(t, netip.Addr{}, msg.Sender) // Should never be returned
require.FileExists(t, filepath.Join(s.config.AttachmentCacheDir, msg.ID))
@@ -2194,8 +2218,8 @@ func TestServer_PublishAttachmentTooLargeContentLength(t *testing.T) {
forEachBackend(t, func(t *testing.T, databaseURL string) {
content := util.RandomString(5000) // > 4096
s := newTestServer(t, newTestConfig(t, databaseURL))
response := request(t, s, "PUT", "/mytopic", content, map[string]string{
"Content-Length": "20000000",
response := request(t, s, "PUT", "/mytopic", content, nil, func(r *http.Request) {
r.ContentLength = 20000000
})
err := toHTTPError(t, response.Body.String())
require.Equal(t, 413, response.Code)
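
The rewritten test sets r.ContentLength on the request itself because httptest-built requests ignore a Content-Length entry in the header map; only the struct field reaches the handler. Server-side, a guard of roughly this shape can reject the request before reading any of the body (names and error mapping are illustrative):

// checkRequestSize is a sketch of a pre-read guard; a negative ContentLength
// means "unknown" and is deliberately let through, to be limited while reading.
func checkRequestSize(r *http.Request, limit int64) error {
	if r.ContentLength > limit {
		return fmt.Errorf("declared size %d exceeds limit %d", r.ContentLength, limit) // maps to HTTP 413
	}
	return nil
}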

View File

@@ -235,13 +235,12 @@ func TestServer_WebPush_Publish_RemoveOnError(t *testing.T) {
request(t, s, "POST", "/test-topic", "web push test", nil)
waitFor(t, func() bool {
return received.Load()
})
// Receiving the 410 should've caused the publisher to expire all subscriptions on the endpoint
requireSubscriptionCount(t, s, "test-topic", 0)
waitFor(t, func() bool {
subs, err := s.webPush.SubscriptionsForTopic("test-topic")
require.Nil(t, err)
return len(subs) == 0
})
requireSubscriptionCount(t, s, "test-topic-abc", 0)
})
}

147
tools/s3cli/main.go Normal file
View File

@@ -0,0 +1,147 @@
// Command s3cli is a minimal CLI for testing the s3 package. It supports put, get, rm, and ls.
//
// Usage:
//
// export S3_URL="s3://ACCESS_KEY:SECRET_KEY@BUCKET/PREFIX?region=REGION&endpoint=ENDPOINT"
//
// s3cli put <key> <file> Upload a file
// s3cli put <key> - Upload from stdin
// s3cli get <key> Download to stdout
// s3cli rm <key> [<key>...] Delete one or more objects
// s3cli ls List all objects
package main
import (
"context"
"fmt"
"io"
"os"
"text/tabwriter"
"heckel.io/ntfy/v2/s3"
)
func main() {
if len(os.Args) < 2 {
usage()
}
s3URL := os.Getenv("S3_URL")
if s3URL == "" {
fail("S3_URL environment variable is required")
}
cfg, err := s3.ParseURL(s3URL)
if err != nil {
fail("invalid S3_URL: %s", err)
}
client := s3.New(cfg)
ctx := context.Background()
switch os.Args[1] {
case "put":
cmdPut(ctx, client)
case "get":
cmdGet(ctx, client)
case "rm":
cmdRm(ctx, client)
case "ls":
cmdLs(ctx, client)
default:
usage()
}
}
func cmdPut(ctx context.Context, client *s3.Client) {
if len(os.Args) != 4 {
fail("usage: s3cli put <key> <file|->\n")
}
key := os.Args[2]
path := os.Args[3]
var r io.Reader
var size int64
if path == "-" {
r = os.Stdin
} else {
f, err := os.Open(path)
if err != nil {
fail("open %s: %s", path, err)
}
defer f.Close()
stat, err := f.Stat()
if err != nil {
fail("stat %s: %s", path, err)
}
r = f
size = stat.Size()
}
if err := client.PutObject(ctx, key, r, size); err != nil {
fail("put: %s", err)
}
fmt.Fprintf(os.Stderr, "uploaded %s\n", key)
}
func cmdGet(ctx context.Context, client *s3.Client) {
if len(os.Args) != 3 {
fail("usage: s3cli get <key>\n")
}
key := os.Args[2]
reader, size, err := client.GetObject(ctx, key)
if err != nil {
fail("get: %s", err)
}
defer reader.Close()
n, err := io.Copy(os.Stdout, reader)
if err != nil {
fail("read: %s", err)
}
fmt.Fprintf(os.Stderr, "downloaded %s (%d bytes, content-length: %d)\n", key, n, size)
}
func cmdRm(ctx context.Context, client *s3.Client) {
if len(os.Args) < 3 {
fail("usage: s3cli rm <key> [<key>...]\n")
}
keys := os.Args[2:]
if err := client.DeleteObjects(ctx, keys); err != nil {
fail("rm: %s", err)
}
fmt.Fprintf(os.Stderr, "deleted %d object(s)\n", len(keys))
}
func cmdLs(ctx context.Context, client *s3.Client) {
objects, err := client.ListObjectsV2(ctx)
if err != nil {
fail("ls: %s", err)
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
var totalSize int64
for _, obj := range objects {
fmt.Fprintf(w, "%d\t%s\n", obj.Size, obj.Key)
totalSize += obj.Size
}
w.Flush()
fmt.Fprintf(os.Stderr, "%d object(s), %d bytes total\n", len(objects), totalSize)
}
func usage() {
fmt.Fprintf(os.Stderr, `Usage: s3cli <command> [args...]
Commands:
put <key> <file|-> Upload a file (use - for stdin)
get <key> Download to stdout
rm <key> [keys...] Delete objects
ls List all objects
Environment:
S3_URL S3 connection URL (required)
s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
`)
os.Exit(1)
}
func fail(format string, args ...any) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
os.Exit(1)
}
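
The CLI above doubles as a reference for the s3 package's surface. A minimal programmatic round-trip using the same calls; S3_URL is expected in the environment as with the CLI, and the object key is arbitrary:

package main

import (
	"context"
	"io"
	"log"
	"os"
	"strings"

	"heckel.io/ntfy/v2/s3"
)

func main() {
	cfg, err := s3.ParseURL(os.Getenv("S3_URL"))
	if err != nil {
		log.Fatal(err)
	}
	client := s3.New(cfg)
	ctx := context.Background()
	body := strings.NewReader("hello world")
	if err := client.PutObject(ctx, "greeting.txt", body, int64(body.Len())); err != nil {
		log.Fatal(err)
	}
	reader, size, err := client.GetObject(ctx, "greeting.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer reader.Close()
	n, _ := io.Copy(io.Discard, reader)
	log.Printf("read %d of %d bytes", n, size)
}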

View File

@@ -288,33 +288,41 @@ func (a *Manager) ChangeTier(username, tier string) error {
t, err := a.Tier(tier)
if err != nil {
return err
} else if err := a.checkReservationsLimit(username, t.ReservationLimit); err != nil {
return err
}
if _, err := a.db.Exec(a.queries.updateUserTier, tier, username); err != nil {
return err
}
return nil
return db.ExecTx(a.db, func(tx *sql.Tx) error {
if err := a.checkReservationsLimitTx(tx, username, t.ReservationLimit); err != nil {
return err
}
if _, err := tx.Exec(a.queries.updateUserTier, tier, username); err != nil {
return err
}
return nil
})
}
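
db.ExecTx itself is not part of this diff; the sketch below shows a common shape for such a helper, as an assumption rather than ntfy's actual implementation (the real first argument is ntfy's db wrapper with a ReadOnly() replica handle, simplified here to *sql.DB). db.QueryTx, used further down, would be the value-returning variant of the same pattern.

// ExecTx sketch: begin, run fn, roll back on error, commit otherwise.
func ExecTx(d *sql.DB, fn func(tx *sql.Tx) error) error {
	tx, err := d.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op once Commit has succeeded
	if err := fn(tx); err != nil {
		return err
	}
	return tx.Commit()
}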
// ResetTier removes the tier from the given user
func (a *Manager) ResetTier(username string) error {
if !AllowedUsername(username) && username != Everyone && username != "" {
return ErrInvalidArgument
} else if err := a.checkReservationsLimit(username, 0); err != nil {
return err
}
_, err := a.db.Exec(a.queries.deleteUserTier, username)
return err
return db.ExecTx(a.db, func(tx *sql.Tx) error {
if err := a.checkReservationsLimitTx(tx, username, 0); err != nil {
return err
}
if _, err := tx.Exec(a.queries.deleteUserTier, username); err != nil {
return err
}
return nil
})
}
func (a *Manager) checkReservationsLimit(username string, reservationsLimit int64) error {
u, err := a.User(username)
func (a *Manager) checkReservationsLimitTx(tx *sql.Tx, username string, reservationsLimit int64) error {
u, err := a.userTx(tx, username)
if err != nil {
return err
}
if u.Tier != nil && reservationsLimit < u.Tier.ReservationLimit {
reservations, err := a.Reservations(username)
reservations, err := a.reservationsTx(tx, username)
if err != nil {
return err
} else if int64(len(reservations)) > reservationsLimit {
@@ -388,7 +396,11 @@ func (a *Manager) writeUserStatsQueue() error {
// User returns the user with the given username if it exists, or ErrUserNotFound otherwise
func (a *Manager) User(username string) (*User, error) {
rows, err := a.db.Query(a.queries.selectUserByName, username)
return a.userTx(a.db, username)
}
func (a *Manager) userTx(tx db.Querier, username string) (*User, error) {
rows, err := tx.Query(a.queries.selectUserByName, username)
if err != nil {
return nil, err
}
@@ -415,7 +427,7 @@ func (a *Manager) userByToken(token string) (*User, error) {
// UserByStripeCustomer returns the user with the given Stripe customer ID if it exists, or ErrUserNotFound otherwise
func (a *Manager) UserByStripeCustomer(customerID string) (*User, error) {
rows, err := a.db.ReadOnly().Query(a.queries.selectUserByStripeCustomerID, customerID)
rows, err := a.db.Query(a.queries.selectUserByStripeCustomerID, customerID)
if err != nil {
return nil, err
}
@@ -642,7 +654,7 @@ func (a *Manager) AllowReservation(username string, topic string) error {
// - Furthermore, the query prioritizes more specific permissions (longer!) over more generic ones, e.g. "test*" > "*"
// - It also prioritizes write permissions over read permissions
func (a *Manager) authorizeTopicAccess(usernameOrEveryone, topic string) (read, write, found bool, err error) {
rows, err := a.db.Query(a.queries.selectTopicPerms, Everyone, usernameOrEveryone, topic)
rows, err := a.db.ReadOnly().Query(a.queries.selectTopicPerms, Everyone, usernameOrEveryone, topic)
if err != nil {
return false, false, false, err
}
@@ -713,16 +725,35 @@ func (a *Manager) Grants(username string) ([]Grant, error) {
// AddReservation creates two access control entries for the given topic: one with full read/write
// access for the given user, and one for Everyone with the given permission. Both entries are
// created atomically in a single transaction.
func (a *Manager) AddReservation(username string, topic string, everyone Permission) error {
// created atomically in a single transaction. If limit is > 0, the reservation count is checked
// inside the transaction and ErrTooManyReservations is returned if the limit would be exceeded.
func (a *Manager) AddReservation(username string, topic string, everyone Permission, limit int64) error {
if !AllowedUsername(username) || username == Everyone || !AllowedTopic(topic) {
return ErrInvalidArgument
}
return db.ExecTx(a.db, func(tx *sql.Tx) error {
if err := a.addReservationAccessTx(tx, username, topic, true, true, username); err != nil {
if limit > 0 {
hasReservation, err := a.hasReservationTx(tx, username, topic)
if err != nil {
return err
}
if !hasReservation {
count, err := a.reservationsCountTx(tx, username)
if err != nil {
return err
}
if count >= limit {
return ErrTooManyReservations
}
}
}
if _, err := tx.Exec(a.queries.upsertUserAccess, username, toSQLWildcard(topic), true, true, username, username, false); err != nil {
return err
}
return a.addReservationAccessTx(tx, Everyone, topic, everyone.IsRead(), everyone.IsWrite(), username)
if _, err := tx.Exec(a.queries.upsertUserAccess, Everyone, toSQLWildcard(topic), everyone.IsRead(), everyone.IsWrite(), username, username, false); err != nil {
return err
}
return nil
})
}
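
With the limit check pushed inside the transaction, callers pass the tier's reservation limit directly and branch on the sentinel error. A hypothetical call site, with plumbing simplified:

// reserve is illustrative; only AddReservation, PermissionDenyAll and
// ErrTooManyReservations are taken from this diff.
func reserve(m *user.Manager, username, topic string, tierLimit int64) error {
	err := m.AddReservation(username, topic, user.PermissionDenyAll, tierLimit)
	if errors.Is(err, user.ErrTooManyReservations) {
		// the limit was enforced inside the tx; map this to an HTTP 4xx
	}
	return err
}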
@@ -740,10 +771,7 @@ func (a *Manager) RemoveReservations(username string, topics ...string) error {
}
return db.ExecTx(a.db, func(tx *sql.Tx) error {
for _, topic := range topics {
if err := a.resetTopicAccessTx(tx, username, topic); err != nil {
return err
}
if err := a.resetTopicAccessTx(tx, Everyone, topic); err != nil {
if err := a.removeReservationAccessTx(tx, username, topic); err != nil {
return err
}
}
@@ -753,7 +781,11 @@ func (a *Manager) RemoveReservations(username string, topics ...string) error {
// Reservations returns all user-owned topics, and the associated everyone-access
func (a *Manager) Reservations(username string) ([]Reservation, error) {
rows, err := a.db.ReadOnly().Query(a.queries.selectUserReservations, Everyone, username)
return a.reservationsTx(a.db.ReadOnly(), username)
}
func (a *Manager) reservationsTx(tx db.Querier, username string) ([]Reservation, error) {
rows, err := tx.Query(a.queries.selectUserReservations, Everyone, username)
if err != nil {
return nil, err
}
@@ -779,7 +811,11 @@ func (a *Manager) Reservations(username string) ([]Reservation, error) {
// HasReservation returns true if the given topic access is owned by the user
func (a *Manager) HasReservation(username, topic string) (bool, error) {
rows, err := a.db.Query(a.queries.selectUserHasReservation, username, escapeUnderscore(topic))
return a.hasReservationTx(a.db, username, topic)
}
func (a *Manager) hasReservationTx(tx db.Querier, username, topic string) (bool, error) {
rows, err := tx.Query(a.queries.selectUserHasReservation, username, escapeUnderscore(topic))
if err != nil {
return false, err
}
@@ -796,7 +832,11 @@ func (a *Manager) HasReservation(username, topic string) (bool, error) {
// ReservationsCount returns the number of reservations owned by this user
func (a *Manager) ReservationsCount(username string) (int64, error) {
rows, err := a.db.ReadOnly().Query(a.queries.selectUserReservationsCount, username)
return a.reservationsCountTx(a.db, username)
}
func (a *Manager) reservationsCountTx(tx db.Querier, username string) (int64, error) {
rows, err := tx.Query(a.queries.selectUserReservationsCount, username)
if err != nil {
return 0, err
}
@@ -828,6 +868,30 @@ func (a *Manager) ReservationOwner(topic string) (string, error) {
return ownerUserID, nil
}
// RemoveExcessReservations removes reservations that exceed the given limit for the user.
// It returns the list of topics whose reservations were removed. The read and removal are
// performed atomically in a single transaction to avoid issues with stale replica data.
func (a *Manager) RemoveExcessReservations(username string, limit int64) ([]string, error) {
return db.QueryTx(a.db, func(tx *sql.Tx) ([]string, error) {
reservations, err := a.reservationsTx(tx, username)
if err != nil {
return nil, err
}
if int64(len(reservations)) <= limit {
return []string{}, nil
}
removedTopics := make([]string, 0)
for i := int64(len(reservations)) - 1; i >= limit; i-- {
topic := reservations[i].Topic
if err := a.removeReservationAccessTx(tx, username, topic); err != nil {
return nil, err
}
removedTopics = append(removedTopics, topic)
}
return removedTopics, nil
})
}
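
A plausible call site for this is tier-downgrade handling, such as the Stripe webhook paths tested earlier in this diff: shrink to the new limit, then act on whatever was dropped. A sketch with assumed names:

// handleDowngrade is hypothetical; cleanup for each released topic would go
// where the log call is.
func handleDowngrade(a *user.Manager, username string, newLimit int64) error {
	removed, err := a.RemoveExcessReservations(username, newLimit)
	if err != nil {
		return err
	}
	for _, topic := range removed {
		log.Printf("released reservation %q for user %s", topic, username)
	}
	return nil
}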
// otherAccessCount returns the number of access entries for the given topic that are not owned by the user
func (a *Manager) otherAccessCount(username, topic string) (int, error) {
rows, err := a.db.Query(a.queries.selectOtherAccessCount, escapeUnderscore(topic), escapeUnderscore(topic), username)
@@ -845,14 +909,11 @@ func (a *Manager) otherAccessCount(username, topic string) (int, error) {
return count, nil
}
func (a *Manager) addReservationAccessTx(tx *sql.Tx, username, topic string, read, write bool, ownerUsername string) error {
if !AllowedUsername(username) && username != Everyone {
return ErrInvalidArgument
} else if !AllowedTopicPattern(topic) {
return ErrInvalidArgument
func (a *Manager) removeReservationAccessTx(tx *sql.Tx, username, topic string) error {
if err := a.resetTopicAccessTx(tx, username, topic); err != nil {
return err
}
_, err := tx.Exec(a.queries.upsertUserAccess, username, toSQLWildcard(topic), read, write, ownerUsername, ownerUsername, false)
return err
return a.resetTopicAccessTx(tx, Everyone, topic)
}
func (a *Manager) resetUserAccessTx(tx *sql.Tx, username string) error {
@@ -1134,7 +1195,7 @@ func (a *Manager) Tiers() ([]*Tier, error) {
// Tier returns a Tier based on the code, or ErrTierNotFound if it does not exist
func (a *Manager) Tier(code string) (*Tier, error) {
rows, err := a.db.ReadOnly().Query(a.queries.selectTierByCode, code)
rows, err := a.db.Query(a.queries.selectTierByCode, code)
if err != nil {
return nil, err
}
@@ -1144,7 +1205,7 @@ func (a *Manager) Tier(code string) (*Tier, error) {
// TierByStripePrice returns a Tier based on the Stripe price ID, or ErrTierNotFound if it does not exist
func (a *Manager) TierByStripePrice(priceID string) (*Tier, error) {
rows, err := a.db.ReadOnly().Query(a.queries.selectTierByPriceID, priceID, priceID)
rows, err := a.db.Query(a.queries.selectTierByPriceID, priceID, priceID)
if err != nil {
return nil, err
}

View File

@@ -226,7 +226,7 @@ func TestManager_MarkUserRemoved_RemoveDeletedUsers(t *testing.T) {
// Create user, add reservations and token
require.Nil(t, a.AddUser("user", "pass", RoleAdmin, false))
require.Nil(t, a.AddReservation("user", "mytopic", PermissionRead))
require.Nil(t, a.AddReservation("user", "mytopic", PermissionRead, 0))
u, err := a.User("user")
require.Nil(t, err)
@@ -439,8 +439,8 @@ func TestManager_Reservations(t *testing.T) {
a := newTestManager(t, newManager, PermissionDenyAll)
require.Nil(t, a.AddUser("phil", "phil", RoleUser, false))
require.Nil(t, a.AddUser("ben", "ben", RoleUser, false))
require.Nil(t, a.AddReservation("ben", "ztopic_", PermissionDenyAll))
require.Nil(t, a.AddReservation("ben", "readme", PermissionRead))
require.Nil(t, a.AddReservation("ben", "ztopic_", PermissionDenyAll, 0))
require.Nil(t, a.AddReservation("ben", "readme", PermissionRead, 0))
require.Nil(t, a.AllowAccess("ben", "something-else", PermissionRead))
reservations, err := a.Reservations("ben")
@@ -523,7 +523,7 @@ func TestManager_ChangeRoleFromTierUserToAdmin(t *testing.T) {
}))
require.Nil(t, a.AddUser("ben", "ben", RoleUser, false))
require.Nil(t, a.ChangeTier("ben", "pro"))
require.Nil(t, a.AddReservation("ben", "mytopic", PermissionDenyAll))
require.Nil(t, a.AddReservation("ben", "mytopic", PermissionDenyAll, 0))
ben, err := a.User("ben")
require.Nil(t, err)
@@ -1076,7 +1076,7 @@ func TestManager_Tier_Change_And_Reset(t *testing.T) {
// Add 10 reservations (pro tier allows that)
for i := 0; i < 4; i++ {
require.Nil(t, a.AddReservation("phil", fmt.Sprintf("topic%d", i), PermissionWrite))
require.Nil(t, a.AddReservation("phil", fmt.Sprintf("topic%d", i), PermissionWrite, 0))
}
// Downgrading will not work (too many reservations)
@@ -2118,7 +2118,7 @@ func TestStoreAuthorizeTopicAccessDenyAll(t *testing.T) {
func TestStoreReservations(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionRead))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionRead, 0))
reservations, err := manager.Reservations("phil")
require.Nil(t, err)
@@ -2133,8 +2133,8 @@ func TestStoreReservations(t *testing.T) {
func TestStoreReservationsCount(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
require.Nil(t, manager.AddReservation("phil", "topic1", PermissionReadWrite))
require.Nil(t, manager.AddReservation("phil", "topic2", PermissionReadWrite))
require.Nil(t, manager.AddReservation("phil", "topic1", PermissionReadWrite, 0))
require.Nil(t, manager.AddReservation("phil", "topic2", PermissionReadWrite, 0))
count, err := manager.ReservationsCount("phil")
require.Nil(t, err)
@@ -2145,7 +2145,7 @@ func TestStoreReservationsCount(t *testing.T) {
func TestStoreHasReservation(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionReadWrite))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionReadWrite, 0))
has, err := manager.HasReservation("phil", "mytopic")
require.Nil(t, err)
@@ -2160,7 +2160,7 @@ func TestStoreHasReservation(t *testing.T) {
func TestStoreReservationOwner(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionReadWrite))
require.Nil(t, manager.AddReservation("phil", "mytopic", PermissionReadWrite, 0))
owner, err := manager.ReservationOwner("mytopic")
require.Nil(t, err)
@@ -2172,6 +2172,26 @@ func TestStoreReservationOwner(t *testing.T) {
})
}
func TestStoreAddReservationWithLimit(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
// Adding reservations within limit succeeds
require.Nil(t, manager.AddReservation("phil", "topic1", PermissionReadWrite, 2))
require.Nil(t, manager.AddReservation("phil", "topic2", PermissionRead, 2))
// Adding a third reservation exceeds the limit
require.Equal(t, ErrTooManyReservations, manager.AddReservation("phil", "topic3", PermissionRead, 2))
// Updating an existing reservation within the limit succeeds
require.Nil(t, manager.AddReservation("phil", "topic1", PermissionRead, 2))
reservations, err := manager.Reservations("phil")
require.Nil(t, err)
require.Len(t, reservations, 2)
})
}
func TestStoreTiers(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
tier := &Tier{
@@ -2431,7 +2451,7 @@ func TestStoreOtherAccessCount(t *testing.T) {
forEachStoreBackend(t, func(t *testing.T, manager *Manager) {
require.Nil(t, manager.AddUser("phil", "mypass", RoleUser, false))
require.Nil(t, manager.AddUser("ben", "benpass", RoleUser, false))
require.Nil(t, manager.AddReservation("ben", "mytopic", PermissionReadWrite))
require.Nil(t, manager.AddReservation("ben", "mytopic", PermissionReadWrite, 0))
count, err := manager.otherAccessCount("phil", "mytopic")
require.Nil(t, err)

View File

@@ -152,6 +152,61 @@ func (l *RateLimiter) Reset() {
l.value = 0
}
// CountingReader wraps an io.Reader and counts the number of bytes read through it.
type CountingReader struct {
r io.Reader
total int64
}
// NewCountingReader creates a new CountingReader
func NewCountingReader(r io.Reader) *CountingReader {
return &CountingReader{r: r}
}
// Read passes through to the underlying reader and counts the bytes read
func (r *CountingReader) Read(p []byte) (n int, err error) {
n, err = r.r.Read(p)
r.total += int64(n)
return
}
// Total returns the total number of bytes read so far
func (r *CountingReader) Total() int64 {
return r.total
}
// LimitReader implements an io.Reader that will pass through all Read calls to the underlying
// reader r until any of the limiters' limits is reached, at which point a Read will return ErrLimitReached.
// Each limiter's value is increased after every read based on the number of bytes actually read.
type LimitReader struct {
r io.Reader
limiters []Limiter
}
// NewLimitReader creates a new LimitReader
func NewLimitReader(r io.Reader, limiters ...Limiter) *LimitReader {
return &LimitReader{
r: r,
limiters: limiters,
}
}
// Read passes through all reads to the underlying reader until any of the given limiters' limits is reached
func (r *LimitReader) Read(p []byte) (n int, err error) {
n, err = r.r.Read(p)
if n > 0 {
for i := 0; i < len(r.limiters); i++ {
if !r.limiters[i].AllowN(int64(n)) {
for j := i - 1; j >= 0; j-- {
r.limiters[j].AllowN(-int64(n)) // Revert limiters if not allowed
}
return 0, ErrLimitReached
}
}
}
return
}
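
Because Read reverts the earlier limiters when a later one rejects, stacking limiters keeps all counters consistent: a rejected read charges nobody. A short usage sketch mirroring the tests further down:

// demoLimitReader: the stricter per-request cap trips first; the per-visitor
// budget is reverted, so it still counts zero bytes afterwards.
func demoLimitReader() {
	perRequest := NewFixedLimiter(8)
	perVisitor := NewFixedLimiter(1024)
	lr := NewLimitReader(strings.NewReader("hello world!"), perVisitor, perRequest)
	if _, err := io.ReadAll(lr); err == ErrLimitReached {
		// request rejected; perVisitor.Value() reflects only reads that succeeded
	}
}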
// LimitWriter implements an io.Writer that will pass through all Write calls to the underlying
// writer w until any of the limiters' limits is reached, at which point a Write will return ErrLimitReached.
// Each limiter's value is increased with every write.

View File

@@ -2,9 +2,12 @@ package util
import (
"bytes"
"github.com/stretchr/testify/require"
"io"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestFixedLimiter_AllowValueReset(t *testing.T) {
@@ -147,3 +150,98 @@ func TestLimitWriter_WriteTwoDifferentLimiters_Wait_FixedLimiterFail(t *testing.
_, err = lw.Write(make([]byte, 8)) // <<< FixedLimiter fails
require.Equal(t, ErrLimitReached, err)
}
func TestCountingReader_Total(t *testing.T) {
cr := NewCountingReader(strings.NewReader("hello world"))
buf := make([]byte, 5)
n, err := cr.Read(buf)
require.Nil(t, err)
require.Equal(t, 5, n)
require.Equal(t, int64(5), cr.Total())
n, err = cr.Read(buf)
require.Nil(t, err)
require.Equal(t, 5, n)
require.Equal(t, int64(10), cr.Total())
n, err = cr.Read(buf)
require.Nil(t, err)
require.Equal(t, 1, n)
require.Equal(t, int64(11), cr.Total())
_, err = cr.Read(buf)
require.Equal(t, io.EOF, err)
require.Equal(t, int64(11), cr.Total())
}
func TestCountingReader_Empty(t *testing.T) {
cr := NewCountingReader(strings.NewReader(""))
require.Equal(t, int64(0), cr.Total())
_, err := cr.Read(make([]byte, 10))
require.Equal(t, io.EOF, err)
require.Equal(t, int64(0), cr.Total())
}
func TestLimitReader_ReadNoLimiter(t *testing.T) {
lr := NewLimitReader(strings.NewReader("hello"))
data, err := io.ReadAll(lr)
require.Nil(t, err)
require.Equal(t, "hello", string(data))
}
func TestLimitReader_ReadOneLimiter(t *testing.T) {
l := NewFixedLimiter(10)
lr := NewLimitReader(strings.NewReader("hello world!"), l)
buf := make([]byte, 5)
n, err := lr.Read(buf)
require.Nil(t, err)
require.Equal(t, 5, n)
require.Equal(t, int64(5), l.Value())
n, err = lr.Read(buf)
require.Nil(t, err)
require.Equal(t, 5, n)
require.Equal(t, int64(10), l.Value())
_, err = lr.Read(buf)
require.Equal(t, ErrLimitReached, err)
}
func TestLimitReader_ReadTwoLimiters(t *testing.T) {
l1 := NewFixedLimiter(11)
l2 := NewFixedLimiter(8)
lr := NewLimitReader(strings.NewReader("hello world!"), l1, l2)
buf := make([]byte, 5)
n, err := lr.Read(buf)
require.Nil(t, err)
require.Equal(t, 5, n)
// Second read: l2 (limit 8) should reject 5 more bytes
_, err = lr.Read(buf)
require.Equal(t, ErrLimitReached, err)
// l1 should have been reverted
require.Equal(t, int64(5), l1.Value())
require.Equal(t, int64(5), l2.Value())
}
func TestLimitReader_ReadAll(t *testing.T) {
l := NewFixedLimiter(100)
lr := NewLimitReader(strings.NewReader("hello"), l)
data, err := io.ReadAll(lr)
require.Nil(t, err)
require.Equal(t, "hello", string(data))
require.Equal(t, int64(5), l.Value())
}
func TestLimitReader_ReadExactLimit(t *testing.T) {
l := NewFixedLimiter(5)
lr := NewLimitReader(bytes.NewReader(make([]byte, 5)), l)
data, err := io.ReadAll(lr)
require.Nil(t, err)
require.Equal(t, 5, len(data))
require.Equal(t, int64(5), l.Value())
}

334
web/package-lock.json generated
View File

@@ -194,9 +194,9 @@
}
},
"node_modules/@babel/helper-define-polyfill-provider": {
"version": "0.6.7",
"resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.7.tgz",
"integrity": "sha512-6Fqi8MtQ/PweQ9xvux65emkLQ83uB+qAVtfHkC9UodyHMIZdxNI01HjLCLUtybElp2KY2XNE0nOgyP1E1vXw9w==",
"version": "0.6.8",
"resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.8.tgz",
"integrity": "sha512-47UwBLPpQi1NoWzLuHNjRoHlYXMwIJoBf7MFou6viC/sIHWYygpvr0B6IAyh5sBdA2nr2LPIRww8lfaUVQINBA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -381,23 +381,23 @@
}
},
"node_modules/@babel/helpers": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.28.6.tgz",
"integrity": "sha512-xOBvwq86HHdB7WUDTfKfT/Vuxh7gElQ+Sfti2Cy6yIWNW05P8iUslOVcZ4/sKbE+/jQaukQAdz/gf3724kYdqw==",
"version": "7.29.2",
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz",
"integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/template": "^7.28.6",
"@babel/types": "^7.28.6"
"@babel/types": "^7.29.0"
},
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/parser": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz",
"integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==",
"version": "7.29.2",
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz",
"integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==",
"license": "MIT",
"dependencies": {
"@babel/types": "^7.29.0"
@@ -1444,9 +1444,9 @@
}
},
"node_modules/@babel/preset-env": {
"version": "7.29.0",
"resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.29.0.tgz",
"integrity": "sha512-fNEdfc0yi16lt6IZo2Qxk3knHVdfMYX33czNb4v8yWhemoBhibCpQK/uYHtSKIiO+p/zd3+8fYVXhQdOVV608w==",
"version": "7.29.2",
"resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.29.2.tgz",
"integrity": "sha512-DYD23veRYGvBFhcTY1iUvJnDNpuqNd/BzBwCvzOTKUnJjKg5kpUBh3/u9585Agdkgj+QuygG7jLfOPWMa2KVNw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1544,9 +1544,9 @@
}
},
"node_modules/@babel/runtime": {
"version": "7.28.6",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.6.tgz",
"integrity": "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==",
"version": "7.29.2",
"resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz",
"integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
@@ -2738,9 +2738,9 @@
}
},
"node_modules/@rollup/rollup-android-arm-eabi": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz",
"integrity": "sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz",
"integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==",
"cpu": [
"arm"
],
@@ -2752,9 +2752,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz",
"integrity": "sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz",
"integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==",
"cpu": [
"arm64"
],
@@ -2766,9 +2766,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz",
"integrity": "sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz",
"integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==",
"cpu": [
"arm64"
],
@@ -2780,9 +2780,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz",
"integrity": "sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz",
"integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==",
"cpu": [
"x64"
],
@@ -2794,9 +2794,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz",
"integrity": "sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz",
"integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==",
"cpu": [
"arm64"
],
@@ -2808,9 +2808,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz",
"integrity": "sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz",
"integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==",
"cpu": [
"x64"
],
@@ -2822,9 +2822,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz",
"integrity": "sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz",
"integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==",
"cpu": [
"arm"
],
@@ -2836,9 +2836,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz",
"integrity": "sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz",
"integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==",
"cpu": [
"arm"
],
@@ -2850,9 +2850,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz",
"integrity": "sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz",
"integrity": "sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==",
"cpu": [
"arm64"
],
@@ -2864,9 +2864,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz",
"integrity": "sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz",
"integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==",
"cpu": [
"arm64"
],
@@ -2878,9 +2878,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz",
"integrity": "sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz",
"integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==",
"cpu": [
"loong64"
],
@@ -2892,9 +2892,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-musl": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz",
"integrity": "sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz",
"integrity": "sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==",
"cpu": [
"loong64"
],
@@ -2906,9 +2906,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz",
"integrity": "sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz",
"integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==",
"cpu": [
"ppc64"
],
@@ -2920,9 +2920,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-musl": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz",
"integrity": "sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz",
"integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==",
"cpu": [
"ppc64"
],
@@ -2934,9 +2934,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz",
"integrity": "sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz",
"integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==",
"cpu": [
"riscv64"
],
@@ -2948,9 +2948,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz",
"integrity": "sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz",
"integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==",
"cpu": [
"riscv64"
],
@@ -2962,9 +2962,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz",
"integrity": "sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz",
"integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==",
"cpu": [
"s390x"
],
@@ -2976,9 +2976,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz",
"integrity": "sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz",
"integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==",
"cpu": [
"x64"
],
@@ -2990,9 +2990,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz",
"integrity": "sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz",
"integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==",
"cpu": [
"x64"
],
@@ -3004,9 +3004,9 @@
]
},
"node_modules/@rollup/rollup-openbsd-x64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz",
"integrity": "sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz",
"integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==",
"cpu": [
"x64"
],
@@ -3018,9 +3018,9 @@
]
},
"node_modules/@rollup/rollup-openharmony-arm64": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz",
"integrity": "sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz",
"integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==",
"cpu": [
"arm64"
],
@@ -3032,9 +3032,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz",
"integrity": "sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz",
"integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==",
"cpu": [
"arm64"
],
@@ -3046,9 +3046,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz",
"integrity": "sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz",
"integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==",
"cpu": [
"ia32"
],
@@ -3060,9 +3060,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-gnu": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz",
"integrity": "sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz",
"integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==",
"cpu": [
"x64"
],
@@ -3074,9 +3074,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz",
"integrity": "sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz",
"integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==",
"cpu": [
"x64"
],
@@ -3583,14 +3583,14 @@
}
},
"node_modules/babel-plugin-polyfill-corejs2": {
"version": "0.4.16",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.16.tgz",
"integrity": "sha512-xaVwwSfebXf0ooE11BJovZYKhFjIvQo7TsyVpETuIeH2JHv0k/T6Y5j22pPTvqYqmpkxdlPAJlyJ0tfOJAoMxw==",
"version": "0.4.17",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.17.tgz",
"integrity": "sha512-aTyf30K/rqAsNwN76zYrdtx8obu0E4KoUME29B1xj+B3WxgvWkp943vYQ+z8Mv3lw9xHXMHpvSPOBxzAkIa94w==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/compat-data": "^7.28.6",
"@babel/helper-define-polyfill-provider": "^0.6.7",
"@babel/helper-define-polyfill-provider": "^0.6.8",
"semver": "^6.3.1"
},
"peerDependencies": {
@@ -3598,13 +3598,13 @@
}
},
"node_modules/babel-plugin-polyfill-corejs3": {
"version": "0.14.1",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.14.1.tgz",
"integrity": "sha512-ENp89vM9Pw4kv/koBb5N2f9bDZsR0hpf3BdPMOg/pkS3pwO4dzNnQZVXtBbeyAadgm865DmQG2jMMLqmZXvuCw==",
"version": "0.14.2",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.14.2.tgz",
"integrity": "sha512-coWpDLJ410R781Npmn/SIBZEsAetR4xVi0SxLMXPaMO4lSf1MwnkGYMtkFxew0Dn8B3/CpbpYxN0JCgg8mn67g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-define-polyfill-provider": "^0.6.7",
"@babel/helper-define-polyfill-provider": "^0.6.8",
"core-js-compat": "^3.48.0"
},
"peerDependencies": {
@@ -3612,13 +3612,13 @@
}
},
"node_modules/babel-plugin-polyfill-regenerator": {
"version": "0.6.7",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.7.tgz",
"integrity": "sha512-OTYbUlSwXhNgr4g6efMZgsO8//jA61P7ZbRX3iTT53VON8l+WQS8IAUEVo4a4cWknrg2W8Cj4gQhRYNCJ8GkAA==",
"version": "0.6.8",
"resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.8.tgz",
"integrity": "sha512-M762rNHfSF1EV3SLtnCJXFoQbbIIz0OyRwnCmV0KPC7qosSfCO0QLTSuJX3ayAebubhE6oYBAYPrBA5ljowaZg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-define-polyfill-provider": "^0.6.7"
"@babel/helper-define-polyfill-provider": "^0.6.8"
},
"peerDependencies": {
"@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0"
@@ -3642,9 +3642,9 @@
"license": "MIT"
},
"node_modules/baseline-browser-mapping": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.0.tgz",
"integrity": "sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==",
"version": "2.10.10",
"resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.10.tgz",
"integrity": "sha512-sUoJ3IMxx4AyRqO4MLeHlnGDkyXRoUG0/AI9fjK+vS72ekpV0yWVY7O0BVjmBcRtkNcsAO2QDZ4tdKKGoI6YaQ==",
"dev": true,
"license": "Apache-2.0",
"bin": {
@@ -3766,9 +3766,9 @@
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001777",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001777.tgz",
"integrity": "sha512-tmN+fJxroPndC74efCdp12j+0rk0RHwV5Jwa1zWaFVyw2ZxAuPeG8ZgWC3Wz7uSjT3qMRQ5XHZ4COgQmsCMJAQ==",
"version": "1.0.30001780",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001780.tgz",
"integrity": "sha512-llngX0E7nQci5BPJDqoZSbuZ5Bcs9F5db7EtgfwBerX9XGtkkiO4NwfDDIRzHTTwcYC8vC7bmeUEPGrKlR/TkQ==",
"dev": true,
"funding": [
{
@@ -3910,9 +3910,9 @@
"license": "MIT"
},
"node_modules/core-js-compat": {
"version": "3.48.0",
"resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.48.0.tgz",
"integrity": "sha512-OM4cAF3D6VtH/WkLtWvyNC56EZVXsZdU3iqaMG2B4WvYrlqU831pc4UtG5yp0sE9z8Y02wVN7PjW5Zf9Gt0f1Q==",
"version": "3.49.0",
"resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.49.0.tgz",
"integrity": "sha512-VQXt1jr9cBz03b331DFDCCP90b3fanciLkgiOoy8SBHy06gNf+vQ1A3WFLqG7I8TipYIKeYK9wxd0tUrvHcOZA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3940,9 +3940,9 @@
}
},
"node_modules/cosmiconfig/node_modules/yaml": {
"version": "1.10.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
"integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
"version": "1.10.3",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.3.tgz",
"integrity": "sha512-vIYeF1u3CjlhAFekPPAk2h/Kv4T3mAkMox5OymRiJQB0spDP10LHvt+K7G9Ny6NuuMAb25/6n1qyUjAcGNf/AA==",
"license": "ISC",
"engines": {
"node": ">= 6"
@@ -4203,9 +4203,9 @@
}
},
"node_modules/electron-to-chromium": {
"version": "1.5.307",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.307.tgz",
"integrity": "sha512-5z3uFKBWjiNR44nFcYdkcXjKMbg5KXNdciu7mhTPo9tB7NbqSNP2sSnGR+fqknZSCwKkBN+oxiiajWs4dT6ORg==",
"version": "1.5.321",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.321.tgz",
"integrity": "sha512-L2C7Q279W2D/J4PLZLk7sebOILDSWos7bMsMNN06rK482umHUrh/3lM8G7IlHFOYip2oAg5nha1rCMxr/rs6ZQ==",
"dev": true,
"license": "ISC"
},
@@ -4324,9 +4324,9 @@
}
},
"node_modules/es-iterator-helpers": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.3.0.tgz",
"integrity": "sha512-04cg8iJFDOxWcYlu0GFFWgs7vtaEPCmr5w1nrj9V3z3axu/48HCMwK6VMp45Zh3ZB+xLP1ifbJfrq86+1ypKKQ==",
"version": "1.3.1",
"resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.3.1.tgz",
"integrity": "sha512-zWwRvqWiuBPr0muUG/78cW3aHROFCNIQ3zpmYDpwdbnt2m+xlNyRWpHBpa2lJjSBit7BQ+RXA1iwbSmu5yJ/EQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -5066,9 +5066,9 @@
}
},
"node_modules/flatted": {
"version": "3.4.1",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.1.tgz",
"integrity": "sha512-IxfVbRFVlV8V/yRaGzk0UVIcsKKHMSfYw66T/u4nTwlWteQePsxe//LjudR1AMX4tZW3WFCh3Zqa/sjlqpbURQ==",
"version": "3.4.2",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz",
"integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==",
"dev": true,
"license": "ISC"
},
@@ -7043,9 +7043,9 @@
}
},
"node_modules/path-scurry/node_modules/lru-cache": {
"version": "11.2.6",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz",
"integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==",
"version": "11.2.7",
"resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz",
"integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==",
"dev": true,
"license": "BlueOak-1.0.0",
"engines": {
@@ -7580,9 +7580,9 @@
}
},
"node_modules/rollup": {
"version": "4.59.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.59.0.tgz",
"integrity": "sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==",
"version": "4.60.0",
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.0.tgz",
"integrity": "sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -7596,31 +7596,31 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
"@rollup/rollup-android-arm-eabi": "4.59.0",
"@rollup/rollup-android-arm64": "4.59.0",
"@rollup/rollup-darwin-arm64": "4.59.0",
"@rollup/rollup-darwin-x64": "4.59.0",
"@rollup/rollup-freebsd-arm64": "4.59.0",
"@rollup/rollup-freebsd-x64": "4.59.0",
"@rollup/rollup-linux-arm-gnueabihf": "4.59.0",
"@rollup/rollup-linux-arm-musleabihf": "4.59.0",
"@rollup/rollup-linux-arm64-gnu": "4.59.0",
"@rollup/rollup-linux-arm64-musl": "4.59.0",
"@rollup/rollup-linux-loong64-gnu": "4.59.0",
"@rollup/rollup-linux-loong64-musl": "4.59.0",
"@rollup/rollup-linux-ppc64-gnu": "4.59.0",
"@rollup/rollup-linux-ppc64-musl": "4.59.0",
"@rollup/rollup-linux-riscv64-gnu": "4.59.0",
"@rollup/rollup-linux-riscv64-musl": "4.59.0",
"@rollup/rollup-linux-s390x-gnu": "4.59.0",
"@rollup/rollup-linux-x64-gnu": "4.59.0",
"@rollup/rollup-linux-x64-musl": "4.59.0",
"@rollup/rollup-openbsd-x64": "4.59.0",
"@rollup/rollup-openharmony-arm64": "4.59.0",
"@rollup/rollup-win32-arm64-msvc": "4.59.0",
"@rollup/rollup-win32-ia32-msvc": "4.59.0",
"@rollup/rollup-win32-x64-gnu": "4.59.0",
"@rollup/rollup-win32-x64-msvc": "4.59.0",
"@rollup/rollup-android-arm-eabi": "4.60.0",
"@rollup/rollup-android-arm64": "4.60.0",
"@rollup/rollup-darwin-arm64": "4.60.0",
"@rollup/rollup-darwin-x64": "4.60.0",
"@rollup/rollup-freebsd-arm64": "4.60.0",
"@rollup/rollup-freebsd-x64": "4.60.0",
"@rollup/rollup-linux-arm-gnueabihf": "4.60.0",
"@rollup/rollup-linux-arm-musleabihf": "4.60.0",
"@rollup/rollup-linux-arm64-gnu": "4.60.0",
"@rollup/rollup-linux-arm64-musl": "4.60.0",
"@rollup/rollup-linux-loong64-gnu": "4.60.0",
"@rollup/rollup-linux-loong64-musl": "4.60.0",
"@rollup/rollup-linux-ppc64-gnu": "4.60.0",
"@rollup/rollup-linux-ppc64-musl": "4.60.0",
"@rollup/rollup-linux-riscv64-gnu": "4.60.0",
"@rollup/rollup-linux-riscv64-musl": "4.60.0",
"@rollup/rollup-linux-s390x-gnu": "4.60.0",
"@rollup/rollup-linux-x64-gnu": "4.60.0",
"@rollup/rollup-linux-x64-musl": "4.60.0",
"@rollup/rollup-openbsd-x64": "4.60.0",
"@rollup/rollup-openharmony-arm64": "4.60.0",
"@rollup/rollup-win32-arm64-msvc": "4.60.0",
"@rollup/rollup-win32-ia32-msvc": "4.60.0",
"@rollup/rollup-win32-x64-gnu": "4.60.0",
"@rollup/rollup-win32-x64-msvc": "4.60.0",
"fsevents": "~2.3.2"
}
},
@@ -8307,9 +8307,9 @@
}
},
"node_modules/terser": {
"version": "5.46.0",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.46.0.tgz",
"integrity": "sha512-jTwoImyr/QbOWFFso3YoU3ik0jBBDJ6JTOQiy/J2YxVJdZCc+5u7skhNwiOR3FQIygFqVUPHl7qbbxtjW2K3Qg==",
"version": "5.46.1",
"resolved": "https://registry.npmjs.org/terser/-/terser-5.46.1.tgz",
"integrity": "sha512-vzCjQO/rgUuK9sf8VJZvjqiqiHFaZLnOiimmUuOKODxWL8mm/xua7viT7aqX7dgPY60otQjUotzFMmCB4VdmqQ==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
@@ -9514,24 +9514,6 @@
"dev": true,
"license": "ISC"
},
"node_modules/yaml": {
"version": "2.8.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz",
"integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==",
"dev": true,
"license": "ISC",
"optional": true,
"peer": true,
"bin": {
"yaml": "bin.mjs"
},
"engines": {
"node": ">= 14.6"
},
"funding": {
"url": "https://github.com/sponsors/eemeli"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",

View File

@@ -64,7 +64,7 @@
"notifications_actions_not_supported": "Cette action n'est pas supportée dans l'application web",
"notifications_actions_http_request_title": "Envoyer une requête HTTP {{method}} à {{url}}",
"publish_dialog_attachment_limits_quota_reached": "quota dépassé, {{remainingBytes}} restants",
"publish_dialog_tags_placeholder": "Liste d'étiquettes séparée par des virgules, par ex. avertissement,backup-srv1",
"publish_dialog_tags_placeholder": "Liste d'étiquettes séparée par des virgules, par ex. avertissement, backup-srv1",
"publish_dialog_priority_label": "Priorité",
"publish_dialog_click_label": "URL du clic",
"publish_dialog_click_placeholder": "URL ouverte lors d'un clic sur la notification",

View File

@@ -63,9 +63,10 @@ func (s *Store) UpsertSubscription(endpoint string, auth, p256dh, userID string,
} else if err != nil {
return err
}
// Insert or update subscription
// Insert or update subscription, and read back the actual ID (which may differ from
// the generated one if another request for the same endpoint raced us and inserted first)
updatedAt, warnedAt := time.Now().Unix(), 0
if _, err := tx.Exec(s.queries.upsertSubscription, subscriptionID, endpoint, auth, p256dh, userID, subscriberIP.String(), updatedAt, warnedAt); err != nil {
if err := tx.QueryRow(s.queries.upsertSubscription, subscriptionID, endpoint, auth, p256dh, userID, subscriberIP.String(), updatedAt, warnedAt).Scan(&subscriptionID); err != nil {
return err
}
// Replace all subscription topics
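
The switch from Exec to QueryRow(...).Scan(&subscriptionID), paired with RETURNING id in the queries below, closes the race described in the comment: when two requests upsert the same endpoint concurrently, the loser adopts the winner's row ID instead of keeping a generated ID that never landed. In simplified database/sql terms (schema trimmed; the real queries follow):

// upsertSubscriptionID sketch: the DO UPDATE clause (rather than DO NOTHING) is
// what makes RETURNING yield the surviving row's id on conflict.
func upsertSubscriptionID(tx *sql.Tx, generatedID, endpoint string) (string, error) {
	var id string
	err := tx.QueryRow(`
		INSERT INTO subscription (id, endpoint) VALUES (?, ?)
		ON CONFLICT (endpoint) DO UPDATE SET endpoint = excluded.endpoint
		RETURNING id`, generatedID, endpoint).Scan(&id)
	return id, err
}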

View File

@@ -53,6 +53,7 @@ const (
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (endpoint)
DO UPDATE SET key_auth = excluded.key_auth, key_p256dh = excluded.key_p256dh, user_id = excluded.user_id, subscriber_ip = excluded.subscriber_ip, updated_at = excluded.updated_at, warned_at = excluded.warned_at
RETURNING id
`
postgresUpdateSubscriptionWarningSentQuery = `UPDATE webpush_subscription SET warned_at = $1 WHERE id = $2`
postgresUpdateSubscriptionUpdatedAtQuery = `UPDATE webpush_subscription SET updated_at = $1 WHERE endpoint = $2`

View File

@@ -56,8 +56,9 @@ const (
sqliteUpsertSubscriptionQuery = `
INSERT INTO subscription (id, endpoint, key_auth, key_p256dh, user_id, subscriber_ip, updated_at, warned_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT (endpoint)
ON CONFLICT (endpoint)
DO UPDATE SET key_auth = excluded.key_auth, key_p256dh = excluded.key_p256dh, user_id = excluded.user_id, subscriber_ip = excluded.subscriber_ip, updated_at = excluded.updated_at, warned_at = excluded.warned_at
RETURNING id
`
sqliteUpdateSubscriptionWarningSentQuery = `UPDATE subscription SET warned_at = ? WHERE id = ?`
sqliteUpdateSubscriptionUpdatedAtQuery = `UPDATE subscription SET updated_at = ? WHERE endpoint = ?`