Compare commits

...

33 Commits

Author SHA1 Message Date
binwiederhier
2235d44726 Update changelog 2026-03-26 13:39:04 -04:00
binwiederhier
27bbb10a31 Bump 2026-03-26 11:13:43 -04:00
binwiederhier
be4134fc3b Try to fix flaky again 2026-03-26 10:49:39 -04:00
binwiederhier
e19ba059b5 Make attachment.Close() synchronous 2026-03-26 09:57:06 -04:00
binwiederhier
11a14d8fe7 Fix CI 2026-03-26 09:36:34 -04:00
binwiederhier
3759ff26b4 "npm ci" 2026-03-26 09:28:05 -04:00
binwiederhier
136b50f926 I'm tired and GitHub Actions hates me. I'll release this tomorrow ... 2026-03-25 21:35:51 -04:00
binwiederhier
2770f65027 Arrrg 2026-03-25 21:12:27 -04:00
binwiederhier
db6f813386 Release notes derp 2026-03-25 21:03:49 -04:00
binwiederhier
15d963cb53 Fix flaky test due to new attachment expiry cap logic 2026-03-25 21:00:58 -04:00
binwiederhier
a2206dba9f Fix races in tests 2026-03-25 20:47:20 -04:00
binwiederhier
d159580ecf Bump 2026-03-25 20:34:48 -04:00
binwiederhier
0de9dc11ad Bump 2026-03-25 20:29:12 -04:00
binwiederhier
f790143b0b Refined wording, review 2026-03-25 17:35:24 -04:00
binwiederhier
42b0254c9b Merge branch 'main' of github.com:binwiederhier/ntfy into ageru/main 2026-03-25 17:26:49 -04:00
Philipp C. Heckel
d183af61fa Merge pull request #1672 from binwiederhier/attachment-fixes
Attachment fixes to address inconsistencies between DB and backend store
2026-03-25 17:26:18 -04:00
binwiederhier
f256a4101b Fix test 2026-03-25 17:20:02 -04:00
binwiederhier
3ff8bacc45 Update changelog 2026-03-25 17:09:44 -04:00
binwiederhier
ca59cfc1e1 Words 2026-03-25 17:01:29 -04:00
binwiederhier
071543efda Fixes 2026-03-25 15:28:23 -04:00
ageru
e22a77d4bb Rename ntfy-server.openrc to ntfy.openrc 2026-03-24 23:38:16 +01:00
ageru
e0362dce36 install.md - Install ntfy server service manually
Rework of steps to install ntfy server service on systemd and OpenRC
2026-03-24 23:36:19 +01:00
binwiederhier
e55d1cee6b Attachment fixes to address inconsistencies between DB and backend store 2026-03-24 17:37:50 -04:00
ageru
676f1ff1cb Merge branch 'binwiederhier:main' into main 2026-03-24 22:37:38 +01:00
binwiederhier
dd760f16f7 Merge branch 'main' of github.com:binwiederhier/ntfy 2026-03-23 13:05:09 -04:00
binwiederhier
d00277107a Release notes 2026-03-23 13:04:47 -04:00
binwiederhier
b95efe8dd3 Tighten message IDs in attachments 2026-03-23 12:54:13 -04:00
binwiederhier
075f2ffa15 Refine sync() to properly update sizes 2026-03-23 12:44:40 -04:00
Philipp C. Heckel
2f6a044c34 Merge pull request #1656 from binwiederhier/attachment-s3
S3 attachment storage
2026-03-22 21:33:07 -04:00
ageru
e8199fa6b5 install.md - Install ntfy server service manually
First draft - Add manual steps to install ntfy server service on systemd and OpenRC
2026-03-16 23:30:21 +01:00
ageru
c29a7bc8cc Merge branch 'binwiederhier:main' into main 2026-03-16 22:35:14 +01:00
ageru
78f0593abe Merge pull request #1 from ageru/add-openrc-init-file-for-ntfy-server
Create Init service file for OpenRC
2026-03-11 22:00:03 +01:00
ageru
aca58f040f Create Init service file for OpenRC
This is an init file for OpenRC systems.

It should be equivalent in features to the current systemd file, with 2 deliberate changes:
- removed "no-log-dates", as the logs are fine as-is
- Lower nofile limit, as it seems largely sufficient for a self-hosted instance. Feel free to increase to 8192 or 10240 if necessary.

Confirmed functional with Gentoo amd64 and ntfy 2.17.0.
2026-03-11 21:56:14 +01:00
25 changed files with 536 additions and 312 deletions

View File

@@ -146,11 +146,13 @@ web-build:
../server/site/config.js
web-deps:
cd web && $(NPM) install
cd web && $(NPM) ci
# Use "npm ci" so that we don't change the package lock file
# If this fails for .svg files, optimize them with svgo
web-deps-update:
cd web && $(NPM) update
cd web && $(NPM) install
web-fmt:
cd web && $(NPM) run format

View File

@@ -4,7 +4,6 @@ import (
"errors"
"fmt"
"io"
"regexp"
"sync"
"time"
@@ -15,58 +14,70 @@ import (
)
const (
tagStore = "attachment_store"
syncInterval = 15 * time.Minute // How often to run the background sync loop
orphanGracePeriod = time.Hour // Don't delete orphaned objects younger than this to avoid races with in-flight uploads
tagStore = "attachment_store"
syncInterval = 15 * time.Minute // How often to run the background sync loop
)
var (
fileIDRegex = regexp.MustCompile(fmt.Sprintf(`^[-_A-Za-z0-9]{%d}$`, model.MessageIDLength))
errInvalidFileID = errors.New("invalid file ID")
)
var errInvalidFileID = errors.New("invalid file ID")
// Store manages attachment storage with shared logic for size tracking, limiting,
// ID validation, and background sync to reconcile storage with the database.
type Store struct {
backend backend
limit int64 // Defined limit of the store in bytes
size int64 // Current size of the store in bytes
sizes map[string]int64 // File ID -> size, for subtracting on Remove
localIDs func() ([]string, error) // Returns file IDs that should exist locally, used for sync()
closeChan chan struct{}
mu sync.RWMutex // Protects size and sizes
backend backend
limit int64 // Defined limit of the store in bytes
size int64 // Current size of the store in bytes
sizes map[string]int64 // File ID -> size, for subtracting on Remove
attachmentsWithSizes func() (map[string]int64, error) // Returns file ID -> size for active attachments
orphanGracePeriod time.Duration // Don't delete orphaned objects younger than this
closeChan chan struct{}
doneChan chan struct{}
mu sync.RWMutex // Protects size and sizes
}
// NewFileStore creates a new file-system backed attachment cache
func NewFileStore(dir string, totalSizeLimit int64, localIDsFn func() ([]string, error)) (*Store, error) {
func NewFileStore(dir string, totalSizeLimit int64, orphanGracePeriod time.Duration, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
b, err := newFileBackend(dir)
if err != nil {
return nil, err
}
return newStore(b, totalSizeLimit, localIDsFn)
return newStore(b, totalSizeLimit, orphanGracePeriod, attachmentsWithSizes)
}
// NewS3Store creates a new S3-backed attachment cache. The s3URL must be in the format:
//
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
func NewS3Store(s3URL string, totalSizeLimit int64, localIDs func() ([]string, error)) (*Store, error) {
func NewS3Store(s3URL string, totalSizeLimit int64, orphanGracePeriod time.Duration, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
config, err := s3.ParseURL(s3URL)
if err != nil {
return nil, err
}
return newStore(newS3Backend(s3.New(config)), totalSizeLimit, localIDs)
return newStore(newS3Backend(s3.New(config)), totalSizeLimit, orphanGracePeriod, attachmentsWithSizes)
}
func newStore(backend backend, totalSizeLimit int64, localIDs func() ([]string, error)) (*Store, error) {
func newStore(backend backend, totalSizeLimit int64, orphanGracePeriod time.Duration, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
c := &Store{
backend: backend,
limit: totalSizeLimit,
sizes: make(map[string]int64),
localIDs: localIDs,
closeChan: make(chan struct{}),
backend: backend,
limit: totalSizeLimit,
sizes: make(map[string]int64),
attachmentsWithSizes: attachmentsWithSizes,
orphanGracePeriod: orphanGracePeriod,
closeChan: make(chan struct{}),
doneChan: make(chan struct{}),
}
if localIDs != nil {
// Hydrate sizes from the database immediately so that Size()/Remaining()/Remove()
// are accurate from the start, without waiting for the first sync() call.
if attachmentsWithSizes != nil {
attachments, err := attachmentsWithSizes()
if err != nil {
return nil, fmt.Errorf("attachment store: failed to load existing attachments: %w", err)
}
for id, size := range attachments {
c.sizes[id] = size
c.size += size
}
go c.syncLoop()
} else {
close(c.doneChan)
}
return c, nil
}
@@ -76,7 +87,7 @@ func newStore(backend backend, totalSizeLimit int64, localIDs func() ([]string,
// from the client's Content-Length header; backends may use it to optimize uploads (e.g.
// streaming directly to S3 without buffering).
func (c *Store) Write(id string, reader io.Reader, untrustedLength int64, limiters ...util.Limiter) (int64, error) {
if !fileIDRegex.MatchString(id) {
if !model.ValidMessageID(id) {
return 0, errInvalidFileID
}
log.Tag(tagStore).Field("message_id", id).Debug("Writing attachment")
@@ -97,7 +108,7 @@ func (c *Store) Write(id string, reader io.Reader, untrustedLength int64, limite
// Read retrieves an attachment file by ID
func (c *Store) Read(id string) (io.ReadCloser, int64, error) {
if !fileIDRegex.MatchString(id) {
if !model.ValidMessageID(id) {
return nil, 0, errInvalidFileID
}
return c.backend.Get(id)
@@ -108,7 +119,7 @@ func (c *Store) Read(id string) (io.ReadCloser, int64, error) {
// started and before the first sync) are corrected by the next sync() call.
func (c *Store) Remove(ids ...string) error {
for _, id := range ids {
if !fileIDRegex.MatchString(id) {
if !model.ValidMessageID(id) {
return errInvalidFileID
}
}
@@ -134,20 +145,21 @@ func (c *Store) Remove(ids ...string) error {
return nil
}
// Sync triggers an immediate reconciliation of storage with the database.
func (c *Store) Sync() error {
return c.sync()
}
// sync reconciles the backend storage with the database. It lists all objects,
// deletes orphans (not in the valid ID set and older than 1 hour), and recomputes
// the total size from the remaining objects.
// deletes orphans (not in the valid ID set and older than the grace period), and
// recomputes the total size from the existing attachments in the database.
func (c *Store) sync() error {
if c.localIDs == nil {
if c.attachmentsWithSizes == nil {
return nil
}
localIDs, err := c.localIDs()
attachmentsWithSizes, err := c.attachmentsWithSizes()
if err != nil {
return fmt.Errorf("attachment sync: failed to get valid IDs: %w", err)
}
localIDMap := make(map[string]struct{}, len(localIDs))
for _, id := range localIDs {
localIDMap[id] = struct{}{}
return fmt.Errorf("attachment sync: failed to get existing attachments: %w", err)
}
remoteObjects, err := c.backend.List()
if err != nil {
@@ -155,25 +167,25 @@ func (c *Store) sync() error {
}
// Calculate total cache size and collect orphaned attachments, excluding objects younger
// than the grace period to account for races, and skipping objects with invalid IDs.
cutoff := time.Now().Add(-orphanGracePeriod)
cutoff := time.Now().Add(-c.orphanGracePeriod)
var orphanIDs []string
var count, size int64
var count, totalSize int64
sizes := make(map[string]int64, len(remoteObjects))
for _, obj := range remoteObjects {
if !fileIDRegex.MatchString(obj.ID) {
if !model.ValidMessageID(obj.ID) {
continue
}
if _, ok := localIDMap[obj.ID]; !ok && obj.LastModified.Before(cutoff) {
if _, ok := attachmentsWithSizes[obj.ID]; !ok && obj.LastModified.Before(cutoff) {
orphanIDs = append(orphanIDs, obj.ID)
} else {
count++
size += obj.Size
sizes[obj.ID] = obj.Size
totalSize += attachmentsWithSizes[obj.ID]
sizes[obj.ID] = attachmentsWithSizes[obj.ID]
}
}
log.Tag(tagStore).Debug("Attachment store updated: %d attachment(s), %s", count, util.FormatSizeHuman(size))
log.Tag(tagStore).Debug("Attachment store updated: %d attachment(s), %s", count, util.FormatSizeHuman(totalSize))
c.mu.Lock()
c.size = size
c.size = totalSize
c.sizes = sizes
c.mu.Unlock()
// Delete orphaned attachments
@@ -208,12 +220,14 @@ func (c *Store) Remaining() int64 {
return remaining
}
// Close stops the background sync goroutine
// Close stops the background sync goroutine and waits for it to finish
func (c *Store) Close() {
close(c.closeChan)
<-c.doneChan
}
func (c *Store) syncLoop() {
defer close(c.doneChan)
if err := c.sync(); err != nil {
log.Tag(tagStore).Err(err).Warn("Attachment sync failed")
}

View File

@@ -2,6 +2,7 @@ package attachment
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
@@ -9,7 +10,7 @@ import (
func newTestFileStore(t *testing.T, totalSizeLimit int64) (dir string, cache *Store) {
t.Helper()
dir = t.TempDir()
cache, err := NewFileStore(dir, totalSizeLimit, nil)
cache, err := NewFileStore(dir, totalSizeLimit, time.Hour, nil)
require.Nil(t, err)
t.Cleanup(func() { cache.Close() })
return dir, cache

View File

@@ -24,7 +24,7 @@ func TestS3Store_WriteWithPrefix(t *testing.T) {
client := s3.New(cfg)
deleteAllObjects(t, client)
backend := newS3Backend(client)
cache, err := newStore(backend, 10*1024, nil)
cache, err := newStore(backend, 10*1024, time.Hour, nil)
require.Nil(t, err)
t.Cleanup(func() {
deleteAllObjects(t, client)
@@ -62,7 +62,7 @@ func newTestRealS3Store(t *testing.T, totalSizeLimit int64) (*Store, *modTimeOve
inner := newS3Backend(client)
wrapper := &modTimeOverrideBackend{backend: inner, modTimes: make(map[string]time.Time)}
deleteAllObjects(t, client)
store, err := newStore(wrapper, totalSizeLimit, nil)
store, err := newStore(wrapper, totalSizeLimit, time.Hour, nil)
require.Nil(t, err)
t.Cleanup(func() {
deleteAllObjects(t, client)

View File

@@ -162,9 +162,9 @@ func TestStore_SyncRecomputesSize(t *testing.T) {
s.mu.Unlock()
require.Equal(t, int64(999), s.Size())
// Set localIDs to include both files so nothing gets deleted
s.localIDs = func() ([]string, error) {
return []string{"abcdefghijk0", "abcdefghijk1"}, nil
// Set attachmentsWithSizes to include both files so nothing gets deleted
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{"abcdefghijk0": 100, "abcdefghijk1": 200}, nil
}
// Sync should recompute size from the backend
@@ -280,8 +280,8 @@ func TestStore_Sync(t *testing.T) {
require.Equal(t, int64(15), s.Size())
// Set the ID provider to only know about file 0 and 2
s.localIDs = func() ([]string, error) {
return []string{"abcdefghijk0", "abcdefghijk2"}, nil
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{"abcdefghijk0": 5, "abcdefghijk2": 5}, nil
}
// Make file 1 old enough to be cleaned up
@@ -314,8 +314,8 @@ func TestStore_Sync_SkipsRecentFiles(t *testing.T) {
require.Nil(t, err)
// Set the ID provider to return empty (no valid IDs)
s.localIDs = func() ([]string, error) {
return []string{}, nil
s.attachmentsWithSizes = func() (map[string]int64, error) {
return map[string]int64{}, nil
}
// File was just created, so it should NOT be deleted (< 1 hour old)

View File

@@ -28,42 +28,130 @@ resources to get started. _I am not affiliated with Kris or Alex, I just liked t
Please check out the [releases page](https://github.com/binwiederhier/ntfy/releases) for binaries and
deb/rpm packages.
### Download and run
The steps below allow you to download ntfy server and run it in a pinch. However, they are not enough to install it
permanently as a service that starts at boot time.
=== "x86_64/amd64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.tar.gz
tar zxvf ntfy_2.19.2_linux_amd64.tar.gz
sudo cp -a ntfy_2.19.2_linux_amd64/ntfy /usr/local/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_amd64/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_amd64.tar.gz
tar zxvf ntfy_2.20.0_linux_amd64.tar.gz
sudo cp -a ntfy_2.20.0_linux_amd64/ntfy /usr/local/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.20.0_linux_amd64/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "armv6"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.tar.gz
tar zxvf ntfy_2.19.2_linux_armv6.tar.gz
sudo cp -a ntfy_2.19.2_linux_armv6/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_armv6/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv6.tar.gz
tar zxvf ntfy_2.20.0_linux_armv6.tar.gz
sudo cp -a ntfy_2.20.0_linux_armv6/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.20.0_linux_armv6/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "armv7/armhf"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.tar.gz
tar zxvf ntfy_2.19.2_linux_armv7.tar.gz
sudo cp -a ntfy_2.19.2_linux_armv7/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_armv7/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv7.tar.gz
tar zxvf ntfy_2.20.0_linux_armv7.tar.gz
sudo cp -a ntfy_2.20.0_linux_armv7/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.20.0_linux_armv7/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
=== "arm64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.tar.gz
tar zxvf ntfy_2.19.2_linux_arm64.tar.gz
sudo cp -a ntfy_2.19.2_linux_arm64/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.19.2_linux_arm64/{client,server}/*.yml /etc/ntfy
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_arm64.tar.gz
tar zxvf ntfy_2.20.0_linux_arm64.tar.gz
sudo cp -a ntfy_2.20.0_linux_arm64/ntfy /usr/bin/ntfy
sudo mkdir /etc/ntfy && sudo cp ntfy_2.20.0_linux_arm64/{client,server}/*.yml /etc/ntfy
sudo ntfy serve
```
### Install as a service
If you want to install ntfy server permanently as a service, and your OS/distribution of choice doesn't offer a package,
there are a few more steps to follow.
Create the ntfy user and group:
```bash
sudo useradd --system --home-dir /var/lib/ntfy --shell /bin/false --comment "User for the simple HTTP-based pub-sub notification service" ntfy
```
Depending on your init system, the following steps will diverge.
#### On systemd systems
Install the ntfy server unit file (which contains parameters to start the service at boot time):
=== "x86_64/amd64"
```bash
sudo mv ntfy_2.20.0_linux_amd64/server/ntfy.service /etc/systemd/system/
sudo chmod 644 /etc/systemd/system/ntfy.service
```
=== "armv6"
```bash
sudo mv ntfy_2.20.0_linux_armv6/server/ntfy.service /etc/systemd/system/
sudo chmod 644 /etc/systemd/system/ntfy.service
```
=== "armv7/armhf"
```bash
sudo mv ntfy_2.20.0_linux_armv7/server/ntfy.service /etc/systemd/system/
sudo chmod 644 /etc/systemd/system/ntfy.service
```
=== "arm64"
```bash
sudo mv ntfy_2.20.0_linux_arm64/server/ntfy.service /etc/systemd/system/
sudo chmod 644 /etc/systemd/system/ntfy.service
```
Then notify systemd we have added a new service and start the service:
```bash
sudo systemctl daemon-reload
sudo systemctl start ntfy
```
#### On OpenRC systems
Install the ntfy server service script:
=== "x86_64/amd64"
```bash
sudo mv ntfy_2.20.0_linux_amd64/server/ntfy.openrc /etc/init.d/ntfy
sudo chmod 755 /etc/init.d/ntfy
```
=== "armv6"
```bash
sudo mv ntfy_2.20.0_linux_armv6/server/ntfy.openrc /etc/init.d/ntfy
sudo chmod 755 /etc/init.d/ntfy
```
=== "armv7/armhf"
```bash
sudo mv ntfy_2.20.0_linux_armv7/server/ntfy.openrc /etc/init.d/ntfy
sudo chmod 755 /etc/init.d/ntfy
```
=== "arm64"
```bash
sudo mv ntfy_2.20.0_linux_arm64/server/ntfy.openrc /etc/init.d/ntfy
sudo chmod 755 /etc/init.d/ntfy
```
Start the ntfy server service:
```bash
sudo rc-service ntfy start
```
Add the ntfy server service to the default runlevel (so that it starts at boot time):
```bash
sudo rc-update add ntfy default
```
## Debian/Ubuntu repository
!!! info
@@ -116,7 +204,7 @@ Manually installing the .deb file:
=== "x86_64/amd64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_amd64.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -124,7 +212,7 @@ Manually installing the .deb file:
=== "armv6"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv6.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -132,7 +220,7 @@ Manually installing the .deb file:
=== "armv7/armhf"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv7.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -140,7 +228,7 @@ Manually installing the .deb file:
=== "arm64"
```bash
wget https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.deb
wget https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_arm64.deb
sudo dpkg -i ntfy_*.deb
sudo systemctl enable ntfy
sudo systemctl start ntfy
@@ -150,28 +238,28 @@ Manually installing the .deb file:
=== "x86_64/amd64"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_amd64.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_amd64.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "armv6"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv6.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv6.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "armv7/armhf"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_armv7.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_armv7.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
=== "arm64"
```bash
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_linux_arm64.rpm
sudo rpm -ivh https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_linux_arm64.rpm
sudo systemctl enable ntfy
sudo systemctl start ntfy
```
@@ -213,18 +301,18 @@ pkg install go-ntfy
## macOS
The [ntfy CLI](subscribe/cli.md) (`ntfy publish` and `ntfy subscribe` only) is supported on macOS as well.
To install, please [download the tarball](https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_darwin_all.tar.gz),
To install, please [download the tarball](https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_darwin_all.tar.gz),
extract it and place it somewhere in your `PATH` (e.g. `/usr/local/bin/ntfy`).
If run as `root`, ntfy will look for its config at `/etc/ntfy/client.yml`. For all other users, it'll look for it at
`~/Library/Application Support/ntfy/client.yml` (sample included in the tarball).
```bash
curl -L https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_darwin_all.tar.gz > ntfy_2.19.2_darwin_all.tar.gz
tar zxvf ntfy_2.19.2_darwin_all.tar.gz
sudo cp -a ntfy_2.19.2_darwin_all/ntfy /usr/local/bin/ntfy
curl -L https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_darwin_all.tar.gz > ntfy_2.20.0_darwin_all.tar.gz
tar zxvf ntfy_2.20.0_darwin_all.tar.gz
sudo cp -a ntfy_2.20.0_darwin_all/ntfy /usr/local/bin/ntfy
mkdir ~/Library/Application\ Support/ntfy
cp ntfy_2.19.2_darwin_all/client/client.yml ~/Library/Application\ Support/ntfy/client.yml
cp ntfy_2.20.0_darwin_all/client/client.yml ~/Library/Application\ Support/ntfy/client.yml
ntfy --help
```
@@ -245,7 +333,7 @@ brew install ntfy
The ntfy server and CLI are fully supported on Windows. You can run the ntfy server directly or as a Windows service.
To install, you can either
* [Download the latest ZIP](https://github.com/binwiederhier/ntfy/releases/download/v2.19.2/ntfy_2.19.2_windows_amd64.zip),
* [Download the latest ZIP](https://github.com/binwiederhier/ntfy/releases/download/v2.20.0/ntfy_2.20.0_windows_amd64.zip),
extract it and place the `ntfy.exe` binary somewhere in your `%Path%`.
* Or install ntfy from the [Scoop](https://scoop.sh) main repository via `scoop install ntfy`

View File

@@ -6,13 +6,37 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release
| Component | Version | Release date |
|------------------|---------|--------------|
| ntfy server | v2.19.2 | Mar 16, 2026 |
| ntfy server | v2.20.0 | Mar 26, 2026 |
| ntfy Android app | v1.24.0 | Mar 5, 2026 |
| ntfy iOS app | v1.3 | Nov 26, 2023 |
Please check out the release notes for [upcoming releases](#not-released-yet) below.
### ntfy server v2.19.2
### ntfy server v2.20.0
This release is another step towards making it possible to help scale ntfy up and out 🔥! With this release, you can store
attachments in an S3-compatible object store as an alternative to the local attachment directory. See [attachment store](config.md#attachments)
for details.
!!! warning
With this release, ntfy will take full control over the attachment directory or S3 bucket. Files/objects in the configured `attachment-cache-dir`
that match the message ID format (12 chars, matching `^[-_A-Za-z0-9]{12}$`), and have no entries in the message database will be deleted.
**Do not use a directory or S3 bucket as `attachment-cache-dir` that is also used for something else.**
This is a small behavioral change that was necessary because the old logic often left attachments behind and would not clean them
up. Unless you have re-used the attachment directory for anything else (which is hopefully never done), this should not affect
you at all.
**Features:**
* Add S3-compatible object storage as an alternative [attachment store](config.md#attachments) via `attachment-cache-dir` config option ([#1656](https://github.com/binwiederhier/ntfy/pull/1656)/[#1672](https://github.com/binwiederhier/ntfy/pull/1672))
**Bug fixes + maintenance:**
* Reject invalid e-mail addresses (e.g. multiple comma-separated recipients) with HTTP 400
* Add OpenRC init service file ([#1650](https://github.com/binwiederhier/ntfy/pull/1650), thanks to [@ageru](https://github.com/ageru) for the contribution)
## ntfy server v2.19.2
Released March 16, 2026
This is another small bugfix release for PostgreSQL, avoiding races between primary and read replica, as well as to
@@ -1798,12 +1822,4 @@ and the [ntfy Android app](https://github.com/binwiederhier/ntfy-android/release
## Not released yet
### ntfy server v2.20.x (UNRELEASED)
**Features:**
* Add S3-compatible object storage as an alternative [attachment store](config.md#attachments) via `attachment-cache-dir` config option
**Bug fixes + maintenance:**
* Reject invalid e-mail addresses (e.g. multiple comma-separated recipients) with HTTP 400
_Nothing._

8
go.mod
View File

@@ -19,7 +19,7 @@ require (
golang.org/x/sync v0.20.0
golang.org/x/term v0.41.0
golang.org/x/time v0.15.0
google.golang.org/api v0.272.0
google.golang.org/api v0.273.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -30,7 +30,7 @@ require github.com/pkg/errors v0.9.1 // indirect
require (
firebase.google.com/go/v4 v4.19.0
github.com/SherClockHolmes/webpush-go v1.4.0
github.com/jackc/pgx/v5 v5.9.0
github.com/jackc/pgx/v5 v5.9.1
github.com/microcosm-cc/bluemonday v1.0.27
github.com/prometheus/client_golang v1.23.2
github.com/stripe/stripe-go/v74 v74.30.0
@@ -41,7 +41,7 @@ require (
require (
cel.dev/expr v0.25.1 // indirect
cloud.google.com/go v0.123.0 // indirect
cloud.google.com/go/auth v0.18.3-0.20260310051336-87cdcc9f7568 // indirect
cloud.google.com/go/auth v0.19.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.9.0 // indirect
cloud.google.com/go/iam v1.5.3 // indirect
@@ -70,7 +70,7 @@ require (
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect
github.com/googleapis/gax-go/v2 v2.19.0 // indirect
github.com/googleapis/gax-go/v2 v2.20.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect

16
go.sum
View File

@@ -2,8 +2,8 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=
cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=
cloud.google.com/go/auth v0.18.3-0.20260310051336-87cdcc9f7568 h1:PJt3KrySfZkKdcEV2wlyNkfAPbMZGjtnv5oLrT4tWPg=
cloud.google.com/go/auth v0.18.3-0.20260310051336-87cdcc9f7568/go.mod h1:/Tt0rLCp4FHXEBtdyYqvIZPcJzbpJ/fmqtgIaXseDK4=
cloud.google.com/go/auth v0.19.0 h1:DGYwtbcsGsT1ywuxsIoWi1u/vlks0moIblQHgSDgQkQ=
cloud.google.com/go/auth v0.19.0/go.mod h1:2Aph7BT2KnaSFOM0JDPyiYgNh6PL9vGMiP8CUIXZ+IY=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
@@ -98,8 +98,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.14 h1:yh8ncqsbUY4shRD5dA6RlzjJaT4hi3kII+zYw8wmLb8=
github.com/googleapis/enterprise-certificate-proxy v0.3.14/go.mod h1:vqVt9yG9480NtzREnTlmGSBmFrA+bzb0yl0TxoBQXOg=
github.com/googleapis/gax-go/v2 v2.19.0 h1:fYQaUOiGwll0cGj7jmHT/0nPlcrZDFPrZRhTsoCr8hE=
github.com/googleapis/gax-go/v2 v2.19.0/go.mod h1:w2ROXVdfGEVFXzmlciUU4EdjHgWvB5h2n6x/8XSTTJA=
github.com/googleapis/gax-go/v2 v2.20.0 h1:NIKVuLhDlIV74muWlsMM4CcQZqN6JJ20Qcxd9YMuYcs=
github.com/googleapis/gax-go/v2 v2.20.0/go.mod h1:But/NJU6TnZsrLai/xBAQLLz+Hc7fHZJt/hsCz3Fih4=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
@@ -108,8 +108,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.9.0 h1:T/dI+2TvmI2H8s/KH1/lXIbz1CUFk3gn5oTjr0/mBsE=
github.com/jackc/pgx/v5 v5.9.0/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc=
github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -272,8 +272,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/api v0.272.0 h1:eLUQZGnAS3OHn31URRf9sAmRk3w2JjMx37d2k8AjJmA=
google.golang.org/api v0.272.0/go.mod h1:wKjowi5LNJc5qarNvDCvNQBn3rVK8nSy6jg2SwRwzIA=
google.golang.org/api v0.273.0 h1:r/Bcv36Xa/te1ugaN1kdJ5LoA5Wj/cL+a4gj6FiPBjQ=
google.golang.org/api v0.273.0/go.mod h1:JbAt7mF+XVmWu6xNP8/+CTiGH30ofmCmk9nM8d8fHew=
google.golang.org/appengine/v2 v2.0.6 h1:LvPZLGuchSBslPBp+LAhihBeGSiRh1myRoYK4NtuBIw=
google.golang.org/appengine/v2 v2.0.6/go.mod h1:WoEXGoXNfa0mLvaH5sV3ZSGXwVmy8yf7Z1JKf3J3wLI=
google.golang.org/genproto v0.0.0-20260319201613-d00831a3d3e7 h1:XzmzkmB14QhVhgnawEVsOn6OFsnpyxNPRY9QV01dNB0=

View File

@@ -24,7 +24,6 @@ var errNoRows = errors.New("no rows found")
// queries holds the database-specific SQL queries
type queries struct {
insertMessage string
deleteMessage string
selectScheduledMessageIDsBySeqID string
deleteScheduledBySequenceID string
updateMessagesForTopicExpiry string
@@ -35,18 +34,17 @@ type queries struct {
selectMessagesSinceIDScheduled string
selectMessagesLatest string
selectMessagesDue string
selectMessagesExpired string
deleteExpiredMessages string
updateMessagePublished string
selectMessagesCount string
selectTopics string
updateAttachmentDeleted string
selectAttachmentsExpired string
markExpiredAttachmentsDeleted string
selectAttachmentsSizeBySender string
selectAttachmentsSizeByUserID string
selectAttachmentsWithSizes string
selectStats string
updateStats string
updateMessageTime string
selectAttachmentIDs string
}
// Cache stores published messages
@@ -246,14 +244,16 @@ func (c *Cache) MessagesDue() ([]*model.Message, error) {
return readMessages(rows)
}
// MessagesExpired returns a list of IDs for messages that have expired (should be deleted)
func (c *Cache) MessagesExpired() ([]string, error) {
rows, err := c.db.Query(c.queries.selectMessagesExpired, time.Now().Unix())
// DeleteExpiredMessages deletes up to `limit` expired messages in a single query
// and returns the number of deleted rows.
func (c *Cache) DeleteExpiredMessages(limit int) (int64, error) {
c.maybeLock()
defer c.maybeUnlock()
result, err := c.db.Exec(c.queries.deleteExpiredMessages, time.Now().Unix(), limit)
if err != nil {
return nil, err
return 0, err
}
defer rows.Close()
return readStrings(rows)
return result.RowsAffected()
}
// Message returns the message with the given ID, or ErrMessageNotFound if not found
@@ -262,10 +262,10 @@ func (c *Cache) Message(id string) (*model.Message, error) {
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, model.ErrMessageNotFound
}
defer rows.Close()
return readMessage(rows)
}
@@ -312,20 +312,6 @@ func (c *Cache) Topics() ([]string, error) {
return readStrings(rows)
}
// DeleteMessages deletes the messages with the given IDs
func (c *Cache) DeleteMessages(ids ...string) error {
c.maybeLock()
defer c.maybeUnlock()
return db.ExecTx(c.db, func(tx *sql.Tx) error {
for _, id := range ids {
if _, err := tx.Exec(c.queries.deleteMessage, id); err != nil {
return err
}
}
return nil
})
}
// DeleteScheduledBySequenceID deletes unpublished (scheduled) messages with the given topic and sequence ID.
// It returns the message IDs of the deleted messages, which can be used to clean up attachment files.
func (c *Cache) DeleteScheduledBySequenceID(topic, sequenceID string) ([]string, error) {
@@ -363,38 +349,16 @@ func (c *Cache) ExpireMessages(topics ...string) error {
})
}
// AttachmentIDs returns message IDs with active (non-expired, non-deleted) attachments
func (c *Cache) AttachmentIDs() ([]string, error) {
rows, err := c.db.ReadOnly().Query(c.queries.selectAttachmentIDs, time.Now().Unix())
if err != nil {
return nil, err
}
defer rows.Close()
return readStrings(rows)
}
// AttachmentsExpired returns message IDs with expired attachments that have not been deleted
func (c *Cache) AttachmentsExpired() ([]string, error) {
rows, err := c.db.Query(c.queries.selectAttachmentsExpired, time.Now().Unix())
if err != nil {
return nil, err
}
defer rows.Close()
return readStrings(rows)
}
// MarkAttachmentsDeleted marks the attachments for the given message IDs as deleted
func (c *Cache) MarkAttachmentsDeleted(ids ...string) error {
// MarkExpiredAttachmentsDeleted marks up to `limit` expired attachments as deleted in a single
// query and returns the number of updated rows.
func (c *Cache) MarkExpiredAttachmentsDeleted(limit int) (int64, error) {
c.maybeLock()
defer c.maybeUnlock()
return db.ExecTx(c.db, func(tx *sql.Tx) error {
for _, id := range ids {
if _, err := tx.Exec(c.queries.updateAttachmentDeleted, id); err != nil {
return err
}
}
return nil
})
result, err := c.db.Exec(c.queries.markExpiredAttachmentsDeleted, time.Now().Unix(), limit)
if err != nil {
return 0, err
}
return result.RowsAffected()
}
// AttachmentBytesUsedBySender returns the total size of active attachments sent by the given sender
@@ -415,6 +379,30 @@ func (c *Cache) AttachmentBytesUsedByUser(userID string) (int64, error) {
return c.readAttachmentBytesUsed(rows)
}
// AttachmentsWithSizes returns a map of message ID to attachment size for all active
// (non-expired, non-deleted) attachments. This is used to hydrate the attachment store's
// size tracking on startup and during periodic sync.
func (c *Cache) AttachmentsWithSizes() (map[string]int64, error) {
	rows, err := c.db.ReadOnly().Query(c.queries.selectAttachmentsWithSizes, time.Now().Unix())
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	sizes := make(map[string]int64)
	for rows.Next() {
		var (
			messageID      string
			attachmentSize int64
		)
		if err := rows.Scan(&messageID, &attachmentSize); err != nil {
			return nil, err
		}
		sizes[messageID] = attachmentSize
	}
	// Surface any error encountered during iteration (e.g. a broken connection).
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return sizes, nil
}
func (c *Cache) readAttachmentBytesUsed(rows *sql.Rows) (int64, error) {
defer rows.Close()
var size int64

View File

@@ -12,7 +12,6 @@ const (
INSERT INTO message (mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user_id, content_type, encoding, published)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24)
`
postgresDeleteMessageQuery = `DELETE FROM message WHERE mid = $1`
postgresSelectScheduledMessageIDsBySeqIDQuery = `SELECT mid FROM message WHERE topic = $1 AND sequence_id = $2 AND published = FALSE`
postgresDeleteScheduledBySequenceIDQuery = `DELETE FROM message WHERE topic = $1 AND sequence_id = $2 AND published = FALSE`
postgresUpdateMessagesForTopicExpiryQuery = `UPDATE message SET expires = $1 WHERE topic = $2`
@@ -61,26 +60,23 @@ const (
WHERE time <= $1 AND published = FALSE
ORDER BY time, id
`
postgresSelectMessagesExpiredQuery = `SELECT mid FROM message WHERE expires <= $1 AND published = TRUE`
postgresUpdateMessagePublishedQuery = `UPDATE message SET published = TRUE WHERE mid = $1`
postgresSelectMessagesCountQuery = `SELECT COUNT(*) FROM message`
postgresSelectTopicsQuery = `SELECT topic FROM message GROUP BY topic`
postgresUpdateAttachmentDeletedQuery = `UPDATE message SET attachment_deleted = TRUE WHERE mid = $1`
postgresSelectAttachmentsExpiredQuery = `SELECT mid FROM message WHERE attachment_expires > 0 AND attachment_expires <= $1 AND attachment_deleted = FALSE`
postgresDeleteExpiredMessagesQuery = `DELETE FROM message WHERE mid IN (SELECT mid FROM message WHERE expires <= $1 AND published = TRUE LIMIT $2)`
postgresMarkExpiredAttachmentsDeletedQuery = `UPDATE message SET attachment_deleted = TRUE WHERE mid IN (SELECT mid FROM message WHERE attachment_expires > 0 AND attachment_expires <= $1 AND attachment_deleted = FALSE LIMIT $2)`
postgresSelectAttachmentsSizeBySenderQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = '' AND sender = $1 AND attachment_expires >= $2`
postgresSelectAttachmentsSizeByUserIDQuery = `SELECT COALESCE(SUM(attachment_size), 0) FROM message WHERE user_id = $1 AND attachment_expires >= $2`
postgresSelectAttachmentsWithSizesQuery = `SELECT mid, attachment_size FROM message WHERE attachment_expires > $1 AND attachment_deleted = FALSE`
postgresSelectStatsQuery = `SELECT value FROM message_stats WHERE key = 'messages'`
postgresUpdateStatsQuery = `UPDATE message_stats SET value = $1 WHERE key = 'messages'`
postgresUpdateMessageTimeQuery = `UPDATE message SET time = $1 WHERE mid = $2`
postgresSelectAttachmentIDsQuery = `SELECT mid FROM message WHERE attachment_expires > $1 AND attachment_deleted = FALSE`
)
var postgresQueries = queries{
insertMessage: postgresInsertMessageQuery,
deleteMessage: postgresDeleteMessageQuery,
selectScheduledMessageIDsBySeqID: postgresSelectScheduledMessageIDsBySeqIDQuery,
deleteScheduledBySequenceID: postgresDeleteScheduledBySequenceIDQuery,
updateMessagesForTopicExpiry: postgresUpdateMessagesForTopicExpiryQuery,
@@ -91,18 +87,17 @@ var postgresQueries = queries{
selectMessagesSinceIDScheduled: postgresSelectMessagesSinceIDIncludeScheduledQuery,
selectMessagesLatest: postgresSelectMessagesLatestQuery,
selectMessagesDue: postgresSelectMessagesDueQuery,
selectMessagesExpired: postgresSelectMessagesExpiredQuery,
deleteExpiredMessages: postgresDeleteExpiredMessagesQuery,
updateMessagePublished: postgresUpdateMessagePublishedQuery,
selectMessagesCount: postgresSelectMessagesCountQuery,
selectTopics: postgresSelectTopicsQuery,
updateAttachmentDeleted: postgresUpdateAttachmentDeletedQuery,
selectAttachmentsExpired: postgresSelectAttachmentsExpiredQuery,
markExpiredAttachmentsDeleted: postgresMarkExpiredAttachmentsDeletedQuery,
selectAttachmentsSizeBySender: postgresSelectAttachmentsSizeBySenderQuery,
selectAttachmentsSizeByUserID: postgresSelectAttachmentsSizeByUserIDQuery,
selectAttachmentsWithSizes: postgresSelectAttachmentsWithSizesQuery,
selectStats: postgresSelectStatsQuery,
updateStats: postgresUpdateStatsQuery,
updateMessageTime: postgresUpdateMessageTimeQuery,
selectAttachmentIDs: postgresSelectAttachmentIDsQuery,
}
// NewPostgresStore creates a new PostgreSQL-backed message cache store using an existing database connection pool.

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"heckel.io/ntfy/v2/db"
"heckel.io/ntfy/v2/log"
)
// Initial PostgreSQL schema
@@ -41,6 +42,7 @@ const (
CREATE INDEX IF NOT EXISTS idx_message_sequence_id ON message (sequence_id);
CREATE INDEX IF NOT EXISTS idx_message_topic_published_time ON message (topic, published, time, id);
CREATE INDEX IF NOT EXISTS idx_message_published_expires ON message (published, expires);
CREATE INDEX IF NOT EXISTS idx_message_attachment_expires ON message (attachment_expires) WHERE attachment_deleted = FALSE;
CREATE INDEX IF NOT EXISTS idx_message_sender_attachment_expires ON message (sender, attachment_expires) WHERE user_id = '';
CREATE INDEX IF NOT EXISTS idx_message_user_id_attachment_expires ON message (user_id, attachment_expires);
CREATE TABLE IF NOT EXISTS message_stats (
@@ -57,21 +59,57 @@ const (
// PostgreSQL schema management queries
const (
postgresCurrentSchemaVersion = 14
postgresCurrentSchemaVersion = 15
postgresInsertSchemaVersionQuery = `INSERT INTO schema_version (store, version) VALUES ('message', $1)`
postgresUpdateSchemaVersionQuery = `UPDATE schema_version SET version = $1 WHERE store = 'message'`
postgresSelectSchemaVersionQuery = `SELECT version FROM schema_version WHERE store = 'message'`
)
func setupPostgres(db *sql.DB) error {
// PostgreSQL schema migrations
const (
// 14 -> 15
postgresMigrate14To15CreateIndexQuery = `
CREATE INDEX IF NOT EXISTS idx_message_attachment_expires ON message (attachment_expires) WHERE attachment_deleted = FALSE;
`
)
var postgresMigrations = map[int]func(d *sql.DB) error{
14: postgresMigrateFrom14,
}
func setupPostgres(d *sql.DB) error {
var schemaVersion int
if err := db.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion); err != nil {
return setupNewPostgresDB(db)
if err := d.QueryRow(postgresSelectSchemaVersionQuery).Scan(&schemaVersion); err != nil {
return setupNewPostgresDB(d)
} else if schemaVersion == postgresCurrentSchemaVersion {
return nil
} else if schemaVersion > postgresCurrentSchemaVersion {
return fmt.Errorf("unexpected schema version: version %d is higher than current version %d", schemaVersion, postgresCurrentSchemaVersion)
}
for i := schemaVersion; i < postgresCurrentSchemaVersion; i++ {
fn, ok := postgresMigrations[i]
if !ok {
return fmt.Errorf("cannot find migration step from schema version %d to %d", i, i+1)
} else if err := fn(d); err != nil {
return err
}
}
return nil
}
// postgresMigrateFrom14 migrates the Postgres message cache schema from version 14
// to 15: it creates the partial index idx_message_attachment_expires and bumps the
// stored schema version, both within a single transaction so the migration is
// applied atomically.
func postgresMigrateFrom14(d *sql.DB) error {
	log.Tag(tagMessageCache).Info("Migrating message cache database schema: from 14 to 15")
	return db.ExecTx(d, func(tx *sql.Tx) error {
		// The CREATE INDEX uses IF NOT EXISTS, so re-running this step is safe.
		if _, err := tx.Exec(postgresMigrate14To15CreateIndexQuery); err != nil {
			return err
		}
		if _, err := tx.Exec(postgresUpdateSchemaVersionQuery, 15); err != nil {
			return err
		}
		return nil
	})
}
func setupNewPostgresDB(sqlDB *sql.DB) error {
return db.ExecTx(sqlDB, func(tx *sql.Tx) error {
if _, err := tx.Exec(postgresCreateTablesQuery); err != nil {

View File

@@ -18,7 +18,6 @@ const (
INSERT INTO messages (mid, sequence_id, time, event, expires, topic, message, title, priority, tags, click, icon, actions, attachment_name, attachment_type, attachment_size, attachment_expires, attachment_url, attachment_deleted, sender, user, content_type, encoding, published)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
`
sqliteDeleteMessageQuery = `DELETE FROM messages WHERE mid = ?`
sqliteSelectScheduledMessageIDsBySeqIDQuery = `SELECT mid FROM messages WHERE topic = ? AND sequence_id = ? AND published = 0`
sqliteDeleteScheduledBySequenceIDQuery = `DELETE FROM messages WHERE topic = ? AND sequence_id = ? AND published = 0`
sqliteUpdateMessagesForTopicExpiryQuery = `UPDATE messages SET expires = ? WHERE topic = ?`
@@ -64,26 +63,23 @@ const (
WHERE time <= ? AND published = 0
ORDER BY time, id
`
sqliteSelectMessagesExpiredQuery = `SELECT mid FROM messages WHERE expires <= ? AND published = 1`
sqliteUpdateMessagePublishedQuery = `UPDATE messages SET published = 1 WHERE mid = ?`
sqliteSelectMessagesCountQuery = `SELECT COUNT(*) FROM messages`
sqliteSelectTopicsQuery = `SELECT topic FROM messages GROUP BY topic`
sqliteUpdateAttachmentDeletedQuery = `UPDATE messages SET attachment_deleted = 1 WHERE mid = ?`
sqliteSelectAttachmentsExpiredQuery = `SELECT mid FROM messages WHERE attachment_expires > 0 AND attachment_expires <= ? AND attachment_deleted = 0`
sqliteDeleteExpiredMessagesQuery = `DELETE FROM messages WHERE mid IN (SELECT mid FROM messages WHERE expires <= ? AND published = 1 LIMIT ?)`
sqliteMarkExpiredAttachmentsDeletedQuery = `UPDATE messages SET attachment_deleted = 1 WHERE mid IN (SELECT mid FROM messages WHERE attachment_expires > 0 AND attachment_expires <= ? AND attachment_deleted = 0 LIMIT ?)`
sqliteSelectAttachmentsSizeBySenderQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = '' AND sender = ? AND attachment_expires >= ?`
sqliteSelectAttachmentsSizeByUserIDQuery = `SELECT IFNULL(SUM(attachment_size), 0) FROM messages WHERE user = ? AND attachment_expires >= ?`
sqliteSelectAttachmentsWithSizesQuery = `SELECT mid, attachment_size FROM messages WHERE attachment_expires > ? AND attachment_deleted = 0`
sqliteSelectStatsQuery = `SELECT value FROM stats WHERE key = 'messages'`
sqliteUpdateStatsQuery = `UPDATE stats SET value = ? WHERE key = 'messages'`
sqliteUpdateMessageTimeQuery = `UPDATE messages SET time = ? WHERE mid = ?`
sqliteSelectAttachmentIDsQuery = `SELECT mid FROM messages WHERE attachment_expires > ? AND attachment_deleted = 0`
)
var sqliteQueries = queries{
insertMessage: sqliteInsertMessageQuery,
deleteMessage: sqliteDeleteMessageQuery,
selectScheduledMessageIDsBySeqID: sqliteSelectScheduledMessageIDsBySeqIDQuery,
deleteScheduledBySequenceID: sqliteDeleteScheduledBySequenceIDQuery,
updateMessagesForTopicExpiry: sqliteUpdateMessagesForTopicExpiryQuery,
@@ -94,18 +90,17 @@ var sqliteQueries = queries{
selectMessagesSinceIDScheduled: sqliteSelectMessagesSinceIDIncludeScheduledQuery,
selectMessagesLatest: sqliteSelectMessagesLatestQuery,
selectMessagesDue: sqliteSelectMessagesDueQuery,
selectMessagesExpired: sqliteSelectMessagesExpiredQuery,
deleteExpiredMessages: sqliteDeleteExpiredMessagesQuery,
updateMessagePublished: sqliteUpdateMessagePublishedQuery,
selectMessagesCount: sqliteSelectMessagesCountQuery,
selectTopics: sqliteSelectTopicsQuery,
updateAttachmentDeleted: sqliteUpdateAttachmentDeletedQuery,
selectAttachmentsExpired: sqliteSelectAttachmentsExpiredQuery,
markExpiredAttachmentsDeleted: sqliteMarkExpiredAttachmentsDeletedQuery,
selectAttachmentsSizeBySender: sqliteSelectAttachmentsSizeBySenderQuery,
selectAttachmentsSizeByUserID: sqliteSelectAttachmentsSizeByUserIDQuery,
selectAttachmentsWithSizes: sqliteSelectAttachmentsWithSizesQuery,
selectStats: sqliteSelectStatsQuery,
updateStats: sqliteUpdateStatsQuery,
updateMessageTime: sqliteUpdateMessageTimeQuery,
selectAttachmentIDs: sqliteSelectAttachmentIDsQuery,
}
// NewSQLiteStore creates a SQLite file-backed cache

View File

@@ -57,7 +57,7 @@ const (
// Schema version management for SQLite
const (
sqliteCurrentSchemaVersion = 14
sqliteCurrentSchemaVersion = 15
sqliteCreateSchemaVersionTableQuery = `
CREATE TABLE IF NOT EXISTS schemaVersion (
id INT PRIMARY KEY,
@@ -208,6 +208,7 @@ var (
11: sqliteMigrateFrom11,
12: sqliteMigrateFrom12,
13: sqliteMigrateFrom13,
14: sqliteMigrateFrom14,
}
)
@@ -451,3 +452,15 @@ func sqliteMigrateFrom13(sqlDB *sql.DB, _ time.Duration) error {
return nil
})
}
// sqliteMigrateFrom14 makes no schema changes; it only bumps the stored schema
// version to 15. The corresponding Postgres migration adds
// idx_message_attachment_expires, which SQLite already has from the initial schema.
func sqliteMigrateFrom14(sqlDB *sql.DB, _ time.Duration) error {
	log.Tag(tagMessageCache).Info("Migrating cache database schema: from 14 to 15")
	return db.ExecTx(sqlDB, func(tx *sql.Tx) error {
		if _, err := tx.Exec(sqliteUpdateSchemaVersionQuery, 15); err != nil {
			return err
		}
		return nil
	})
}

View File

@@ -209,7 +209,7 @@ func TestSqliteStore_Migration_From9(t *testing.T) {
require.True(t, rows.Next())
var version int
require.Nil(t, rows.Scan(&version))
require.Equal(t, 14, version)
require.Equal(t, 15, version)
require.Nil(t, rows.Close())
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)
@@ -287,6 +287,6 @@ func checkSqliteSchemaVersion(t *testing.T, filename string) {
require.True(t, rows.Next())
var schemaVersion int
require.Nil(t, rows.Scan(&schemaVersion))
require.Equal(t, 14, schemaVersion)
require.Equal(t, 15, schemaVersion)
require.Nil(t, rows.Close())
}

View File

@@ -3,7 +3,6 @@ package message_test
import (
"net/netip"
"path/filepath"
"sort"
"sync"
"testing"
"time"
@@ -274,9 +273,9 @@ func TestStore_Prune(t *testing.T) {
require.Nil(t, err)
require.Equal(t, 3, count)
expiredMessageIDs, err := s.MessagesExpired()
deleted, err := s.DeleteExpiredMessages(10)
require.Nil(t, err)
require.Nil(t, s.DeleteMessages(expiredMessageIDs...))
require.Equal(t, int64(2), deleted)
count, err = s.MessagesCount()
require.Nil(t, err)
@@ -414,10 +413,9 @@ func TestStore_AttachmentsExpired(t *testing.T) {
}
require.Nil(t, s.AddMessage(m))
ids, err := s.AttachmentsExpired()
count, err := s.MarkExpiredAttachmentsDeleted(10)
require.Nil(t, err)
require.Equal(t, 1, len(ids))
require.Equal(t, "m4", ids[0])
require.Equal(t, int64(1), count)
})
}
@@ -583,13 +581,9 @@ func TestStore_ExpireMessages(t *testing.T) {
require.Nil(t, s.ExpireMessages("topic1"))
// topic1 messages should now be expired (expires set to past)
expiredIDs, err := s.MessagesExpired()
deleted, err := s.DeleteExpiredMessages(100)
require.Nil(t, err)
require.Equal(t, 2, len(expiredIDs))
sort.Strings(expiredIDs)
expectedIDs := []string{m1.ID, m2.ID}
sort.Strings(expectedIDs)
require.Equal(t, expectedIDs, expiredIDs)
require.Equal(t, int64(2), deleted)
// topic2 should be unaffected
messages, err = s.Messages("topic2", model.SinceAllMessages, false)
@@ -629,27 +623,15 @@ func TestStore_MarkAttachmentsDeleted(t *testing.T) {
}
require.Nil(t, s.AddMessage(m2))
// Both should show as expired attachments needing cleanup
ids, err := s.AttachmentsExpired()
// Both should be marked as deleted in one batch
count, err := s.MarkExpiredAttachmentsDeleted(10)
require.Nil(t, err)
require.Equal(t, 2, len(ids))
// Mark msg1's attachment as deleted (file cleaned up)
require.Nil(t, s.MarkAttachmentsDeleted("msg1"))
// Now only msg2 should show as needing cleanup
ids, err = s.AttachmentsExpired()
require.Nil(t, err)
require.Equal(t, 1, len(ids))
require.Equal(t, "msg2", ids[0])
// Mark msg2 too
require.Nil(t, s.MarkAttachmentsDeleted("msg2"))
require.Equal(t, int64(2), count)
// No more expired attachments to clean up
ids, err = s.AttachmentsExpired()
count, err = s.MarkExpiredAttachmentsDeleted(10)
require.Nil(t, err)
require.Equal(t, 0, len(ids))
require.Equal(t, int64(0), count)
// Messages themselves still exist
messages, err := s.Messages("mytopic", model.SinceAllMessages, false)

View File

@@ -19,8 +19,8 @@ const (
PollRequestEvent = "poll_request"
)
// MessageIDLength is the length of a randomly generated message ID
const MessageIDLength = 12
// messageIDLength is the length of a randomly generated message ID
const messageIDLength = 12
// Errors for message operations
var (
@@ -133,10 +133,20 @@ func NewAction() *Action {
}
}
// GenerateMessageID creates a new random message ID (messageIDLength characters)
func GenerateMessageID() string {
	return util.RandomString(messageIDLength)
}

// ValidMessageID returns true if the given string is a valid message ID, as
// produced by GenerateMessageID
func ValidMessageID(s string) bool {
	return util.ValidRandomString(s, messageIDLength)
}
// NewMessage creates a new message with the current timestamp
func NewMessage(event, topic, msg string) *Message {
return &Message{
ID: util.RandomString(MessageIDLength),
ID: GenerateMessageID(),
Time: time.Now().Unix(),
Event: event,
Topic: topic,
@@ -173,11 +183,6 @@ func NewPollRequestMessage(topic, pollID string) *Message {
return m
}
// ValidMessageID returns true if the given string is a valid message ID
func ValidMessageID(s string) bool {
return util.ValidRandomString(s, MessageIDLength)
}
// SinceMarker represents a point in time or message ID from which to retrieve messages
type SinceMarker struct {
time time.Time

View File

@@ -20,6 +20,7 @@ const (
DefaultCacheBatchTimeout = time.Duration(0)
DefaultKeepaliveInterval = 45 * time.Second // Not too frequently to save battery (Android read timeout used to be 77s!)
DefaultManagerInterval = time.Minute
DefaultManagerBatchSize = 30000
DefaultDelayedSenderInterval = 10 * time.Second
DefaultMessageDelayMin = 10 * time.Second
DefaultMessageDelayMax = 3 * 24 * time.Hour
@@ -46,11 +47,13 @@ const (
// - total topic limit: max number of topics overall
// - various attachment limits
const (
DefaultMessageSizeLimit = 4096 // Bytes; note that FCM/APNS have a limit of ~4 KB for the entire message
DefaultTotalTopicLimit = 15000
DefaultAttachmentTotalSizeLimit = int64(5 * 1024 * 1024 * 1024) // 5 GB
DefaultAttachmentFileSizeLimit = int64(15 * 1024 * 1024) // 15 MB
DefaultAttachmentExpiryDuration = 3 * time.Hour
DefaultMessageSizeLimit = 4096 // Bytes; note that FCM/APNS have a limit of ~4 KB for the entire message
DefaultTotalTopicLimit = 15000
DefaultAttachmentTotalSizeLimit = int64(5 * 1024 * 1024 * 1024) // 5 GB
DefaultAttachmentFileSizeLimit = int64(15 * 1024 * 1024) // 15 MB
DefaultAttachmentExpiryDuration = 3 * time.Hour
DefaultAttachmentOrphanGracePeriod = time.Hour // Don't delete orphaned objects younger than this to avoid races with in-flight uploads
)
// Defines all per-visitor limits
@@ -115,9 +118,11 @@ type Config struct {
AttachmentTotalSizeLimit int64
AttachmentFileSizeLimit int64
AttachmentExpiryDuration time.Duration
AttachmentOrphanGracePeriod time.Duration
TemplateDir string // Directory to load named templates from
KeepaliveInterval time.Duration
ManagerInterval time.Duration
ManagerBatchSize int
DisallowedTopics []string
WebRoot string // empty to disable
DelayedSenderInterval time.Duration
@@ -217,9 +222,11 @@ func NewConfig() *Config {
AttachmentTotalSizeLimit: DefaultAttachmentTotalSizeLimit,
AttachmentFileSizeLimit: DefaultAttachmentFileSizeLimit,
AttachmentExpiryDuration: DefaultAttachmentExpiryDuration,
AttachmentOrphanGracePeriod: DefaultAttachmentOrphanGracePeriod,
TemplateDir: DefaultTemplateDir,
KeepaliveInterval: DefaultKeepaliveInterval,
ManagerInterval: DefaultManagerInterval,
ManagerBatchSize: DefaultManagerBatchSize,
DisallowedTopics: DefaultDisallowedTopics,
WebRoot: "/",
DelayedSenderInterval: DefaultDelayedSenderInterval,

49
server/ntfy.openrc Normal file
View File

@@ -0,0 +1,49 @@
#!/sbin/openrc-run
# OpenRC service configuration for ntfy Server.
# Should be placed in /etc/init.d/ as "ntfy" or "ntfy-server" (no extension), owned by root:root and with permissions 755.
# Assumes an ntfy system user and group have been created, for example using this command:
#   useradd --system --home-dir /var/lib/ntfy --shell /bin/false --comment "User for the simple HTTP-based pub-sub notification service" ntfy

name=$RC_SVCNAME
description="ntfy server"
command="/usr/local/bin/ntfy"
command_background=true
command_args="serve"
command_user="ntfy:ntfy"
extra_started_commands="reload"
pidfile="/run/${RC_SVCNAME}/${RC_SVCNAME}.pid"

# Changes the hard number of open files (nofile) limit to 2048 for the service.
rc_ulimit="-n 2048"

# Allows the service to bind to privileged ports (<1024).
capabilities="^cap_net_bind_service"

error_log="/var/log/ntfy.log"

# Service dependencies
depend() {
	use net
	after firewall
}

# Check for - and, if necessary, create - required files and folders. Might require
# some adjustment depending on the content of the server.yml file.
start_pre() {
	checkpath -f --owner "$command_user" --mode 0644 \
		/var/log/ntfy.log
	# Use $RC_SVCNAME here so the pidfile directory matches $pidfile even when
	# the service is installed under a different name (e.g. "ntfy-server");
	# the previous hardcoded /run/ntfy/ broke the pidfile in that case.
	checkpath -d --owner "$command_user" --mode 0750 \
		"/run/${RC_SVCNAME}/"
	checkpath -d --owner "$command_user" --mode 0755 \
		/var/lib/ntfy/
	checkpath -d --owner "$command_user" --mode 0750 \
		/var/cache/ntfy/
}

# Sends SIGHUP to the running server to make it re-read its configuration.
reload() {
	ebegin "Reloading $RC_SVCNAME's configuration"
	start-stop-daemon --signal SIGHUP --pidfile "${pidfile}"
	eend $? "Failed to reload $RC_SVCNAME's configuration"
}

View File

@@ -301,13 +301,10 @@ func createMessageCache(conf *Config, pool *db.DB) (*message.Cache, error) {
}
func createAttachmentStore(conf *Config, messageCache *message.Cache) (*attachment.Store, error) {
attachmentIDs := func() ([]string, error) {
return messageCache.AttachmentIDs()
}
if strings.HasPrefix(conf.AttachmentCacheDir, "s3://") {
return attachment.NewS3Store(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, attachmentIDs)
return attachment.NewS3Store(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, conf.AttachmentOrphanGracePeriod, messageCache.AttachmentsWithSizes)
} else if conf.AttachmentCacheDir != "" {
return attachment.NewFileStore(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, attachmentIDs)
return attachment.NewFileStore(conf.AttachmentCacheDir, conf.AttachmentTotalSizeLimit, conf.AttachmentOrphanGracePeriod, messageCache.AttachmentsWithSizes)
}
return nil, nil
}
@@ -436,14 +433,18 @@ func (s *Server) Stop() {
s.attachment.Close()
}
s.closeDatabases()
close(s.closeChan)
if s.closeChan != nil {
close(s.closeChan)
}
}
func (s *Server) closeDatabases() {
if s.userManager != nil {
s.userManager.Close()
}
s.messageCache.Close()
if s.messageCache != nil {
s.messageCache.Close()
}
if s.webPush != nil {
s.webPush.Close()
}
@@ -1429,6 +1430,9 @@ func (s *Server) handleBodyAsAttachment(r *http.Request, v *visitor, m *model.Me
return err
}
attachmentExpiry := time.Now().Add(vinfo.Limits.AttachmentExpiryDuration).Unix()
if m.Expires > 0 && attachmentExpiry > m.Expires {
attachmentExpiry = m.Expires // Attachment must never outlive the message
}
if m.Time > attachmentExpiry {
return errHTTPBadRequestAttachmentsExpiryBeforeDelivery.With(m)
}

View File

@@ -9,6 +9,7 @@ import (
"heckel.io/ntfy/v2/util"
"io"
"net/netip"
"os"
"path/filepath"
"strings"
"testing"
@@ -740,8 +741,13 @@ func TestAccount_Reservation_Delete_Messages_And_Attachments(t *testing.T) {
require.Equal(t, 200, rr.Code)
// Verify that messages and attachments were deleted
// This does not explicitly call the manager!
// This does not explicitly call the manager! We backdate the files so sync's
// grace period doesn't protect them.
past := time.Now().Add(-2 * time.Hour)
os.Chtimes(filepath.Join(s.config.AttachmentCacheDir, m1.ID), past, past)
os.Chtimes(filepath.Join(s.config.AttachmentCacheDir, m2.ID), past, past)
waitFor(t, func() bool {
s.attachment.Sync() // File cleanup is done by sync, not by the manager
ms, err := s.messageCache.Messages("mytopic1", model.SinceAllMessages, false)
require.Nil(t, err)
return len(ms) == 0 && !util.FileExists(filepath.Join(s.config.AttachmentCacheDir, m1.ID))

View File

@@ -3,7 +3,6 @@ package server
import (
"heckel.io/ntfy/v2/log"
"heckel.io/ntfy/v2/util"
"strings"
)
func (s *Server) execManager() {
@@ -120,7 +119,7 @@ func (s *Server) pruneVisitors() {
}
}).
Field("stale_visitors", staleVisitors).
Debug("Deleted %d stale visitor(s)", staleVisitors)
Debug("Finished deleting stale visitors")
}
func (s *Server) pruneTokens() {
@@ -135,7 +134,7 @@ func (s *Server) pruneTokens() {
log.Tag(tagManager).Err(err).Warn("Error deleting soft-deleted users")
}
}).
Debug("Removed expired tokens and users")
Debug("Finished deleting expired tokens and users")
}
}
@@ -143,48 +142,39 @@ func (s *Server) pruneAttachments() {
if s.attachment == nil {
return
}
// Only mark as deleted in DB. The actual storage files are cleaned up
// by the attachment store's sync() loop, which periodically reconciles
// storage with the database and removes orphaned files.
log.
Tag(tagManager).
Timing(func() {
ids, err := s.messageCache.AttachmentsExpired()
count, err := s.messageCache.MarkExpiredAttachmentsDeleted(s.config.ManagerBatchSize)
if err != nil {
log.Tag(tagManager).Err(err).Warn("Error retrieving expired attachments")
} else if len(ids) > 0 {
if log.Tag(tagManager).IsDebug() {
log.Tag(tagManager).Debug("Deleting attachments %s", strings.Join(ids, ", "))
}
if err := s.attachment.Remove(ids...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error deleting attachments")
}
if err := s.messageCache.MarkAttachmentsDeleted(ids...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error marking attachments deleted")
}
log.Tag(tagManager).Err(err).Warn("Error marking expired attachments as deleted")
} else if count > 0 {
log.Tag(tagManager).Debug("Marked %d expired attachment(s) as deleted", count)
} else {
log.Tag(tagManager).Debug("No expired attachments to delete")
}
}).
Debug("Deleted expired attachments")
Debug("Finished marking expired attachments as deleted")
}
func (s *Server) pruneMessages() {
// Only delete DB rows. Attachment storage files are cleaned up by the
// attachment store's sync() loop, which periodically reconciles storage
// with the database and removes orphaned files.
log.
Tag(tagManager).
Timing(func() {
expiredMessageIDs, err := s.messageCache.MessagesExpired()
count, err := s.messageCache.DeleteExpiredMessages(s.config.ManagerBatchSize)
if err != nil {
log.Tag(tagManager).Err(err).Warn("Error retrieving expired messages")
} else if len(expiredMessageIDs) > 0 {
if s.attachment != nil {
if err := s.attachment.Remove(expiredMessageIDs...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error deleting attachments for expired messages")
}
}
if err := s.messageCache.DeleteMessages(expiredMessageIDs...); err != nil {
log.Tag(tagManager).Err(err).Warn("Error marking attachments deleted")
}
log.Tag(tagManager).Err(err).Warn("Error deleting expired messages")
} else if count > 0 {
log.Tag(tagManager).Debug("Deleted %d expired message(s)", count)
} else {
log.Tag(tagManager).Debug("No expired messages to delete")
}
}).
Debug("Pruned messages")
Debug("Finished deleting expired messages")
}

View File

@@ -14,6 +14,7 @@ import (
"heckel.io/ntfy/v2/util"
"io"
"net/netip"
"os"
"path/filepath"
"strings"
"sync"
@@ -543,9 +544,14 @@ func TestPayments_Webhook_Subscription_Updated_Downgrade_From_PastDue_To_Active(
require.Equal(t, 1, len(r)) // "ztopic" reservation was deleted
require.Equal(t, "atopic", r[0].Topic)
// Verify that messages and attachments were deleted
// Verify that messages and attachments were deleted. We backdate the
// attachment files so sync's grace period doesn't protect them.
past := time.Now().Add(-2 * time.Hour)
os.Chtimes(filepath.Join(s.config.AttachmentCacheDir, a2.ID), past, past)
os.Chtimes(filepath.Join(s.config.AttachmentCacheDir, z2.ID), past, past)
time.Sleep(time.Second)
s.execManager()
s.attachment.Sync() // File cleanup is done by sync, not by the manager
ms, err := s.messageCache.Messages("atopic", model.SinceAllMessages, false)
require.Nil(t, err)

View File

@@ -2299,9 +2299,12 @@ func TestServer_PublishAttachmentAndExpire(t *testing.T) {
require.Equal(t, 200, response.Code)
require.Equal(t, content, response.Body.String())
// Prune and makes sure it's gone
// Prune and make sure it's gone. We backdate the file so sync's grace
// period doesn't protect it, then run the manager + sync explicitly.
require.Nil(t, os.Chtimes(file, time.Now().Add(-2*time.Hour), time.Now().Add(-2*time.Hour)))
waitFor(t, func() bool {
s.execManager() // May run many times
s.execManager()
s.attachment.Sync() // File cleanup is done by sync, not by the manager
return !util.FileExists(file)
})
response = request(t, s, "GET", path, "", nil)
@@ -2411,6 +2414,7 @@ func TestServer_PublishAttachmentWithTierBasedLimits(t *testing.T) {
require.Nil(t, s.userManager.AddTier(&user.Tier{
Code: "test",
MessageLimit: 100,
MessageExpiryDuration: time.Hour,
AttachmentFileSizeLimit: 50_000,
AttachmentTotalSizeLimit: 200_000,
AttachmentExpiryDuration: 30 * time.Second,
@@ -2698,7 +2702,7 @@ func TestServer_PublishWhileUpdatingStatsWithLotsOfMessages(t *testing.T) {
response := request(t, s, "PUT", "/mytopic", "some body", nil)
m := toMessage(t, response.Body.String())
require.Equal(t, "some body", m.Message)
require.True(t, time.Since(start) < 100*time.Millisecond)
require.True(t, time.Since(start) < 2*time.Second)
log.Info("Done: Publishing message; took %s", time.Since(start).Round(time.Millisecond))
// Wait for all Goroutines
@@ -4191,7 +4195,7 @@ func newTestConfigWithAuthFile(t *testing.T, databaseURL string) *Config {
func newTestServer(t *testing.T, config *Config) *Server {
server, err := New(config)
require.Nil(t, err)
t.Cleanup(server.closeDatabases)
t.Cleanup(server.Stop)
return server
}

87
web/package-lock.json generated
View File

@@ -2829,6 +2829,9 @@
"arm"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2843,6 +2846,9 @@
"arm"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2857,6 +2863,9 @@
"arm64"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2871,6 +2880,9 @@
"arm64"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2885,6 +2897,9 @@
"loong64"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2899,6 +2914,9 @@
"loong64"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2913,6 +2931,9 @@
"ppc64"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2927,6 +2948,9 @@
"ppc64"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2941,6 +2965,9 @@
"riscv64"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2955,6 +2982,9 @@
"riscv64"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2969,6 +2999,9 @@
"s390x"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2983,6 +3016,9 @@
"x64"
],
"dev": true,
"libc": [
"glibc"
],
"license": "MIT",
"optional": true,
"os": [
@@ -2997,6 +3033,9 @@
"x64"
],
"dev": true,
"libc": [
"musl"
],
"license": "MIT",
"optional": true,
"os": [
@@ -3766,9 +3805,9 @@
}
},
"node_modules/caniuse-lite": {
"version": "1.0.30001780",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001780.tgz",
"integrity": "sha512-llngX0E7nQci5BPJDqoZSbuZ5Bcs9F5db7EtgfwBerX9XGtkkiO4NwfDDIRzHTTwcYC8vC7bmeUEPGrKlR/TkQ==",
"version": "1.0.30001781",
"resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001781.tgz",
"integrity": "sha512-RdwNCyMsNBftLjW6w01z8bKEvT6e/5tpPVEgtn22TiLGlstHOVecsX2KHFkD5e/vRnIE4EGzpuIODb3mtswtkw==",
"dev": true,
"funding": [
{
@@ -4203,9 +4242,9 @@
}
},
"node_modules/electron-to-chromium": {
"version": "1.5.321",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.321.tgz",
"integrity": "sha512-L2C7Q279W2D/J4PLZLk7sebOILDSWos7bMsMNN06rK482umHUrh/3lM8G7IlHFOYip2oAg5nha1rCMxr/rs6ZQ==",
"version": "1.5.325",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.325.tgz",
"integrity": "sha512-PwfIw7WQSt3xX7yOf5OE/unLzsK9CaN2f/FvV3WjPR1Knoc1T9vePRVV4W1EM301JzzysK51K7FNKcusCr0zYA==",
"dev": true,
"license": "ISC"
},
@@ -7068,9 +7107,9 @@
"license": "ISC"
},
"node_modules/picomatch": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
"dev": true,
"license": "MIT",
"engines": {
@@ -9202,9 +9241,9 @@
}
},
"node_modules/workbox-build/node_modules/brace-expansion": {
"version": "5.0.4",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz",
"integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==",
"version": "5.0.5",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
"integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -9270,9 +9309,9 @@
}
},
"node_modules/workbox-build/node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
"dev": true,
"license": "MIT",
"engines": {
@@ -9514,24 +9553,6 @@
"dev": true,
"license": "ISC"
},
"node_modules/yaml": {
"version": "2.8.3",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.3.tgz",
"integrity": "sha512-AvbaCLOO2Otw/lW5bmh9d/WEdcDFdQp2Z2ZUH3pX9U2ihyUY0nvLv7J6TrWowklRGPYbB/IuIMfYgxaCPg5Bpg==",
"dev": true,
"license": "ISC",
"optional": true,
"peer": true,
"bin": {
"yaml": "bin.mjs"
},
"engines": {
"node": ">= 14.6"
},
"funding": {
"url": "https://github.com/sponsors/eemeli"
}
},
"node_modules/yocto-queue": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",