Merge remote-tracking branch 'upstream/develop' into bnakkoma

This commit is contained in:
itepechi 2025-11-28 02:12:29 +09:00
commit 11b4490908
Signed by: itepechi
SSH key fingerprint: SHA256:x1/tRiv2s9tYN2QCcybDZpt22AUtJHQ2DECLMA660xQ
263 changed files with 15901 additions and 5987 deletions

View file

@ -1,121 +0,0 @@
labels:
platform: linux/amd64
depends_on:
- test
variables:
- &scw-secrets
SCW_ACCESS_KEY:
from_secret: SCW_ACCESS_KEY
SCW_SECRET_KEY:
from_secret: SCW_SECRET_KEY
SCW_DEFAULT_ORGANIZATION_ID:
from_secret: SCW_DEFAULT_ORGANIZATION_ID
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- develop
- stable
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
steps:
# Canonical amd64
debian-bookworm:
image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612
<<: *on-release
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential g++ wget
- *clean
- echo "import Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-amd64.zip -r release
release-debian-bookworm:
image: akkoma/releaser
<<: *on-release
environment: *scw-secrets
commands:
- export SOURCE=akkoma-amd64.zip
# AMD64
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64.zip
- /bin/sh /entrypoint.sh
# Ubuntu jammy (currently compatible)
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-ubuntu-jammy.zip
- /bin/sh /entrypoint.sh
debian-bullseye:
image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bullseye-20230612
<<: *on-release
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential g++ wget
- *clean
- echo "import Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-amd64-debian-bullseye.zip -r release
release-debian-bullseye:
image: akkoma/releaser
<<: *on-release
environment: *scw-secrets
commands:
- export SOURCE=akkoma-amd64-debian-bullseye.zip
# AMD64
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-debian-bullseye.zip
- /bin/sh /entrypoint.sh
# Canonical amd64-musl
musl:
image: hexpm/elixir:1.15.4-erlang-26.0.2-alpine-3.18.2
<<: *on-stable
environment:
MIX_ENV: prod
commands:
- apk add git gcc g++ musl-dev make cmake file-dev rclone wget zip imagemagick
- *clean
- *setup-hex
- *mix-clean
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-amd64-musl.zip -r release
release-musl:
image: akkoma/releaser
<<: *on-stable
environment: *scw-secrets
commands:
- export SOURCE=akkoma-amd64-musl.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-musl.zip
- /bin/sh /entrypoint.sh

View file

@ -1,93 +0,0 @@
labels:
platform: linux/arm64
depends_on:
- test
variables:
- &scw-secrets
SCW_ACCESS_KEY:
from_secret: SCW_ACCESS_KEY
SCW_SECRET_KEY:
from_secret: SCW_SECRET_KEY
SCW_DEFAULT_ORGANIZATION_ID:
from_secret: SCW_DEFAULT_ORGANIZATION_ID
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- stable
- develop
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
steps:
# Canonical arm64
debian-bookworm:
image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612
<<: *on-release
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential g++ wget
- *clean
- echo "import Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-arm64.zip -r release
release-debian-bookworm:
image: akkoma/releaser:arm64
<<: *on-release
environment: *scw-secrets
commands:
- export SOURCE=akkoma-arm64.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-arm64-ubuntu-jammy.zip
- /bin/sh /entrypoint.sh
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-arm64.zip
- /bin/sh /entrypoint.sh
# Canonical arm64-musl
musl:
image: hexpm/elixir:1.15.4-erlang-26.0.2-alpine-3.18.2
<<: *on-stable
environment:
MIX_ENV: prod
commands:
- apk add git gcc g++ musl-dev make cmake file-dev rclone wget zip imagemagick
- *clean
- *setup-hex
- *mix-clean
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-arm64-musl.zip -r release
release-musl:
image: akkoma/releaser:arm64
<<: *on-stable
environment: *scw-secrets
commands:
- export SOURCE=akkoma-arm64-musl.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-arm64-musl.zip
- /bin/sh /entrypoint.sh

View file

@ -1,50 +1,15 @@
labels:
platform: linux/amd64
depends_on:
- test
- build-amd64
variables:
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- develop
- stable
- refs/tags/v*
- refs/tags/stable-*
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- refs/tags/stable-*
- &on-point-release
when:
event:
- push
branch:
- develop
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
when:
event:
- push
branch:
- develop
- stable
steps:
docs:
<<: *on-point-release
environment:
CI: "true"
SCW_ACCESS_KEY:

View file

@ -1,52 +0,0 @@
labels:
platform: linux/amd64
variables:
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- develop
- stable
- refs/tags/v*
- refs/tags/stable-*
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- refs/tags/stable-*
- &on-point-release
when:
event:
- push
branch:
- develop
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
steps:
lint:
image: akkoma/ci-base:1.18-otp27
<<: *on-pr-open
environment:
MIX_ENV: test
commands:
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix compile
- mix format --check-formatted

104
.woodpecker/publish.yml Normal file
View file

@ -0,0 +1,104 @@
when:
event:
- push
- tag
branch:
- develop
- stable
evaluate: 'SKIP_DEVELOP != "YES" || CI_COMMIT_BRANCH != "develop"'
matrix:
include:
# Canonical amd64
- ARCH: amd64
SUFFIX:
IMG_VAR: debian-bookworm-20230612
UBUNTU_EXPORT: YES
# old debian variant of amd64
- ARCH: amd64
SUFFIX: -debian-bullseye
IMG_VAR: debian-bullseye-20230612
# Canonical amd64-musl
- ARCH: amd64
SUFFIX: -musl
IMG_VAR: alpine-3.18.2
SKIP_DEVELOP: YES
# Canonical arm64
- ARCH: arm64
SUFFIX:
RELEASER_TAG: :arm64
IMG_VAR: debian-bookworm-20230612
UBUNTU_EXPORT: YES
# Canonical arm64-musl
- ARCH: arm64
SUFFIX: -musl
RELEASER_TAG: :arm64
IMG_VAR: alpine-3.18.2
SKIP_DEVELOP: YES
labels:
platform: linux/${ARCH}
steps:
# Canonical amd64
build:
image: hexpm/elixir:1.15.4-erlang-26.0.2-${IMG_VAR}
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands: |
# install deps
case "${IMG_VAR}" in
debian*)
apt-get update && apt-get install -y \
cmake libmagic-dev rclone zip git wget \
build-essential g++ imagemagick libmagic-dev
;;
alpine*)
apk add git gcc g++ musl-dev make cmake file-dev rclone wget zip imagemagick
;;
*)
echo "No package manager defined for ${BASE_IMG}!"
exit 1
esac
# clean leftovers
rm -rf release
rm -rf _build
rm -rf /root/.mix
# setup
echo "import Config" > config/prod.secret.exs
mix local.hex --force
mix local.rebar --force
export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"}
export PLEROMA_BUILD_BRANCH=$BUILD_TAG
# actually build and zip up
mix deps.get --only prod
mix release --path release
zip akkoma-${ARCH}${SUFFIX}.zip -r release
release:
image: akkoma/releaser${RELEASER_TAG}
environment:
SCW_ACCESS_KEY:
from_secret: SCW_ACCESS_KEY
SCW_SECRET_KEY:
from_secret: SCW_SECRET_KEY
SCW_DEFAULT_ORGANIZATION_ID:
from_secret: SCW_DEFAULT_ORGANIZATION_ID
commands: |
export SOURCE=akkoma-${ARCH}${SUFFIX}.zip
export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/$${SOURCE}
/bin/sh /entrypoint.sh
if [ "${UBUNTU_EXPORT}" = "YES" ] ; then
# Ubuntu jammy (currently compatible with our default debian builds)
export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-${ARCH}-ubuntu-jammy.zip
/bin/sh /entrypoint.sh
fi

View file

@ -1,66 +1,24 @@
labels:
platform: linux/amd64
depends_on:
- lint
when:
- event: pull_request
matrix:
# test the lowest and highest versions
ELIXIR_VERSION:
- 1.14
- 1.18
OTP_VERSION:
- 25
- 27
include:
- ELIXIR_VERSION: 1.14
OTP_VERSION: 25
LINT: NO
PLATFORM: linux/amd64
- ELIXIR_VERSION: 1.18
OTP_VERSION: 27
LINT: YES
PLATFORM: linux/arm64
variables:
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- develop
- stable
- refs/tags/v*
- refs/tags/stable-*
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- refs/tags/stable-*
- &on-point-release
when:
event:
- push
branch:
- develop
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
labels:
platform: ${PLATFORM}
services:
postgres:
image: postgres:15
when:
event:
- pull_request
environment:
POSTGRES_DB: pleroma_test_${ELIXIR_VERSION}_${OTP_VERSION}
POSTGRES_USER: postgres
@ -69,18 +27,20 @@ services:
steps:
test:
image: akkoma/ci-base:${ELIXIR_VERSION}-otp${OTP_VERSION}
<<: *on-pr-open
environment:
MIX_ENV: test
POSTGRES_DB: pleroma_test_${ELIXIR_VERSION}_${OTP_VERSION}
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
DB_HOST: postgres
LINT: ${LINT}
commands:
- sh -c 'uname -a && cat /etc/os-release || :'
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix compile
- test "${LINT}" = "NO" || mix format --check-formatted
- mix ecto.drop -f -q
- mix ecto.create
- mix ecto.migrate

View file

@ -5,13 +5,48 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## Unreleased
### REMOVED
### Added
- status responses include two new fields for ActivityPub cross-referencing: `akkoma.quote_apid` and `akkoma.in_reply_to_apid`
### Fixed
- replies and quotes to unresolvable posts now fill out IDs for replied to
status, user or quoted status with a 404-ing ID to make them recognisable as
replies/quotes instead of pretending they're root posts
### Changed
## 2025.10
### REMOVED
- Dropped `accepts_chat_messages` column from users table in database;
it has been unused for almost 3 years
- Healthcheck responses no longer contain job queue data;
it was useless anyway due to lacking any temporal information about failures
and more complete data can be obtained from Prometheus metrics.
### Added
- We mark our MFM posts as FEP-c16b compliant, and retain remote HTML representations for incoming posts marked as FEP-c16b-compliant. (Safety scrubbers are still applied)
- Prometheus stats now exposes failed ActivityPub deliveries
which failed all attempts and the failure reason
- status and user HTML pages now provide ActivityPub alternate links
- the `prune_objects` mix task no longer deletes pinned posts by default
- added `--prune-pinned` and `--keep-followed {posts,full,none}` options to the `prune_objects` mix task
- timestamps of incoming HTTP signatures are now verified.
By default up to two hour old signatures and a maximal clock skew
of 40 min for future timestamps or explicit expiry deadlines are accepted
- Added `short_description` field to `api/v1/instance` for Mastodon compatibility; the corresponding
new setting `:pleroma, :instance, :short_description` is also preferred for nodeinfo use
- Note AP objects now expose full `replies` collections and those collections can be accessed on their own;
previously only self-replies were inlined as an anonymous collection into the Note object
- Added a reference Grafana dashboard and improved documentation for Prometheus metrics
- New mix task `clean_inlined_replies` to delete some unused data from objects
- New mix task `resync_inlined_caches` to retroactively fix various issues with e.g. boosts, emoji reacts and likes
- It is now possible to allow outgoing requests to use HTTP2 via config option,
but due to bugs in the relevant backend this is not the default nor recommended.
- Prometheus metrics now expose count of scheduled and pending jobs per queue
### Fixed
- Internal actors no longer pretend to have unresolvable follow(er|ing) collections
@ -21,13 +56,43 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
this led e.g. to unlisted replies from Pleroma instances being partially treated as private posts
- fixed our fetch actor advertising bogus follower and following collection ActivityPub IDs
- fix network-path references not being handled by media proxy
- federation with bridgy now works
- remote signing keys are no longer refreshed multiple times per incoming request
- fix digest emails never being sent and clogging the job queue even if not enabled
- `api/v1/instance`s `uri` field now correctly shows the bare WebFinger domain
- fixed bug leading to `content` and the `contentMap` entry of the primary language to sometimes diverge
- reloading emoji with a broken `pack.json` file being on disk no longer crashes the whole server
- fixed blocked servers being able to access local objects when authorized fetch isn't enabled
even when the remote server identifies itself
- fixed handling of inlined "featured" collections
- fixed user endpoint serving invalid ActivityPub for minimal, authfetch-fallback responses
- remote emoji reacts from IceShrimp.NET instances are now handled consistently and always merged with identical other emoji reactions
- ActivityPub requests signatures are now renewed when following redirects making sure path and host actually match the final URL
- private replies no longer increase the publicly visible reply counter
- unblock activities are no longer federated when block federation is disabled (the default)
- fix like activity database IDs rendering as misattributed posts
### Changed
- Internal and relay actors are now again represented with type "Application"
- `cleanup_attachments` is now enabled by default
- shared inboxes are now generally preferred over personal inboxes, cutting down on duplicate publishing churn
- instance actors are now really of type `Service`
- ActivityPub delivery attempts are spaced out more giving up after 3h instead of ~20min before
- ActivityPub delivery attempts are spaced out more and increased by one
now giving up after 24h instead of ~20min by default before
- inboxes now fake a success reply on incoming Delete documents whose signing key is unknown but gone;
this prevents older Mastodon from repeatedly trying to deliver Deletes of actors we never knew anyway
- The config option `config :pleroma, :http, :pool_max_idle_time` was removed; it never actually
did anything and was redundant with `config :pleroma, :http, :pool_timeout` which actually works.
- repeated attempt to process incoming ActivityPub objects are spaced out more, allowing unreachable remotes
more time to come back up when e.g. processing repeats of a post not yet locally known
- `/api/v1/statuses/:id/reblog` now honours all possible visibilities except `list` and `conversation`
instead of mapping them down to a boolean private/public
- we no longer repeatedly try to deliver to explicitly deleted inboxes
- Config option `Pleroma.Web.MediaProxy.Invalidation.Http, :options` and
the `:http` subkey of `:media_proxy, :proxy_opts` now only accept
adapter-related settings inside the `:adapter` subkey, no longer on the top-level
- follow requests are now ordered reverse chronologically
## 2025.03

View file

@ -34,7 +34,8 @@ Depending on instance configuration the same may be true for GET requests.
### FEP-c16b: Formatting MFM functions
The optional extension term `htmlMfm` is currently not used.
We set the optional extension term `htmlMfm: true` when using content type "text/x.misskeymarkdown".
Incoming messages containing `htmlMfm: true` will not have their content re-parsed.
## Nodeinfo

View file

@ -53,7 +53,7 @@ defp fetch_timelines(user) do
fetch_public_timeline(user, :local)
fetch_public_timeline(user, :tag)
fetch_notifications(user)
fetch_favourites(user)
fetch_favourited_with_fav_id(user)
fetch_long_thread(user)
fetch_timelines_with_reply_filtering(user)
end
@ -378,21 +378,21 @@ defp fetch_notifications(user) do
end
defp fetch_favourites(user) do
first_page_last = ActivityPub.fetch_favourites(user) |> List.last()
first_page_last = ActivityPub.fetch_favourited_with_fav_id(user) |> List.last()
second_page_last =
ActivityPub.fetch_favourites(user, %{:max_id => first_page_last.id}) |> List.last()
ActivityPub.fetch_favourited_with_fav_id(user, %{:max_id => first_page_last.id}) |> List.last()
third_page_last =
ActivityPub.fetch_favourites(user, %{:max_id => second_page_last.id}) |> List.last()
ActivityPub.fetch_favourited_with_fav_id(user, %{:max_id => second_page_last.id}) |> List.last()
forth_page_last =
ActivityPub.fetch_favourites(user, %{:max_id => third_page_last.id}) |> List.last()
ActivityPub.fetch_favourited_with_fav_id(user, %{:max_id => third_page_last.id}) |> List.last()
Benchee.run(
%{
"Favourites" => fn opts ->
ActivityPub.fetch_favourites(user, opts)
ActivityPub.fetch_favourited_with_fav_id(user, opts)
end
},
inputs: %{
@ -465,7 +465,8 @@ defp render_timelines(user) do
notifications = MastodonAPI.get_notifications(user, opts)
favourites = ActivityPub.fetch_favourites(user)
favourites_keyed = ActivityPub.fetch_favourited_with_fav_id(user)
favourites = Pagiation.unwrap(favourites_keyed)
Benchee.run(
%{

View file

@ -80,8 +80,6 @@
IO.puts("RUM enabled: #{rum_enabled}")
IO.puts("PGroonga enabled: #{pgroonga_enabled}")
config :pleroma, Pleroma.ReverseProxy.Client, Pleroma.ReverseProxy.ClientMock
if File.exists?("./config/benchmark.secret.exs") do
import_config "benchmark.secret.exs"
else

View file

@ -51,6 +51,11 @@
queue_target: 20_000,
migration_lock: nil
# password hash strength
config :argon2_elixir,
t_cost: 8,
parallelism: 2
config :pleroma, Pleroma.Captcha,
enabled: true,
seconds_valid: 300,
@ -182,20 +187,22 @@
# Configures http settings, upstream proxy etc.
config :pleroma, :http,
pool_timeout: :timer.seconds(5),
pool_timeout: :timer.seconds(60),
receive_timeout: :timer.seconds(15),
proxy_url: nil,
protocols: [:http1],
user_agent: :default,
pool_size: 10,
adapter: [],
# see: https://hexdocs.pm/finch/Finch.html#start_link/1
pool_max_idle_time: :timer.seconds(30)
adapter: []
config :pleroma, :instance,
name: "Akkoma",
email: "example@example.com",
notify_email: "noreply@example.com",
# allowed to use HTML (if short_description is set)
description: "Akkoma: The cooler fediverse server",
# only plain text (defaults to description)
short_description: nil,
background_image: "/images/city.jpg",
instance_thumbnail: "/instance/thumbnail.jpeg",
limit: 5_000,
@ -599,6 +606,7 @@
remote_fetcher: 2,
attachments_cleanup: 1,
new_users_digest: 1,
digest_emails: 1,
mute_expire: 5,
search_indexing: 10,
nodeinfo_fetcher: 1,
@ -619,7 +627,7 @@
config :pleroma, :workers,
retries: [
federator_incoming: 5,
federator_outgoing: 5,
federator_outgoing: 6,
search_indexing: 2,
rich_media_backfill: 1
],

View file

@ -298,7 +298,7 @@
key: :ssl,
label: "Use SSL",
type: :boolean,
description: "Use Implicit SSL/TLS. e.g. port 465"
description: "Use Implicit SSL/TLS. e.g. port 465; default: true"
},
%{
group: {:subgroup, Swoosh.Adapters.SMTP},
@ -566,7 +566,16 @@
key: :description,
type: :string,
description:
"The instance's description. It can be seen in nodeinfo and `/api/v1/instance`",
"The instance's description. It may use HTML and can be seen in `/api/v1/instance` and nodeinfo if no short description is set",
suggestions: [
"Very cool instance"
]
},
%{
key: :short_description,
type: :string,
description:
"A brief instance description. It must be plain text and can be seen in `/api/v1/instance` and nodeinfo",
suggestions: [
"Very cool instance"
]
@ -3241,8 +3250,7 @@
suggestions: [
Pleroma.Web.Preload.Providers.Instance,
Pleroma.Web.Preload.Providers.User,
Pleroma.Web.Preload.Providers.Timelines,
Pleroma.Web.Preload.Providers.StatusNet
Pleroma.Web.Preload.Providers.Timelines
]
}
]

View file

@ -135,9 +135,7 @@
config :pleroma, Pleroma.Web.WebFinger, update_nickname_on_user_fetch: false
config :pleroma, :side_effects,
ap_streamer: Pleroma.Web.ActivityPub.ActivityPubMock,
logger: Pleroma.LoggerMock
config :pleroma, :side_effects, ap_streamer: Pleroma.Web.ActivityPub.ActivityPubMock
config :pleroma, Pleroma.Search, module: Pleroma.Search.DatabaseSearch

View file

@ -26,13 +26,19 @@ Replaces embedded objects with references to them in the `objects` table. Only n
## Prune old remote posts from the database
This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database. Pruned posts may be refetched in some cases.
This will selectively prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database. Pruned posts may be refetched in some cases.
!!! note
The disk space will only be reclaimed after a proper vacuum. By default, Postgresql does this for you on a regular basis, but if your instance has been running for a long time and there are many rows deleted, it may be advantageous to use `VACUUM FULL` (e.g. by using the `--vacuum` option).
The disk space used up by deleted rows only becomes usable for new data after a vacuum.
By default, Postgresql does this for you on a regular basis, but if you delete a lot at once
it might be advantageous to also manually kick off a vacuum and statistics update using `VACUUM ANALYZE`.
**However**, the freed up space is never returned to the operating system unless you run
the much more heavy `VACUUM FULL` operation. This expensive but comprehensive vacuum mode
can be scheduled using the `--vacuum` option.
!!! danger
You may run out of disk space during the execution of the task or vacuuming if you don't have about 1/3rds of the database size free. Vacuum causes a substantial increase in I/O traffic, and may lead to a degraded experience while it is running.
You may run out of disk space during the execution of the task or full vacuuming if you don't have about 1/3rds of the database size free. `VACUUM FULL` causes a substantial increase in I/O traffic, needs full table locks and thus renders the instance basically unusable while it's running.
=== "OTP"
@ -48,15 +54,41 @@ This will prune remote posts older than 90 days (configurable with [`config :ple
### Options
The recommended starting point and configuration for small and medium-sized instances is:
```sh
prune_objects --keep-followed posts --keep-threads --keep-non-public
# followed by
prune_orphaned_activities --no-singles
prune_orphaned_activities --no-arrays
# and finally, using psql to manually run:
# VACUUM ANALYZE;
# REINDEX TABLE objects;
# REINDEX TABLE activities;
```
This almost certainly won't delete stuff you're interested in and
makes sure the database is immediately utilising the newly freed up space.
If you need more aggressive database size reductions or if this proves too costly to run for you
you can drop restrictions and/or use the `--limit` option.
In the opposite case if everything goes through quickly,
you can combine the three CLI tasks into one for future runs using `--prune-orphaned-activities`
and perhaps even using a full vacuum (which implies a reindex) using `--vacuum` too.
Full details below:
- `--no-fix-replies-count` - Skip recalculating replies count for posts.
When using multiple batches of prunes with `--limit`, all but the last batch
should set this option to avoid unnecessary overhead.
- `--keep-followed <mode>` - If set to `posts` all posts and boosts of users with local follows will be kept.
If set to `full` it will additionally keep any posts such users interacted with; this requires `--keep-threads`.
By default this is set to `none` and followed users are not treated special.
- `--keep-threads` - Don't prune posts when they are part of a thread where at least one post has seen local interaction (e.g. one of the posts is a local post, or is favourited by a local user, or has been repeated by a local user...). It also won't delete posts when at least one of the posts in the thread has seen recent activity or is kept due to `--keep-followed`.
- `--keep-non-public` - Keep non-public posts like DM's and followers-only, even if they are remote.
- `--limit` - limits how many remote posts get pruned. This limit does **not** apply to any of the follow up jobs. If wanting to keep the database load in check it is thus advisable to run the standalone `prune_orphaned_activities` task with a limit afterwards instead of passing `--prune-orphaned-activities` to this task.
See documentation of other options for further hints.
- `--prune-orphaned-activities` - Also prune orphaned activities afterwards. Activities are things like Like, Create, Announce, Flag (aka reports)... They can significantly help reduce the database size.
- `--prune-pinned` - Also prune pinned posts; keeping pinned posts does not suffice to protect their threads from pruning, even when using `--keep-threads`.
Note, if using this option and pinned posts are pruned, they and their threads will just be refetched on the next user update. Therefore it usually doesn't bring much gain while incurring a heavy fetch load after pruning.
Note, if using this option and pinned posts are pruned, they and their threads will just be refetched on the next user update. Therefore it usually doesn't bring much gain while incurring a heavy fetch load after pruning.
One exception to this is if you already need to use a relatively small `--limit` to keep downtime manageable or even being able to run it without downtime. Retaining pinned posts adds a mostly constant overhead which will impact repeated runs with small limit much more than one full prune run.
- `--vacuum` - Run `VACUUM FULL` after the objects are pruned. This should not be used on a regular basis, but is useful if your instance has been running for a long time before pruning.
## Prune orphaned activities from the database
@ -223,3 +255,48 @@ to the current day.
```sh
mix pleroma.database prune_task
```
## Clean inlined replies lists
Those inlined arrays of replies AP IDs are not used (anymore).
Delete them to free up a little bit of space.
=== "OTP"
```sh
./bin/pleroma_ctl database clean_inlined_replies
```
=== "From Source"
```sh
mix pleroma.database clean_inlined_replies
```
## Resync data inlined into posts
For legacy and performance reasons some data, e.g. relating to likes and boosts,
is currently copied and inlined into referencing post objects. Occasionally this data
may desync from the actual authoritative activity and object data stored in the
database which may lead to cosmetic but also functional issues.
For example a particular user may appear unable to like a post.
Run this task to detect and fix such desyncs.
=== "OTP"
```sh
./bin/pleroma_ctl database resync_inlined_caches
```
=== "From Source"
```sh
mix pleroma.database resync_inlined_caches
```
### Options
- `--no-announcements` - Skip fixing announcement counters and lists
- `--no-likes` - Skip fixing like counters and lists
- `--no-reactions` - Skip fixing inlined emoji reaction data
- `--no-replies-count` - Skip fixing replies counters (purely cosmetic)

Binary file not shown.

After

Width:  |  Height:  |  Size: 165 KiB

View file

@ -1,45 +1,275 @@
# Monitoring Akkoma
If you run akkoma, you may be inclined to collect metrics to ensure your instance is running smoothly,
and that there's nothing quietly failing in the background.
If you run Akkoma it's a good idea to collect metrics to ensure your instance is running smoothly
without anything silently failing and to aid troubleshooting if something actually goes wrong.
To facilitate this, akkoma exposes a dashboard and prometheus metrics to be scraped.
To facilitate this, Akkoma exposes Prometheus metrics to be scraped for long-term 24/7 monitoring
as well as two built-in dashboards with ephemeral info about just the current status.
Setting up Prometheus scraping is highly recommended.
## Prometheus
See: [export\_prometheus\_metrics](../../configuration/cheatsheet#instance)
This method gives a more or less complete overview and allows for 24/7 long-term monitoring.
To scrape prometheus metrics, we need an oauth2 token with the `admin:metrics` scope.
Prometheus metric export can be globally disabled if you really want to,
but it doesn't cause much overhead and is enabled by default: see the
[export\_prometheus\_metrics](../../configuration/cheatsheet#instance) config option.
Consider using [constanze](https://akkoma.dev/AkkomaGang/constanze) to make this easier -
Akkoma only exposes the current state of all metrics; to make it actually useful
an external scraper needs to regularly fetch and store those values.
An overview of the necessary steps follows.
### Step 1: generate a token
Accessing Prometheus metrics requires an OAuth2 token with the `admin:metrics` (sub)scope.
An access token with only this subscope will be unable to do anything at all _except_ looking at the exported metrics.
Assuming your account has access to the `admin` scope category,
a suitable metrics-only token can be conveniently generated using
[constanze](https://akkoma.dev/AkkomaGang/constanze).
If you didn't already do so before, set up `constanze` by running `constanze configure`.
Now getting the token is as simple as running the below command and following its instructions:
```bash
constanze token --client-app --scopes "admin:metrics" --client-name "Prometheus"
```
or see `scripts/create_metrics_app.sh` in the source tree for the process to get this token.
Alternatively you may manually call into the token and app APIs;
check `scripts/create_metrics_app.sh` in the source tree for the process for this.
Once you have your token of the form `Bearer $ACCESS_TOKEN`, you can use that in your prometheus config:
The resulting token will have the form `Bearer $ACCESS_TOKEN`;
in the following replace occurrences of `$ACCESS_TOKEN` with the actual token string everywhere.
If you wish, you can now check the token works by manually using it to query the current metrics with `curl`:
```yaml
- job_name: akkoma
scheme: https
authorization:
credentials: $ACCESS_TOKEN # this should have the bearer prefix removed
metrics_path: /api/v1/akkoma/metrics
static_configs:
- targets:
- example.com
!!! note
After restarting the instance it may take a couple minutes for content to show up in the metric endpoint
```sh
curl -i -H 'Authorization: Bearer $ACCESS_TOKEN' https://myinstance.example/api/v1/akkoma/metrics | head -n 100
```
## Dashboard
### Step 2: set up a scraper
You may use the eponymous [Prometheus](https://prometheus.io/)
or anything compatible with it like e.g. [VictoriaMetrics](https://victoriametrics.com/).
The latter claims better performance and storage efficiency.
Both of them can usually be easily installed via distro-packages or docker.
Depending on your distro or installation method the preferred way to change the CLI arguments and the location of config files may differ; consult the documentation of your chosen method to find out.
Of special interest is the location of the prometheus scraping config file
and perhaps the maximal data retention period setting,
to manage used disk space and make sure you keep records long enough for your purposes.
It might also be a good idea to set up a minimal buffer of free disk space if you're tight on that;
with VictoriaMetrics this can be done via the `-storage.minFreeDiskSpaceBytes 1GiB` CLI flag.
Ideally the scraper runs on a different machine than Akkoma to be able to
distinguish Akkoma downtime from scraper downtime, but this is not strictly necessary.
Once you've installed one of them, it's time to add a job for scraping Akkoma.
For Prometheus the `scrape_configs` section will usually be added to the main config file,
for VictoriaMetrics this will be in the file passed via `-promscrape.config file_path`.
In either case a `scrape_configs` with just one job for a single Akkoma instance will look like this:
```yaml
scrape_configs:
- job_name: 'akkoma_scrape_job'
scheme: https
metrics_path: /api/v1/akkoma/metrics
static_configs:
- targets: ['myinstance.example']
# reminder: no Bearer prefix here!
bearer_token: '$ACCESS_TOKEN'
# One minute should be frequent enough, but you can choose any value, or rely on the global default.
# However, for later use in Grafana you want to match this exactly, thus make note.
scrape_interval: '1m'
```
Now (re)start the scraper service, wait for a multiple of the scrape interval and check logs
to make sure no errors occur.
### Step 3: visualise the collected data
At last it's time to actually get a look at the collected data.
There are many options working with Prometheus-compatible backends
and even software which can act as both the scraper _and_ visualiser in one service.
Here we'll just deal with Grafana, since we ship a reference Grafana dashboard you can just import.
There are again multiple options for [installing Grafana](https://grafana.com/docs/grafana/latest/setup-grafana/)
and detailing all of them is out of scope here, but it's nothing too complicated if you already set up Akkoma.
Once you've got it running and are logged into Grafana,
you first need to tell it about the scraper, which acts as the “data source”.
For this go to the “Connections” category and select “Data Sources”.
Here click the shiny button for adding a new data source,
select the “Prometheus” type and fill in the details
matching how you set up the scraper itself.
In particular, **use the same `Scrape Interval` value!**
Now you're ready to go to the “Dashboards” page.
Click the “New” button, select “Import” and upload or copy the contents of
the reference dashboard `installation/grafana_dashboard.json` from Akkoma's source tree.
It will now ask you to select the data source you just configured,
as well as for the name of the job in your scraper config
and your instance domain+port identifier.
For the example settings from step 2 above
the latter two are `akkoma_scrape_job` and `myinstance.example:443`.
*(`443` is the default port for HTTPS)*
That's it, you've got a fancy dashboard with long-term, 24/7 metrics now!
Updating the dashboard can be done by just repeating the import process.
Here's an example taken from a healthy, small instance where
nobody was logged in except for a few minutes
resulting in an (expected) spike in incoming requests:
![Full view of the reference dashboard as it looked at the time of writing](img/grafana_dashboard.webp)
!!! note
By default the dashboard does not count downtime of the data source, e.g. the scraper,
towards instance downtime, but a commented out alternative query is provided in the
panel edit menu. If you host the scraper on the same machine as Akkoma you likely want to swap this out.
### Remarks on interpreting the data
What kind of load and even error ratio is normal or irregular can depend on
the instance size, chosen configuration and federation partners.
*(E.g. when following relays, much more activities will be received and the received activities will in turn kick off more internal work and also external fetches raising the overall load)*
Here the 24/7 nature of the metric history helps out, since we can just
look at "known-good" time spans to get a feeling for what's normal and good.
If issues without an immediately clear origin crop up,
we can look for deviations from this known-good pattern.
Still there are some things to be aware of and some common guidelines.
#### Panel-specific time ranges
A select few panels are set to use a custom time range
independent from what you chose for the dashboard as a whole.
This is indicated with blue text next to the panel title.
Those custom times only take precedence over _relative_ global time ranges.
If you choose fixed start and end dates in the global setting
*(for example to look at a long-term trend after a specific change)*
this will take precedence over custom panel times and everything follows the date range.
In the image above e.g. the uptime percent gauge thus considers the entire last week
while most other panels only display data for the last 6 hours.
#### Long-term trends
The lower section of the dashboard with 24h and 48h averages is particularly useful for observing long-term trends.
E.g. how a patch, version upgrade or database `VACUUM`/`REINDEX` affects performance.
For small time ranges you can still look at them to make sure the values are at a reasonable level,
but the upper part is probably more interesting.
#### Processing times total
The actions counted by various “time per second” or “time per minute” stats are partially overlapping.
E.g. the time to conclude an HTTP response includes the time it took to run whatever
query was needed to fetch the necessary information from the database.
However not all database queries originate from HTTP requests.
But also, not all of the recorded time might have actually consumed CPU cycles.
Some jobs, e.g. `RemoteFetcherWorker`, will need to fetch data over the network
and often most of the time from job start to completion is just spent waiting
for a reply from the remote server to arrive.
Even a few HTTP endpoints will need to fetch remote data before completing;
e.g. `/inbox` needs to verify the signature of the submission, but if the signing key
wasn't encountered before it first needs to be fetched.
Getting deliveries from such unknown users happens more often than you might initially assume
due to e.g. Mastodon federating actor deletions to _every server it knows about_
regardless of whether there was ever any contact with the deleted user.
*(Meaning in the end the key lookup will just result in a `410 Gone` response and us dropping the superfluous `Delete`)*
Thus if you just add up all timing stats you'll count some actions multiple times
and may end up consistently with more processing time being done than time elapsing on the wall clock
even though your server is neither overloaded nor subject to relative time dilation.
For keeping track of CPU and elixir-side(!) IO bottlenecks,
the corresponding BEAM VM gauges are much better indicators.
They should be zero most of the time and never exceed zero by much.
!!! note
The BEAM VM (running our elixir code) cannot know about
the database's IO activity or CPU cycle consumption,
thus this gauge is no indicator for database bottlenecks.
#### Job failures and federation
Most jobs are automatically retried and may fail (“exception”) due to no fault of your own instance
e.g. network issues or a remote server temporarily being overloaded.
Thus seeing some failures here is normal and nothing to be concerned about;
usually it will just resolve itself on the next retry.
Consistent failures and/or a relatively high failure-to-success ratio though
are worth looking into using logs.
Of particular importance are Publisher jobs;
they handle delivering your local content to its intended remote recipients.
Again some PublisherWorker exceptions are no cause for concern,
but if all retries for a delivery fail, this means a remote recipient never
received something they should've seen.
Due to its particular importance, such final delivery failures are
recorded again in a separate metric.
The reference dashboard shows it in the “AP delivery failures” panel.
Everything listed there exhausted all retries without success.
Ideally this will always be empty and for small instances this should be the
case most of the time.
However, whenever a remote instance which once interacted with
your local instance in the past is decommissioned, delivery failures will likely
eventually show up in your metrics. For example:
- a local user might be followed by a user from the dead instance
- a local post was in the past fetched by the dead instance and this post is now deleted;
Akkoma will attempt to deliver the `Delete` to the dead instance even if there's no follow relationship
Delivery failures for such dead instances will typically list a reason like
`http_410`, `http_502`, `http_520`-`http_530` (cloudflared instances), `econnrefused`, `nxdomain` or just `timeout`.
If all deliveries to a given remote instance consistently fail for a longer time,
Akkoma will mark it as permanently unreachable and stop even attempting to deliver
to it meaning the errors should go away after a while.
*(If Akkoma sees activity from the presumed dead instance again it will resume deliveries for future content, but anything in the past will remain lost)*
Large instances with many users are more likely to have (had) some relationship to
such recently decommissioned instances and thus might see failures here more often
even if nothing is wrong with the local Akkoma instance.
If this makes too much noise, consider filtering out telltale delivery failures.
On the opposite side of things, a `http_401` error for example is always worth looking into!
## Built-in Dashboard
Administrators can access a live dashboard under `/phoenix/live_dashboard`
giving an overview of uptime, software versions, database stats and more.
The dashboard also includes a variation of the prometheus metrics, however
they do not exactly match due to respective limitations of the dashboard
and the prometheus exporter.
Even more important, the dashboard collects metrics locally in the browser
only while the page is open and cannot give a view on their past history.
For proper monitoring it is recommended to set up prometheus.
This dashboard can also show a limited subset of Prometheus metrics,
however besides being limited it only starts collecting data when opening
the corresponding page in the browser and the history only exists in ephemeral browser memory.
When navigating away from the page, all history is gone.
However, this is not this dashboards main purpose anyway.
The usefulness of this built-in dashboard lies in the insights into the current state of
the BEAM VM running Akkoma's code and statistics about the database and its performance
as well as database diagnostics.
BEAM VM stats include detailed memory consumption breakdowns
and a full list of running processes for example.
## Oban Web
This too requires administrator rights to access and can be found under `/akkoma/oban` if enabled.
The exposed aggregate info is mostly redundant with job statistics already tracked in Prometheus,
but it additionally also:
- shows full argument and meta details for each job
- allows interactively deleting or manually retrying jobs
*(keep this in mind when granting people administrator rights!)*
However, there are two caveats:
1. Just as with the other built-in dashboard, data is not kept around
(although here a **short** backlog actually exists);
when you notice an issue during use and go here to check it likely is already too late.
Job details and history only exist while the jobs are still in the database;
by default failed and succeeded jobs will disappear after about a minute.
2. This dashboard comes with some seemingly constant-ish overhead.
For large instances this appears to be negligible, but small instances on weaker hardware might suffer.
Thus this dashboard can be disabled in the [config](../cheatsheet.md#oban-web).

View file

@ -41,12 +41,24 @@ This is a list of clients that are known to work with Akkoma.
- Platforms: Android
- Features: MastoAPI, No Streaming, Emoji Reactions, Text Formatting, FE Stickers
### Pachli
- Homepage: <https://pachli.app/>
- Source Code: <https://github.com/pachli/pachli-android>
- Contact: [@pachli@mastodon.social](https://mastodon.social/users/pachli)
- Platforms: Android
- Features: MastoAPI, No Streaming
### Tusky
!!! warning
Versions after v30.0 do not support Akkoma-compatible filters.
Consider using another client if you use any filters.
- Homepage: <https://tuskyapp.github.io/>
- Source Code: <https://github.com/tuskyapp/Tusky>
- Contact: [@ConnyDuck@mastodon.social](https://mastodon.social/users/ConnyDuck)
- Platforms: Android
- Features: MastoAPI, No Streaming
- Features: MastoAPI, No Streaming, **No Filters (beyond v30.0)**
### Subway Tooter
- Source Code: <https://github.com/tateisu/SubwayTooter/>

View file

@ -12,7 +12,12 @@ To add configuration to your config file, you can copy it from the base config.
* `name`: The instance's name.
* `email`: Email used to reach an Administrator/Moderator of the instance.
* `notify_email`: Email used for notifications.
* `description`: The instances description, can be seen in nodeinfo and `/api/v1/instance`.
* `short_description`: A brief, plain-text-only instance description.
Can be seen in nodeinfo and `/api/v1/instance`.
Defaults to `description` if unset
* `description`: The instance's more detailed description.
This is allowed to use HTML if `short_description` is set.
Can be seen in `api/v1/instance`.
* `limit`: Posts character limit (CW/Subject included in the counter).
* `description_limit`: The character limit for image descriptions.
* `remote_limit`: Hard character limit beyond which remote posts will be dropped.
@ -569,6 +574,8 @@ Available caches:
* `receive_timeout`: the amount of time, in ms, to wait for a remote server to respond to a request. (default: `15000`)
* `pool_timeout`: the amount of time, in ms, to wait to check out an HTTP connection from the pool. This likely does not need changing unless your instance is _very_ busy with outbound requests. (default `5000`)
* `protocols`: array of acceptable protocols for outgoing requests; by default both HTTP1 and HTTP2 are supported.
Due to Finch limitations multiplexing can only be used when this is set to exclusively HTTP2, but this will break federation with HTTP1-only instances.
* `proxy_url`: an upstream proxy to fetch posts and/or media with, (default: `nil`); for example `http://127.0.0.1:3192`. Does not support SOCKS5 proxy, only http(s).
* `send_user_agent`: should we include a user agent with HTTP requests? (default: `true`)
* `user_agent`: what user agent should we use? (default: `:default`), must be string or `:default`
@ -1186,7 +1193,7 @@ Each job has these settings:
* `:max_running` - max concurrently running jobs
* `:max_waiting` - max waiting jobs
### Translation Settings
## Translation Settings
Settings to automatically translate statuses for end users. Currently supported
translation services are DeepL and LibreTranslate. The supported command line tool is [Argos Translate](https://github.com/argosopentech/argos-translate).
@ -1216,3 +1223,12 @@ Translations are available at `/api/v1/statuses/:id/translations/:language`, whe
- `:command_argos_translate` - command for `argos-translate`. Can be the command if it's in your PATH, or the full path to the file (default: `argos-translate`).
- `:command_argospm` - command for `argospm`. Can be the command if it's in your PATH, or the full path to the file (default: `argospm`).
- `:strip_html` - Strip html from the post before translating it (default: `true`).
## Oban Web
The built-in Oban Web dashboard grants all administrators access to look at and modify the instance's job queue.
To enable or disable it the following setting can be set to `true` or `false` respectively:
```
config :oban_met, autostart: false
```

View file

@ -7,73 +7,73 @@ The configuration of Akkoma (and Pleroma) has traditionally been managed with a
1. Run the mix task to migrate to the database.
**Source:**
**Source:**
```
$ mix pleroma.config migrate_to_db
```
```
$ mix pleroma.config migrate_to_db
```
or
or
**OTP:**
**OTP:**
*Note: OTP users need Akkoma to be running for `pleroma_ctl` commands to work*
*Note: OTP users need Akkoma to be running for `pleroma_ctl` commands to work*
```
$ ./bin/pleroma_ctl config migrate_to_db
```
```
$ ./bin/pleroma_ctl config migrate_to_db
```
```
Migrating settings from file: /home/pleroma/config/dev.secret.exs
```
Migrating settings from file: /home/pleroma/config/dev.secret.exs
Settings for key instance migrated.
Settings for group :pleroma migrated.
```
Settings for key instance migrated.
Settings for group :pleroma migrated.
```
2. It is recommended to backup your config file now.
```
cp config/dev.secret.exs config/dev.secret.exs.orig
```
```
cp config/dev.secret.exs config/dev.secret.exs.orig
```
3. Edit your Akkoma config to enable database configuration:
```
config :pleroma, configurable_from_database: true
```
```
config :pleroma, configurable_from_database: true
```
4. ⚠️ **THIS IS NOT REQUIRED** ⚠️
Now you can edit your config file and strip it down to the only settings which are not possible to control in the database. e.g., the Postgres (Repo) and webserver (Endpoint) settings cannot be controlled in the database because the application needs the settings to start up and access the database.
Now you can edit your config file and strip it down to the only settings which are not possible to control in the database. e.g., the Postgres (Repo) and webserver (Endpoint) settings cannot be controlled in the database because the application needs the settings to start up and access the database.
Any settings in the database will override those in the config file, but you may find it less confusing if the setting is only declared in one place.
Any settings in the database will override those in the config file, but you may find it less confusing if the setting is only declared in one place.
A non-exhaustive list of settings that are only possible in the config file include the following:
A non-exhaustive list of settings that are only possible in the config file include the following:
* config :pleroma, Pleroma.Web.Endpoint
* config :pleroma, Pleroma.Repo
* config :pleroma, configurable\_from\_database
* config :pleroma, :database, rum_enabled
* config :pleroma, :database, pgroonga_enabled
* config :pleroma, :connections_pool
* config :pleroma, Pleroma.Web.Endpoint
* config :pleroma, Pleroma.Repo
* config :pleroma, configurable\_from\_database
* config :pleroma, :database, rum_enabled
* config :pleroma, :database, pgroonga_enabled
* config :pleroma, :connections_pool
Here is an example of a server config stripped down after migration:
Here is an example of a server config stripped down after migration:
```
use Mix.Config
```
use Mix.Config
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "cool.pleroma.site", scheme: "https", port: 443]
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "cool.pleroma.site", scheme: "https", port: 443]
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "akkoma",
password: "MySecretPassword",
database: "akkoma_prod",
hostname: "localhost"
config :pleroma, Pleroma.Repo,
adapter: Ecto.Adapters.Postgres,
username: "akkoma",
password: "MySecretPassword",
database: "akkoma_prod",
hostname: "localhost"
config :pleroma, configurable_from_database: true
```
config :pleroma, configurable_from_database: true
```
5. Restart your instance and you can now access the Settings tab in admin-fe.
@ -82,28 +82,28 @@ The configuration of Akkoma (and Pleroma) has traditionally been managed with a
1. Run the mix task to migrate back from the database. You'll receive some debugging output and a few messages informing you of what happened.
**Source:**
**Source:**
```
$ mix pleroma.config migrate_from_db
```
```
$ mix pleroma.config migrate_from_db
```
or
or
**OTP:**
**OTP:**
```
$ ./bin/pleroma_ctl config migrate_from_db
```
```
$ ./bin/pleroma_ctl config migrate_from_db
```
```
10:26:30.593 [debug] QUERY OK source="config" db=9.8ms decode=1.2ms queue=26.0ms idle=0.0ms
SELECT c0."id", c0."key", c0."group", c0."value", c0."inserted_at", c0."updated_at" FROM "config" AS c0 []
```
10:26:30.593 [debug] QUERY OK source="config" db=9.8ms decode=1.2ms queue=26.0ms idle=0.0ms
SELECT c0."id", c0."key", c0."group", c0."value", c0."inserted_at", c0."updated_at" FROM "config" AS c0 []
10:26:30.659 [debug] QUERY OK source="config" db=1.1ms idle=80.7ms
SELECT c0."id", c0."key", c0."group", c0."value", c0."inserted_at", c0."updated_at" FROM "config" AS c0 []
Database configuration settings have been saved to config/dev.exported_from_db.secret.exs
```
10:26:30.659 [debug] QUERY OK source="config" db=1.1ms idle=80.7ms
SELECT c0."id", c0."key", c0."group", c0."value", c0."inserted_at", c0."updated_at" FROM "config" AS c0 []
Database configuration settings have been saved to config/dev.exported_from_db.secret.exs
```
2. Remove `config :pleroma, configurable_from_database: true` from your config. The in-database configuration still exists, but it will not be used. Future migrations will erase the database config before importing your config file again.

View file

@ -0,0 +1,48 @@
# General Performance and Optimisation Notes
# Oban Web
The built-in Oban Web dashboard has a seemingly constant-ish overhead
irrelevant to large instances but potentially
noticeable for small instances on low power systems.
Thus if the latter applies to your case, you might want to disable it;
see [the cheatsheet](../cheatsheet.md#oban-web).
# Relays
Subscribing to relays exposes your instance to a high volume flood of incoming activities.
This does not just incur the cost of processing those activities themselves, but typically
each activity may trigger additional work, like fetching ancestors and child posts to
complete the thread, refreshing user profiles, etc.
Furthermore the larger the count of activities and objects in your database the costlier
all database operations on these (highly important) tables get.
Carefully consider whether this is worth the cost
and if you experience performance issues unsubscribe from relays.
Regularly pruning old remote posts and orphaned activities is also especially important
when following relays or just having unfollowed relays for performance reasons.
# Pruning old remote data
Over time your instance accumulates more and more remote data, mainly in form of posts and activities.
Chances are you and your local users do not actually care for the vast majority of those.
Consider regularly *(frequency highly dependent on your individual setup)* pruning such old and irrelevant remote data; see
[the corresponding `mix` tasks](../../../administration/CLI_tasks/database#prune-old-remote-posts-from-the-database).
# Database Maintenance
Akkoma's performance is highly dependent on and often bottle-necked by the database.
Taking good care of it pays off!
See the dedicated [PostgreSQL page](../postgresql.md).
# HTTP Request Cache
If your instance is frequently getting _many_ `GET` requests from external
actors *(i.e. everyone except logged-in local users)*, an additional
caching layer *(Akkoma already has some caching built-in and so might your reverse proxy)*
as described in the [Varnish Cache guide](varnish_cache.md)
might help alleviate the impact.
If this condition does **not** hold though,
setting up such a cache likely only worsens latency and wastes memory.

View file

@ -433,7 +433,7 @@ Response:
* On success: URL of the unfollowed relay
```json
{"https://example.com/relay"}
"https://example.com/relay"
```
## `POST /api/v1/pleroma/admin/users/invite_token`
@ -1173,20 +1173,23 @@ Loads JSON generated from `config/descriptions.exs`.
- Response:
```json
[
{
"id": 1234,
"data": {
"actor": {
"id": 1,
"nickname": "lain"
{
"items": [
{
"id": 1234,
"data": {
"actor": {
"id": 1,
"nickname": "lain"
},
"action": "relay_follow"
},
"action": "relay_follow"
},
"time": 1502812026, // timestamp
"message": "[2017-08-15 15:47:06] @nick0 followed relay: https://example.org/relay" // log message
}
]
"time": 1502812026, // timestamp
"message": "[2017-08-15 15:47:06] @nick0 followed relay: https://example.org/relay" // log message
}
],
"total": 1
}
```
## `POST /api/v1/pleroma/admin/reload_emoji`
@ -1215,24 +1218,10 @@ Loads JSON generated from `config/descriptions.exs`.
## `GET /api/v1/pleroma/admin/stats`
### Stats
**DEPRECATED; DO NOT USE**!!
- Query Params:
- *optional* `instance`: **string** instance hostname (without protocol) to get stats for
- Example: `https://mypleroma.org/api/v1/pleroma/admin/stats?instance=lain.com`
- Response:
```json
{
"status_visibility": {
"direct": 739,
"private": 9,
"public": 17,
"unlisted": 14
}
}
```
Returned information is only stubbed out.
The endpoint will be removed entirely in an upcoming release.
## `GET /api/v1/pleroma/admin/oauth_app`

View file

@ -32,7 +32,7 @@ Home, public, hashtag & list timelines further accept:
## Statuses
- `visibility`: has additional possible values `list` and `local` (for local-only statuses)
- `visibility`: has additional possible value `local` (for local-only statuses)
- `emoji_reactions`: additional field since Akkoma 3.2.0; identical to `pleroma/emoji_reactions`
Has these additional fields under the `pleroma` object:
@ -193,6 +193,7 @@ Accepts additional parameters:
- `exclude_visibilities`: will exclude the notifications for activities with the given visibilities. The parameter accepts an array of visibility types (`public`, `unlisted`, `private`, `direct`). Usage example: `GET /api/v1/notifications?exclude_visibilities[]=direct&exclude_visibilities[]=private`.
- `include_types`: will include the notifications for activities with the given types. The parameter accepts an array of types (`mention`, `follow`, `reblog`, `favourite`, `move`, `pleroma:emoji_reaction`, `pleroma:report`). Usage example: `GET /api/v1/notifications?include_types[]=mention&include_types[]=reblog`.
**Deprecated:** replaced by `types` which is equivalent but (by now) also supported by vanilla Mastodon.
## DELETE `/api/v1/notifications/destroy_multiple`
@ -215,7 +216,6 @@ Additional parameters can be added to the JSON body/Form data:
- `to`: A list of nicknames (like `admin@otp.akkoma.dev` or `admin` on the local server) that will be used to determine who is going to be addressed by this post. Using this will disable the implicit addressing by mentioned names in the `status` body, only the people in the `to` list will be addressed. The normal rules for post visibility are not affected by this and will still apply.
- `visibility`: string, besides standard MastoAPI values (`direct`, `private`, `unlisted`, `local` or `public`) it can be used to address a List by setting it to `list:LIST_ID`.
- `expires_in`: The number of seconds the posted activity should expire in. When a posted activity expires it will be deleted from the server, and a delete request for it will be federated. This needs to be longer than an hour.
- `in_reply_to_conversation_id`: Will reply to a given conversation, addressing only the people who are part of the recipient set of that conversation. Sets the visibility to `direct`.
## GET `/api/v1/statuses`

View file

@ -376,13 +376,8 @@ See [Admin-API](admin_api.md)
Pleroma Conversations have the same general structure that Mastodon Conversations have. The behavior differs in the following ways when using these endpoints:
1. Pleroma Conversations never add or remove recipients, unless explicitly changed by the user.
1. Pleroma Conversations never add or remove recipients (`accounts` key), unless explicitly changed by the user.
2. Pleroma Conversations statuses can be requested by Conversation id.
3. Pleroma Conversations can be replied to.
Conversations have the additional field `recipients` under the `pleroma` key. This holds a list of all the accounts that will receive a message in this conversation.
The status posting endpoint takes an additional parameter, `in_reply_to_conversation_id`, which, when set, will set the visiblity to direct and address only the people who are the recipients of that Conversation.
⚠ Conversation IDs can be found in direct messages with the `pleroma.direct_conversation_id` key, do not confuse it with `pleroma.conversation_id`.

View file

@ -267,17 +267,33 @@ special meaning to the potential local-scope identifier.
however those are also shown publicly on the local web interface
and are thus visible to non-members.
## List post scope
Messages originally addressed to a custom list will contain
a `listMessage` field with an unresolvable pseudo ActivityPub id.
# Deprecated and Removed Extensions
The following extensions were used in the past but have been dropped.
Documentation is retained here as a reference and since old objects might
still contain related fields.
## List post scope
Messages originally addressed to a custom list will contain
a `listMessage` field with an unresolvable pseudo ActivityPub id.
!!! note
    The concept did not work out well in practice: even remote servers
    that recognised the `listMessage` extension were unaware of the state of the
    list, resulting in weird desyncs in thread display and handling between
    servers.
    It also never found its way into any known clients or frontends.
A more consistent superset of what this was able to actually do
can be achieved without ActivityPub extensions by explicitly addressing
all intended participants without inline mentions.
While true federated and moderated "lists" or "groups"
will need more work and a different approach.
Thus support for it was removed and it is recommended
to not create any new implementation of it.
## Actor endpoints
The following endpoints used to be present:

View file

@ -1,26 +1,26 @@
certifi==2022.9.24
charset-normalizer==2.1.1
click==8.1.3
ghp-import==2.1.0
idna==3.4
importlib-metadata==4.12.0
Jinja2==3.1.2
Markdown==3.3.7
markdown-include==0.7.0
MarkupSafe==2.1.1
mergedeep==1.3.4
mkdocs==1.4.2
mkdocs-material==8.5.9
mkdocs-material-extensions==1.1
packaging==21.3
Pygments==2.13.0
pymdown-extensions==9.8
pyparsing==3.0.9
python-dateutil==2.8.2
PyYAML==6.0
pyyaml_env_tag==0.1
requests==2.28.1
six==1.16.0
urllib3==1.26.12
watchdog==2.1.9
zipp==3.8.0
certifi
charset-normalizer
click
ghp-import
idna
importlib-metadata
Jinja2
Markdown
markdown-include
MarkupSafe
mergedeep
mkdocs
mkdocs-material
mkdocs-material-extensions
packaging
Pygments
pymdown-extensions
pyparsing
python-dateutil
PyYAML
pyyaml_env_tag
requests
six
urllib3
watchdog
zipp

File diff suppressed because it is too large Load diff

View file

@ -37,6 +37,15 @@ server {
listen 80;
listen [::]:80;
# for nginx versions < 1.25.1, you need to use a listen parameter instead
http2 on;
# Uncomment to enable HTTP/3 support (only do this after getting a valid certificate)
# Requires nginx 1.25.0+ and ngx_http_v3_module enabled at build time
#listen 443 quic reuseport;
#listen [::]:443 quic reuseport;
#add_header Alt-Svc 'h3=":443"; ma=86400';
# If you are not using Certbot, comment out the above and uncomment/edit the following
# listen 443 ssl http2;
# listen [::]:443 ssl http2;
@ -70,7 +79,7 @@ server {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/(media|proxy) {
@ -91,6 +100,17 @@ server {
listen 80;
listen [::]:80;
# for nginx versions < 1.25.1, you need to use a listen parameter instead
http2 on;
# Uncomment to enable HTTP/3 support (only do this after getting a valid certificate)
# Note: reuseport is not specified here as it's already used in the first server block,
# and can only be used once per ip/port pair.
# Requires nginx 1.25.0+ and ngx_http_v3_module enabled at build time
#listen 443 quic;
#listen [::]:443 quic;
#add_header Alt-Svc 'h3=":443"; ma=86400';
# If you are not using certbot, comment the above and copy all the ssl
# stuff from above into here.
@ -108,7 +128,7 @@ server {
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
location ~ ^/(media|proxy) {

View file

@ -24,14 +24,9 @@ def start_pleroma do
Pleroma.Application.limiters_setup()
Application.put_env(:phoenix, :serve_endpoints, false, persistent: true)
proxy_url = Pleroma.Config.get([:http, :proxy_url])
proxy = Pleroma.HTTP.AdapterHelper.format_proxy(proxy_url)
finch_config =
[:http, :adapter]
|> Pleroma.Config.get([])
|> Pleroma.HTTP.AdapterHelper.maybe_add_proxy_pool(proxy)
|> Keyword.put(:name, MyFinch)
Pleroma.Config.get([:http, :adapter])
|> Pleroma.HTTP.AdapterHelper.options()
unless System.get_env("DEBUG") do
Logger.remove_backend(:console)

View file

@ -340,6 +340,7 @@ def run(["prune_objects" | args]) do
keep_non_public: :boolean,
prune_orphaned_activities: :boolean,
prune_pinned: :boolean,
fix_replies_count: :boolean,
limit: :integer
]
)
@ -427,10 +428,15 @@ def run(["prune_objects" | args]) do
AND hto.hashtag_id is NULL
AND ufht.hashtag_id is NULL
"""
|> Repo.query!()
|> Repo.query!([], timeout: :infinity)
Logger.info("Deleted #{del_hashtags} no longer used hashtags...")
if Keyword.get(options, :fix_replies_count, true) do
Logger.info("Fixing reply counters...")
resync_replies_count()
end
if Keyword.get(options, :vacuum) do
Logger.info("Starting vacuum...")
Maintenance.vacuum("full")
@ -446,6 +452,7 @@ def run(["prune_task"]) do
|> Pleroma.Workers.Cron.PruneDatabaseWorker.perform()
end
# fixes wrong type of inlined like references for objects predating the inlined array
def run(["fix_likes_collections"]) do
start_pleroma()
@ -516,6 +523,60 @@ def run(["ensure_expiration"]) do
|> Stream.run()
end
# Mix task entry point: re-derives the caches inlined into object JSON
# (reply counter, announcement/like arrays, emoji reactions) from the
# actual activities stored in the database.
# Each sync step is enabled by default and can be skipped individually
# via a `--no-<option>` CLI switch (parsed strictly as booleans).
def run(["resync_inlined_caches" | args]) do
{options, [], []} =
OptionParser.parse(
args,
strict: [
replies_count: :boolean,
announcements: :boolean,
likes: :boolean,
reactions: :boolean
]
)
start_pleroma()
# The individual resyncs are independent of each other;
# run every one the caller did not explicitly opt out of.
if Keyword.get(options, :replies_count, true) do
resync_replies_count()
end
if Keyword.get(options, :announcements, true) do
resync_inlined_array("Announce", "announcement")
end
if Keyword.get(options, :likes, true) do
resync_inlined_array("Like", "like")
end
if Keyword.get(options, :reactions, true) do
resync_inlined_reactions()
end
end
# Mix task entry point: empties the inlined `replies` arrays stored in
# object JSON to reclaim space.
def run(["clean_inlined_replies"]) do
# The inlined replies array is not used after the initial processing
# when first receiving the object and only wastes space
start_pleroma()
# We cannot check jsonb_typeof(array) and jsonb_array_length() in the same query
# since the checks do not short-circuit and NULL values will raise an error for the latter
# First CTE: ids of objects whose 'replies' key actually holds a JSON array.
has_replies =
Pleroma.Object
|> select([o], %{id: o.id})
|> where([o], fragment("jsonb_typeof(?->'replies') = 'array'", o.data))
# Second pass: of those, rewrite any non-empty array to '[]'.
# No timeout since this may touch a large fraction of the objects table.
{update_cnt, _} =
Pleroma.Object
|> with_cte("arrays", as: ^has_replies)
|> join(:inner, [o], a in "arrays", on: o.id == a.id)
|> where([o, _a], fragment("jsonb_array_length(?->'replies') > 0", o.data))
|> update(set: [data: fragment("jsonb_set(data, '{replies}', '[]'::jsonb)")])
|> Pleroma.Repo.update_all([], timeout: :infinity)
Logger.info("Emptied inlined replies lists from #{update_cnt} rows.")
end
def run(["set_text_search_config", tsconfig]) do
start_pleroma()
%{rows: [[tsc]]} = Ecto.Adapters.SQL.query!(Pleroma.Repo, "SHOW default_text_search_config;")
@ -623,4 +684,125 @@ def run(["rollback", version]) do
shell_info(inspect(result))
end
end
# Recomputes each object's inlined `repliesCount` from the public replies
# actually present in the database, then writes back only rows whose
# stored counter disagrees with the recomputed value.
defp resync_replies_count() do
public_str = Pleroma.Constants.as_public()
# CTE "ref": for every replied-to AP id, count public (to/cc contains
# as:Public), non-Answer replies. Answers are excluded — presumably
# because poll votes should not bump the reply counter (NOTE(review):
# confirm against the counter's write path elsewhere in the codebase).
ref =
Pleroma.Object
|> select([o], %{apid: fragment("?->>'inReplyTo'", o.data), cnt: count()})
|> where(
[o],
fragment("?->>'type' <> 'Answer'", o.data) and
fragment("?->>'inReplyTo' IS NOT NULL", o.data) and
(fragment("?->'to' @> ?::jsonb", o.data, ^public_str) or
fragment("?->'cc' @> ?::jsonb", o.data, ^public_str))
)
|> group_by([o], fragment("?->>'inReplyTo'", o.data))
# Join objects against the recomputed counts and overwrite the stale
# repliesCount values in-place; unbounded timeout for large tables.
{update_cnt, _} =
Pleroma.Object
|> with_cte("ref", as: ^ref)
|> join(:inner, [o], r in "ref", on: fragment("?->>'id'", o.data) == r.apid)
|> where([o, r], fragment("(?->>'repliesCount')::bigint <> ?", o.data, r.cnt))
|> update([o, r],
set: [data: fragment("jsonb_set(?, '{repliesCount}', to_jsonb(?))", o.data, r.cnt)]
)
|> Pleroma.Repo.update_all([], timeout: :infinity)
Logger.info("Fixed reply counter for #{update_cnt} objects.")
end
# Rebuilds an object's inlined actor array and its counter (e.g. the
# "likes" array plus "like_count") from the activities of the given type
# (e.g. "Like") targeting that object.
# `basename` is singular; the array key is `basename <> "s"` and the
# counter key is `basename <> "_count"`.
defp resync_inlined_array(activity_type, basename) do
array_name = basename <> "s"
counter_name = basename <> "_count"
# CTE "ref": per target object AP id, the correct actor array aggregated
# from all matching activities that have an id and an actor.
ref =
Pleroma.Activity
|> select([a], %{
apid: fragment("?->>'object'", a.data),
correct: fragment("to_jsonb(ARRAY_AGG(?->>'actor'))", a.data)
})
|> where(
[a],
fragment("?->>'type' = ?", a.data, ^activity_type) and
fragment("?->>'id' IS NOT NULL", a.data) and
fragment("?->>'actor' IS NOT NULL", a.data)
)
|> group_by([a], fragment("?->>'object'", a.data))
# Update only objects whose stored array differs from the recomputed
# one. Mutual containment (@> and <@) is used as an order-insensitive
# set-equality check, so rows already equal up to ordering are skipped.
{update_cnt, _} =
Pleroma.Object
|> with_cte("ref", as: ^ref)
|> join(:inner, [o], r in "ref", on: fragment("?->>'id'", o.data) == r.apid)
|> where(
[o, r],
fragment("?->>'id' = ?", o.data, r.apid) and
not (fragment("? @> (?->?)", r.correct, o.data, ^array_name) and
fragment("? <@ (?->?)", r.correct, o.data, ^array_name))
)
|> update([o, r],
set: [
data:
# Merge in both the refreshed counter (array length) and the
# refreshed actor array in a single jsonb concatenation.
fragment(
"? || jsonb_build_object(?::text, jsonb_array_length(?::jsonb), ?::text, ?::jsonb)",
o.data,
^counter_name,
r.correct,
^array_name,
r.correct
)
]
)
|> Pleroma.Repo.update_all([], timeout: :infinity)
Logger.info("Fixed inlined #{basename} array and counter for #{update_cnt} objects.")
end
# Rebuilds each object's inlined `reactions` cache from EmojiReact
# activities. Reactions are grouped per (object, emoji name, emoji url)
# and each group is stored as a [name, actor-array, url] triple.
defp resync_inlined_reactions() do
# Inner query: one row per (object, emoji, url) with the distinct actors
# who reacted. selected_as/2 names the columns so the outer query and the
# group_by can refer to them.
expanded_ref =
Pleroma.Activity
|> select([a], %{
apid: selected_as(fragment("?->>'object'", a.data), :apid),
emoji_name: selected_as(fragment("TRIM(?->>'content', ':')", a.data), :emoji_name),
actors: fragment("ARRAY_AGG(DISTINCT ?->>'actor')", a.data),
url: selected_as(fragment("?#>>'{tag,0,icon,url}'", a.data), :url)
})
|> where(
[a],
fragment("?->>'type' = 'EmojiReact'", a.data) and
fragment("?->>'actor' IS NOT NULL", a.data) and
fragment("TRIM(?->>'content', ':') IS NOT NULL", a.data)
)
|> group_by([_], [selected_as(:apid), selected_as(:emoji_name), selected_as(:url)])
# Outer query: collapse per-emoji rows into a single jsonb array of
# [name, actors, url] triples per target object.
ref =
from(e in subquery(expanded_ref))
|> select([e], %{
apid: e.apid,
correct:
fragment(
"jsonb_agg(DISTINCT ARRAY[to_jsonb(?), to_jsonb(?), to_jsonb(?)])",
e.emoji_name,
e.actors,
e.url
)
})
|> group_by([e], e.apid)
# Overwrite only objects whose stored reactions differ; mutual
# containment (@> and <@) acts as an order-insensitive equality test.
{update_cnt, _} =
Pleroma.Object
|> join(:inner, [o], r in subquery(ref), on: r.apid == fragment("?->>'id'", o.data))
|> where(
[o, r],
not (fragment("? @> (?->'reactions')", r.correct, o.data) and
fragment("? <@ (?->'reactions')", r.correct, o.data))
)
|> update([o, r],
set: [data: fragment("jsonb_set(?, '{reactions}', ?)", o.data, r.correct)]
)
|> Pleroma.Repo.update_all([], timeout: :infinity)
Logger.info("Fixed inlined emoji reactions for #{update_cnt} objects.")
end
end

View file

@ -1,68 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
# Mix task that rebuilds the per-instance status counter cache by
# scanning all Create activities grouped by visibility.
defmodule Mix.Tasks.Pleroma.RefreshCounterCache do
@shortdoc "Refreshes counter cache"
use Mix.Task
alias Pleroma.Activity
alias Pleroma.CounterCache
alias Pleroma.Repo
import Ecto.Query
# Collects every known instance domain from activity actor URLs, then
# recomputes and persists its visibility counters one instance at a time,
# logging progress as it goes.
def run([]) do
Mix.Pleroma.start_pleroma()
# split_part(actor, '/', 3) extracts the host part of the actor URL.
instances =
Activity
|> distinct([a], true)
|> select([a], fragment("split_part(?, '/', 3)", a.actor))
|> Repo.all()
instances
|> Enum.with_index(1)
|> Enum.each(fn {instance, i} ->
counters = instance_counters(instance)
CounterCache.set(instance, counters)
Mix.Pleroma.shell_info(
"[#{i}/#{length(instances)}] Setting #{instance} counters: #{inspect(counters)}"
)
end)
Mix.Pleroma.shell_info("Done")
end
# Counts this instance's Create activities per visibility bucket
# ("public"/"unlisted"/"private"/"direct"), defaulting absent buckets
# to 0. Uses the activity_visibility SQL function; 30-minute query
# timeout since this scans the activities table.
defp instance_counters(instance) do
counters = %{"public" => 0, "unlisted" => 0, "private" => 0, "direct" => 0}
Activity
|> where([a], fragment("(? ->> 'type'::text) = 'Create'", a.data))
|> where([a], fragment("split_part(?, '/', 3) = ?", a.actor, ^instance))
|> select(
[a],
{fragment(
"activity_visibility(?, ?, ?)",
a.actor,
a.recipients,
a.data
), count(a.id)}
)
|> group_by(
[a],
fragment(
"activity_visibility(?, ?, ?)",
a.actor,
a.recipients,
a.data
)
)
|> Repo.all(timeout: :timer.minutes(30))
|> Enum.reduce(counters, fn {visibility, count}, acc ->
Map.put(acc, visibility, count)
end)
end
end

View file

@ -33,10 +33,6 @@ defmodule Pleroma.Activity do
field(:recipients, {:array, :string}, default: [])
field(:thread_muted?, :boolean, virtual: true)
# A field that can be used if you need to join some kind of other
# id to order / paginate this field by
field(:pagination_id, :string, virtual: true)
# This is a fake relation,
# do not use outside of with_preloaded_user_actor/with_joined_user_actor
has_one(:user_actor, User, on_delete: :nothing, foreign_key: :id)

View file

@ -59,6 +59,8 @@ def get_cached_scrubbed_html_for_activity(
object = Object.normalize(activity, fetch: false)
add_cache_key_for(activity.id, key)
# callback already produces :commit or :ignore tuples
HTML.ensure_scrubbed_html(content, scrubbers, object.data["fake"] || false, callback)
end)
end

View file

@ -7,7 +7,7 @@ defmodule Pleroma.Activity.Queries do
Contains queries for Activity.
"""
import Ecto.Query, only: [from: 2, where: 3]
import Ecto.Query, only: [from: 2]
@type query :: Ecto.Queryable.t() | Activity.t()
@ -76,22 +76,6 @@ def by_object_id(query, object_id) when is_binary(object_id) do
)
end
@spec by_object_in_reply_to_id(query, String.t(), keyword()) :: query
def by_object_in_reply_to_id(query, in_reply_to_id, opts \\ []) do
query =
if opts[:skip_preloading] do
Activity.with_joined_object(query)
else
Activity.with_preloaded_object(query)
end
where(
query,
[activity, object: o],
fragment("(?)->>'inReplyTo' = ?", o.data, ^to_string(in_reply_to_id))
)
end
@spec by_type(query, String.t()) :: query
def by_type(query \\ Activity, activity_type) do
from(

View file

@ -44,9 +44,6 @@ def start(_type, _args) do
# every time the application is restarted, so we disable module
# conflicts at runtime
Code.compiler_options(ignore_module_conflict: true)
# Disable warnings_as_errors at runtime, it breaks Phoenix live reload
# due to protocol consolidation warnings
Code.compiler_options(warnings_as_errors: false)
Config.Holder.save_default()
Pleroma.HTML.compile_scrubbers()
Pleroma.Config.Oban.warn()
@ -71,14 +68,13 @@ def start(_type, _args) do
http_children() ++
[
Pleroma.Stats,
Pleroma.JobQueueMonitor,
{Majic.Pool, [name: Pleroma.MajicPool, pool_size: Config.get([:majic_pool, :size], 2)]},
{Oban, Config.get(Oban)},
Pleroma.Web.Endpoint,
Pleroma.Web.Telemetry
] ++
elasticsearch_children() ++
task_children(@mix_env) ++
task_children() ++
dont_run_in_test(@mix_env)
# See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
@ -148,35 +144,90 @@ def load_all_pleroma_modules do
defp cachex_children do
[
build_cachex("used_captcha", ttl_interval: seconds_valid_interval()),
build_cachex("user", default_ttl: 25_000, ttl_interval: 1000, limit: 2500),
build_cachex("object", default_ttl: 25_000, ttl_interval: 1000, limit: 2500),
build_cachex("rich_media", default_ttl: :timer.minutes(120), limit: 5000),
build_cachex("scrubber", limit: 2500),
build_cachex("scrubber_management", limit: 2500),
build_cachex("idempotency", expiration: idempotency_expiration(), limit: 2500),
build_cachex("web_resp", limit: 2500),
build_cachex("emoji_packs", expiration: emoji_packs_expiration(), limit: 10),
build_cachex("failed_proxy_url", limit: 2500),
build_cachex("banned_urls", default_ttl: :timer.hours(24 * 30), limit: 5_000),
build_cachex("translations", default_ttl: :timer.hours(24 * 30), limit: 2500),
build_cachex("instances", default_ttl: :timer.hours(24), ttl_interval: 1000, limit: 2500),
build_cachex("request_signatures", default_ttl: :timer.hours(24 * 30), limit: 3000),
build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300),
build_cachex("host_meta", default_ttl: :timer.minutes(120), limit: 5000),
build_cachex("http_backoff", default_ttl: :timer.hours(24 * 30), limit: 10000)
build_cachex(
"used_captcha",
expiration: expiration(interval: seconds_valid_interval())
),
build_cachex(
"user",
expiration: expiration(default: 3_000, interval: 1_000),
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"object",
expiration: expiration(default: 3_000, interval: 1_000),
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"rich_media",
expiration: expiration(default: :timer.hours(2)),
hooks: [cachex_sched_limit(5000)]
),
build_cachex(
"scrubber",
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"scrubber_management",
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"idempotency",
expiration: expiration(default: :timer.hours(6), interval: :timer.minutes(1)),
hooks: [cachex_sched_limit(2500, [], frequency: :timer.minutes(1))]
),
build_cachex(
"web_resp",
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"emoji_packs",
expiration: expiration(default: :timer.minutes(5), interval: :timer.minutes(1)),
hooks: [cachex_sched_limit(10)]
),
build_cachex(
"failed_proxy_url",
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"banned_urls",
expiration: expiration(default: :timer.hours(24 * 30)),
hooks: [cachex_sched_limit(5_000, [], frequency: :timer.minutes(5))]
),
build_cachex(
"translations",
expiration: expiration(default: :timer.hours(24 * 30)),
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"instances",
expiration: expiration(default: :timer.hours(24), interval: 1000),
hooks: [cachex_sched_limit(2500)]
),
build_cachex(
"rel_me",
expiration: expiration(default: :timer.hours(24 * 30)),
hooks: [cachex_sched_limit(300, [], frequency: :timer.minutes(1))]
),
build_cachex(
"host_meta",
expiration: expiration(default: :timer.minutes(120)),
hooks: [cachex_sched_limit(5000, [], frequency: :timer.minutes(1))]
),
build_cachex(
"http_backoff",
expiration: expiration(default: :timer.hours(24 * 30)),
hooks: [cachex_sched_limit(10_000, [], frequency: :timer.minutes(5))]
)
]
end
defp emoji_packs_expiration,
do: expiration(default: :timer.seconds(5 * 60), interval: :timer.seconds(60))
defp idempotency_expiration,
do: expiration(default: :timer.seconds(6 * 60 * 60), interval: :timer.seconds(60))
defp seconds_valid_interval,
do: :timer.seconds(Config.get!([Pleroma.Captcha, :seconds_valid]))
defp cachex_sched_limit(limit, prune_opts \\ [], sched_opts \\ []),
do: hook(module: Cachex.Limit.Scheduled, args: {limit, prune_opts, sched_opts})
@spec build_cachex(String.t(), keyword()) :: map()
def build_cachex(type, opts),
do: %{
@ -204,31 +255,29 @@ defp background_migrators do
]
end
@spec task_children(atom()) :: [map()]
@spec task_children() :: [map()]
defp task_children() do
always =
[
%{
id: :web_push_init,
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
}
]
defp task_children(:test) do
[
%{
id: :web_push_init,
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
}
]
end
defp task_children(_) do
[
%{
id: :web_push_init,
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
},
%{
id: :internal_fetch_init,
start: {Task, :start_link, [&Pleroma.Web.ActivityPub.InternalFetchActor.init/0]},
restart: :temporary
}
]
if @mix_env == :test do
always
else
[
%{
id: :internal_fetch_init,
start: {Task, :start_link, [&Pleroma.Web.ActivityPub.InternalFetchActor.init/0]},
restart: :temporary
}
| always
]
end
end
@spec elasticsearch_children :: [Pleroma.Search.Elasticsearch.Cluster]
@ -262,23 +311,11 @@ def limiters_setup do
end
defp http_children do
proxy_url = Config.get([:http, :proxy_url])
proxy = Pleroma.HTTP.AdapterHelper.format_proxy(proxy_url)
pool_size = Config.get([:http, :pool_size], 10)
pool_timeout = Config.get([:http, :pool_timeout], 60_000)
connection_timeout = Config.get([:http, :conn_max_idle_time], 10_000)
:public_key.cacerts_load()
config =
[:http, :adapter]
|> Config.get([])
|> Pleroma.HTTP.AdapterHelper.add_pool_size(pool_size)
|> Pleroma.HTTP.AdapterHelper.maybe_add_proxy_pool(proxy)
|> Pleroma.HTTP.AdapterHelper.ensure_ipv6()
|> Pleroma.HTTP.AdapterHelper.add_default_conn_max_idle_time(connection_timeout)
|> Pleroma.HTTP.AdapterHelper.add_default_pool_max_idle_time(pool_timeout)
|> Keyword.put(:name, MyFinch)
Config.get([:http, :adapter])
|> Pleroma.HTTP.AdapterHelper.options()
[{Finch, config}]
end

View file

@ -97,7 +97,7 @@ defp validate_usage(token) do
defp mark_captcha_as_used(token) do
ttl = seconds_valid() |> :timer.seconds()
@cachex.put(:used_captcha_cache, token, true, ttl: ttl)
@cachex.put(:used_captcha_cache, token, true, expire: ttl)
end
defp method, do: Pleroma.Config.get!([__MODULE__, :method])

View file

@ -27,6 +27,10 @@ def fetch_collection(%{"type" => type} = page)
partial_as_success(objects_from_collection(page))
end
def fetch_collection(_) do
{:error, :invalid_type}
end
defp partial_as_success({:partial, items}), do: {:ok, items}
defp partial_as_success(res), do: res

View file

@ -22,6 +22,27 @@ defmodule Pleroma.Config.DeprecationWarnings do
"\n* `config :pleroma, :instance, :quarantined_instances` is now covered by `:pleroma, :mrf_simple, :reject`"}
]
# Emits a deprecation warning when thread-based visibility containment is
# in effect (i.e. skip_thread_containment resolves to false), since the
# feature is being considered for removal due to its performance cost.
def check_skip_thread_containment do
# The default in config/config.exs is "true" since 593b8b1e6a8502cca9bf5559b8bec86f172bbecb
# but when the default is retrieved in code the fallback is still "false"
uses_thread_visibility_filtering = !Config.get([:instance, :skip_thread_containment], false)
if uses_thread_visibility_filtering do
Logger.warning("""
!!!DEPRECATION WARNING!!!
Your config is explicitly enabling thread-based visibility containment by setting the below:
```
config :pleroma, :instance, skip_thread_containment: false
```
This feature comes with a very high performance overhead and is considered for removal.
If you actually need or strongly prefer keeping it, speak up NOW(!) by filing a ticket at
https://akkoma.dev/AkkomaGang/akkoma/issues
Complaints only after the removal happened are much less likely to have any effect.
""")
end
end
def check_exiftool_filter do
filters = Config.get([Pleroma.Upload]) |> Keyword.get(:filters, [])
@ -222,7 +243,8 @@ def warn do
check_http_adapter(),
check_uploader_base_url_set(),
check_uploader_base_url_is_not_base_domain(),
check_exiftool_filter()
check_exiftool_filter(),
check_skip_thread_containment()
]
|> Enum.reduce(:ok, fn
:ok, :ok -> :ok

View file

@ -7,7 +7,9 @@ defmodule Pleroma.ConfigDB do
import Ecto.Changeset
import Ecto.Query, only: [select: 3, from: 2]
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
alias __MODULE__
alias Pleroma.Repo
@ -303,7 +305,9 @@ def to_elixir_types(%{"tuple" => [":partial_chain", entity]}) do
end
def to_elixir_types(%{"tuple" => entity}) do
Enum.reduce(entity, {}, &Tuple.append(&2, to_elixir_types(&1)))
entity
|> Enum.map(&to_elixir_types(&1))
|> List.to_tuple()
end
def to_elixir_types(entity) when is_map(entity) do

View file

@ -19,7 +19,8 @@ defmodule Pleroma.Constants do
"context_id",
"deleted_activity_id",
"pleroma_internal",
"generator"
"generator",
"voters"
]
)

View file

@ -1,79 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
# Ecto schema + accessors for the per-instance status counter cache
# (counts of public/unlisted/private/direct statuses per instance).
defmodule Pleroma.CounterCache do
alias Pleroma.CounterCache
alias Pleroma.Repo
use Ecto.Schema
import Ecto.Changeset
import Ecto.Query
schema "counter_cache" do
# instance: host name the counters belong to (unique)
field(:instance, :string)
field(:public, :integer)
field(:unlisted, :integer)
field(:private, :integer)
field(:direct, :integer)
end
# Casts counter params; only :instance is required and must be unique.
def changeset(struct, params) do
struct
|> cast(params, [:instance, :public, :unlisted, :private, :direct])
|> validate_required([:instance])
|> unique_constraint(:instance)
end
# Returns the counters for one instance as a string-keyed map,
# falling back to all-zero counters when no row exists.
def get_by_instance(instance) do
CounterCache
|> select([c], %{
"public" => c.public,
"unlisted" => c.unlisted,
"private" => c.private,
"direct" => c.direct
})
|> where([c], c.instance == ^instance)
|> Repo.one()
|> case do
nil -> %{"public" => 0, "unlisted" => 0, "private" => 0, "direct" => 0}
val -> val
end
end
# Returns the counters summed over all instances as a string-keyed map.
def get_sum do
CounterCache
|> select([c], %{
"public" => type(sum(c.public), :integer),
"unlisted" => type(sum(c.unlisted), :integer),
"private" => type(sum(c.private), :integer),
"direct" => type(sum(c.direct), :integer)
})
|> Repo.one()
end
# Upserts the counters for an instance. Missing keys in `values`
# default to 0; conflicts on :instance overwrite all four counters.
def set(instance, values) do
params =
Enum.reduce(
["public", "private", "unlisted", "direct"],
%{"instance" => instance},
fn param, acc ->
Map.put_new(acc, param, Map.get(values, param, 0))
end
)
%CounterCache{}
|> changeset(params)
|> Repo.insert(
on_conflict: [
set: [
public: params["public"],
private: params["private"],
unlisted: params["unlisted"],
direct: params["direct"]
]
],
returning: true,
conflict_target: :instance
)
end
end

View file

@ -4,7 +4,7 @@
defmodule Pleroma.Docs.Translator do
require Pleroma.Docs.Translator.Compiler
require Pleroma.Web.Gettext
use Gettext, backend: Pleroma.Web.Gettext
@before_compile Pleroma.Docs.Translator.Compiler
end

View file

@ -7,6 +7,8 @@ defmodule Pleroma.Docs.Translator.Compiler do
@raw_config Pleroma.Config.Loader.read("config/description.exs")
@raw_descriptions @raw_config[:pleroma][:config_description]
require Gettext.Macros
defmacro __before_compile__(_env) do
strings =
__MODULE__.descriptions()
@ -21,7 +23,8 @@ def placeholder do
ctxt = msgctxt_for(path, type)
quote do
Pleroma.Web.Gettext.dpgettext_noop(
Gettext.Macros.dpgettext_noop_with_backend(
Pleroma.Web.Gettext,
"config_descriptions",
unquote(ctxt),
unquote(string)

View file

@ -5,12 +5,13 @@
defmodule Pleroma.Emails.UserEmail do
@moduledoc "User emails"
require Pleroma.Web.Gettext
require Pleroma.Web.GettextCompanion
use Gettext, backend: Pleroma.Web.Gettext
use Pleroma.Web, :mailer
alias Pleroma.Config
alias Pleroma.User
alias Pleroma.Web.Gettext
alias Pleroma.Web.GettextCompanion
import Swoosh.Email
import Phoenix.Swoosh, except: [render_body: 3]
@ -29,7 +30,7 @@ defp recipient(%User{} = user), do: recipient(user.email, user.name)
@spec welcome(User.t(), map()) :: Swoosh.Email.t()
def welcome(user, opts \\ %{}) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
new()
|> to(recipient(user))
|> from(Map.get(opts, :sender, sender()))
@ -37,7 +38,7 @@ def welcome(user, opts \\ %{}) do
Map.get(
opts,
:subject,
Gettext.dpgettext(
dpgettext(
"static_pages",
"welcome email subject",
"Welcome to %{instance_name}!",
@ -49,7 +50,7 @@ def welcome(user, opts \\ %{}) do
Map.get(
opts,
:html,
Gettext.dpgettext(
dpgettext(
"static_pages",
"welcome email html body",
"Welcome to %{instance_name}!",
@ -61,7 +62,7 @@ def welcome(user, opts \\ %{}) do
Map.get(
opts,
:text,
Gettext.dpgettext(
dpgettext(
"static_pages",
"welcome email text body",
"Welcome to %{instance_name}!",
@ -73,11 +74,11 @@ def welcome(user, opts \\ %{}) do
end
def password_reset_email(user, token) when is_binary(token) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
password_reset_url = url(~p[/api/v1/pleroma/password_reset/#{token}])
html_body =
Gettext.dpgettext(
dpgettext(
"static_pages",
"password reset email body",
"""
@ -93,9 +94,7 @@ def password_reset_email(user, token) when is_binary(token) do
new()
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext("static_pages", "password reset email subject", "Password reset")
)
|> subject(dpgettext("static_pages", "password reset email subject", "Password reset"))
|> html_body(html_body)
end
end
@ -106,11 +105,11 @@ def user_invitation_email(
to_email,
to_name \\ nil
) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
registration_url = url(~p[/registration/#{user_invite_token.token}])
html_body =
Gettext.dpgettext(
dpgettext(
"static_pages",
"user invitation email body",
"""
@ -127,7 +126,7 @@ def user_invitation_email(
|> to(recipient(to_email, to_name))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"user invitation email subject",
"Invitation to %{instance_name}",
@ -139,11 +138,11 @@ def user_invitation_email(
end
def account_confirmation_email(user) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
confirmation_url = url(~p[/api/account/confirm_email/#{user.id}/#{user.confirmation_token}])
html_body =
Gettext.dpgettext(
dpgettext(
"static_pages",
"confirmation email body",
"""
@ -159,7 +158,7 @@ def account_confirmation_email(user) do
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"confirmation email subject",
"%{instance_name} account confirmation",
@ -171,9 +170,9 @@ def account_confirmation_email(user) do
end
def approval_pending_email(user) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
html_body =
Gettext.dpgettext(
dpgettext(
"static_pages",
"approval pending email body",
"""
@ -187,7 +186,7 @@ def approval_pending_email(user) do
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"approval pending email subject",
"Your account is awaiting approval"
@ -198,9 +197,9 @@ def approval_pending_email(user) do
end
def successful_registration_email(user) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
html_body =
Gettext.dpgettext(
dpgettext(
"static_pages",
"successful registration email body",
"""
@ -216,7 +215,7 @@ def successful_registration_email(user) do
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"successful registration email subject",
"Account registered on %{instance_name}",
@ -234,7 +233,7 @@ def successful_registration_email(user) do
"""
@spec digest_email(User.t()) :: Swoosh.Email.t() | nil
def digest_email(user) do
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
notifications = Pleroma.Notification.for_user_since(user, user.last_digest_emailed_at)
mentions =
@ -295,7 +294,7 @@ def digest_email(user) do
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"digest email subject",
"Your digest from %{instance_name}",
@ -336,12 +335,12 @@ def unsubscribe_url(user, notifications_type) do
def backup_is_ready_email(backup, admin_user_id \\ nil) do
%{user: user} = Pleroma.Repo.preload(backup, :user)
Gettext.with_locale_or_default user.language do
GettextCompanion.with_locale_or_default user.language do
download_url = Pleroma.Web.PleromaAPI.BackupView.download_url(backup)
html_body =
if is_nil(admin_user_id) do
Gettext.dpgettext(
dpgettext(
"static_pages",
"account archive email body - self-requested",
"""
@ -353,7 +352,7 @@ def backup_is_ready_email(backup, admin_user_id \\ nil) do
else
admin = Pleroma.Repo.get(User, admin_user_id)
Gettext.dpgettext(
dpgettext(
"static_pages",
"account archive email body - admin requested",
"""
@ -369,7 +368,7 @@ def backup_is_ready_email(backup, admin_user_id \\ nil) do
|> to(recipient(user))
|> from(sender())
|> subject(
Gettext.dpgettext(
dpgettext(
"static_pages",
"account archive email subject",
"Your account archive is ready"

View file

@ -16,7 +16,7 @@ defmodule Pleroma.Emoji do
@ets __MODULE__.Ets
@ets_options [
:ordered_set,
:set,
:protected,
:named_table,
{:read_concurrency, true}
@ -25,6 +25,8 @@ defmodule Pleroma.Emoji do
defstruct [:code, :file, :tags, :safe_code, :safe_file]
@type t :: %__MODULE__{}
@doc "Build emoji struct"
def build({code, file, tags}) do
%__MODULE__{
@ -43,14 +45,14 @@ def start_link(_) do
GenServer.start_link(__MODULE__, [], name: __MODULE__)
end
@doc "Reloads the emojis from disk."
@doc "Reloads the emojis from disk (asynchronous)"
@spec reload() :: :ok
def reload do
GenServer.call(__MODULE__, :reload)
GenServer.cast(__MODULE__, :reload)
end
@doc "Returns the path of the emoji `name`."
@spec get(String.t()) :: String.t() | nil
@doc "Returns the emoji struct of the given `name` if it exists."
@spec get(String.t()) :: t() | nil
def get(name) do
name =
if String.starts_with?(name, ":") do
@ -62,11 +64,23 @@ def get(name) do
end
case :ets.lookup(@ets, name) do
[{_, path}] -> path
[{_, emoji}] -> emoji
_ -> nil
end
end
@doc "Updates or inserts new emoji (asynchronous)"
@spec add_or_update(t()) :: :ok
def add_or_update(%__MODULE__{} = emoji) do
GenServer.cast(__MODULE__, {:add, emoji})
end
@doc "Delete emoji with given shortcode if it exists (asynchronous)"
@spec delete(String.t()) :: :ok
def delete(code) do
GenServer.cast(__MODULE__, {:delete, code})
end
@spec exist?(String.t()) :: boolean()
def exist?(name), do: not is_nil(get(name))
@ -76,9 +90,6 @@ def get_all do
:ets.tab2list(@ets)
end
@doc "Clear out old emojis"
def clear_all, do: :ets.delete_all_objects(@ets)
@doc false
def init(_) do
@ets = :ets.new(@ets, @ets_options)
@ -92,10 +103,14 @@ def handle_cast(:reload, state) do
{:noreply, state}
end
@doc false
def handle_call(:reload, _from, state) do
update_emojis(Loader.load())
{:reply, :ok, state}
def handle_cast({:add, %__MODULE__{} = emoji}, state) do
:ets.insert(@ets, {emoji.code, emoji})
{:noreply, state}
end
def handle_cast({:delete, code}, state) do
:ets.delete(@ets, code)
{:noreply, state}
end
@doc false
@ -109,7 +124,10 @@ def code_change(_old_vsn, state, _extra) do
{:ok, state}
end
defp update_emojis([]), do: true
defp update_emojis(emojis) do
:ets.delete_all_objects(@ets)
:ets.insert(@ets, emojis)
end

View file

@ -69,7 +69,6 @@ def load do
load_pack(Path.join(emoji_dir_path, pack), emoji_groups)
end)
Emoji.clear_all()
emojis
end
@ -103,26 +102,22 @@ defp load_pack(pack_dir, emoji_groups) do
pack_file = Path.join(pack_dir, "pack.json")
if File.exists?(pack_file) do
Logger.info("Loading emoji pack from JSON: #{pack_file}")
contents = Jason.decode!(File.read!(pack_file))
Logger.debug("Loading emoji pack from JSON: #{pack_file}")
contents["files"]
|> Enum.map(fn {name, rel_file} ->
filename = Path.join("/emoji/#{pack_name}", rel_file)
{name, filename, ["pack:#{pack_name}"]}
end)
Jason.decode(File.read!(pack_file))
|> load_from_pack(pack_name)
else
# Load from emoji.txt / all files
emoji_txt = Path.join(pack_dir, "emoji.txt")
if File.exists?(emoji_txt) do
Logger.info("Loading emoji pack from emoji.txt: #{emoji_txt}")
Logger.debug("Loading emoji pack from emoji.txt: #{emoji_txt}")
load_from_file(emoji_txt, emoji_groups)
else
extensions = Config.get([:emoji, :pack_extensions])
Logger.info(
"No emoji.txt found for pack \"#{pack_name}\", assuming all #{Enum.join(extensions, ", ")} files are emoji"
"No pack.json or emoji.txt found for pack \"#{pack_name}\", assuming all #{Enum.join(extensions, ", ")} files are emoji"
)
make_shortcode_to_file_map(pack_dir, extensions)
@ -135,6 +130,21 @@ defp load_pack(pack_dir, emoji_groups) do
end
end
defp load_from_pack({:ok, %{"files" => files}}, pack_name) when is_map(files) do
Enum.map(files, fn {name, rel_file} ->
filename = Path.join("/emoji/#{pack_name}", rel_file)
{name, filename, ["pack:#{pack_name}"]}
end)
end
defp load_from_pack(pack_json_result, pack_name) do
Logger.error(
"Failed to load emoji pack #{pack_name} from pack.json:\n#{inspect(pack_json_result)}"
)
[]
end
def make_shortcode_to_file_map(pack_dir, exts) do
find_all_emoji(pack_dir, exts)
|> Enum.map(&Path.relative_to(&1, pack_dir))

View file

@ -49,12 +49,15 @@ defp path_join_safe(dir, path) do
Path.join(dir, safe_path)
end
defp tags(%__MODULE__{} = pack), do: ["pack:" <> pack.name]
@spec create(String.t()) :: {:ok, t()} | {:error, File.posix()} | {:error, :empty_values}
def create(name) do
with :ok <- validate_not_empty([name]),
dir <- path_join_name_safe(emoji_path(), name),
:ok <- File.mkdir(dir) do
save_pack(%__MODULE__{
name: name,
path: dir,
pack_file: Path.join(dir, "pack.json")
})
@ -142,8 +145,6 @@ def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"}
{item, updated_pack}
end)
Emoji.reload()
{:ok, updated_pack}
after
File.rm_rf(tmp_dir)
@ -169,16 +170,19 @@ defp try_add_file(%Pack{} = pack, shortcode, filename, file) do
with :ok <- validate_not_empty([shortcode, filename]),
:ok <- validate_emoji_not_exists(shortcode),
{:ok, updated_pack} <- do_add_file(pack, shortcode, filename, file) do
Emoji.reload()
{:ok, updated_pack}
end
end
defp do_add_file(pack, shortcode, filename, file) do
with :ok <- save_file(file, pack, filename) do
pack
|> put_emoji(shortcode, filename)
|> save_pack()
with :ok <- save_file(file, pack, filename),
pack <- put_emoji(pack, shortcode, filename),
{:ok, pack} <- save_pack(pack) do
{shortcode, filename, tags(pack)}
|> Emoji.build()
|> Emoji.add_or_update()
{:ok, pack}
end
end
@ -188,7 +192,7 @@ def delete_file(%Pack{} = pack, shortcode) do
with :ok <- validate_not_empty([shortcode]),
:ok <- remove_file(pack, shortcode),
{:ok, updated_pack} <- pack |> delete_emoji(shortcode) |> save_pack() do
Emoji.reload()
Emoji.delete(shortcode)
{:ok, updated_pack}
end
end
@ -205,7 +209,12 @@ def update_file(%Pack{} = pack, shortcode, new_shortcode, new_filename, force) d
|> delete_emoji(shortcode)
|> put_emoji(new_shortcode, new_filename)
|> save_pack() do
Emoji.reload()
Emoji.delete(shortcode)
{new_shortcode, new_filename, tags(pack)}
|> Emoji.build()
|> Emoji.add_or_update()
{:ok, updated_pack}
end
end
@ -455,7 +464,7 @@ defp create_archive_and_cache(pack, hash) do
# if pack.json MD5 changes, the cache is not valid anymore
%{hash: hash, pack_data: result},
# Add a minute to cache time for every file in the pack
ttl: overall_ttl
expire: overall_ttl
)
result
@ -580,7 +589,7 @@ defp get_filename(pack, shortcode) do
defp http_get(%URI{} = url), do: url |> to_string() |> http_get()
defp http_get(url) do
with {:ok, %{body: body}} <- Pleroma.HTTP.get(url, [], []) do
with {:ok, %{body: body}} <- Pleroma.HTTP.get(url) do
Jason.decode(body)
end
end

View file

@ -161,7 +161,11 @@ def get_follow_requests_query(%User{id: id}) do
|> where([r], r.state == ^:follow_pending)
|> where([r], r.following_id == ^id)
|> where([r, follower: f], f.is_active == true)
|> select([r, follower: f], f)
end
def get_follow_requesting_users_with_request_id(%User{} = user) do
get_follow_requests_query(user)
|> select([r, follower: f], %{id: r.id, entry: f})
end
def following?(%User{id: follower_id}, %User{id: followed_id}) do

View file

@ -3,6 +3,7 @@
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Formatter do
alias PhoenixHTMLHelpers.Tag
alias Pleroma.HTML
alias Pleroma.User
@ -37,10 +38,10 @@ def mention_tag(%User{id: id} = user, nickname, opts \\ []) do
nickname_text = get_nickname_text(nickname, opts)
:span
|> Phoenix.HTML.Tag.content_tag(
Phoenix.HTML.Tag.content_tag(
|> Tag.content_tag(
Tag.content_tag(
:a,
["@", Phoenix.HTML.Tag.content_tag(:span, nickname_text)],
["@", Tag.content_tag(:span, nickname_text)],
"data-user": id,
class: "u-url mention",
href: user_url,
@ -68,7 +69,7 @@ def hashtag_handler("#" <> tag = tag_text, _buffer, _opts, acc) do
url = "#{Pleroma.Web.Endpoint.url()}/tag/#{tag}"
link =
Phoenix.HTML.Tag.content_tag(:a, tag_text,
Tag.content_tag(:a, tag_text,
class: "hashtag",
"data-tag": tag,
href: url,

View file

@ -84,7 +84,7 @@ defp download_build(frontend_info, dest) do
url = String.replace(frontend_info["build_url"], "${ref}", frontend_info["ref"])
with {:ok, %{status: 200, body: zip_body}} <-
Pleroma.HTTP.get(url, [], receive_timeout: 120_000) do
Pleroma.HTTP.get(url, [], adapter: [receive_timeout: 120_000]) do
unzip(zip_body, dest)
else
{:error, e} -> {:error, e}

View file

@ -14,7 +14,6 @@ defmodule Pleroma.Healthcheck do
active: 0,
idle: 0,
memory_used: 0,
job_queue_stats: nil,
healthy: true
@type t :: %__MODULE__{
@ -22,7 +21,6 @@ defmodule Pleroma.Healthcheck do
active: non_neg_integer(),
idle: non_neg_integer(),
memory_used: number(),
job_queue_stats: map(),
healthy: boolean()
}
@ -32,7 +30,6 @@ def system_info do
memory_used: Float.round(:recon_alloc.memory(:allocated) / 1024 / 1024, 2)
}
|> assign_db_info()
|> assign_job_queue_stats()
|> check_health()
end
@ -58,11 +55,6 @@ defp assign_db_info(healthcheck) do
Map.merge(healthcheck, db_info)
end
defp assign_job_queue_stats(healthcheck) do
stats = Pleroma.JobQueueMonitor.stats()
Map.put(healthcheck, :job_queue_stats, stats)
end
@spec check_health(Healthcheck.t()) :: Healthcheck.t()
def check_health(%{pool_size: pool_size, active: active} = check)
when active >= pool_size do

View file

@ -24,7 +24,7 @@ def missing_dependencies do
def image_resize(url, options) do
with executable when is_binary(executable) <- System.find_executable("convert"),
{:ok, args} <- prepare_image_resize_args(options),
{:ok, env} <- HTTP.get(url, [], []),
{:ok, env} <- HTTP.get(url),
{:ok, fifo_path} <- mkfifo() do
args = List.flatten([fifo_path, args])
run_fifo(fifo_path, env, executable, args)
@ -73,7 +73,7 @@ defp prepare_image_resize_args(_), do: {:error, :missing_options}
# Note: video thumbnail is intentionally not resized (always has original dimensions)
def video_framegrab(url) do
with executable when is_binary(executable) <- System.find_executable("ffmpeg"),
{:ok, env} <- HTTP.get(url, [], []),
{:ok, env} <- HTTP.get(url),
{:ok, fifo_path} <- mkfifo(),
args = [
"-y",

View file

@ -7,10 +7,6 @@ defmodule Pleroma.HTTP do
Wrapper for `Tesla.request/2`.
"""
alias Pleroma.HTTP.AdapterHelper
alias Pleroma.HTTP.Request
alias Pleroma.HTTP.RequestBuilder, as: Builder
alias Tesla.Client
alias Tesla.Env
require Logger
@ -18,6 +14,8 @@ defmodule Pleroma.HTTP do
@type t :: __MODULE__
@type method() :: :get | :post | :put | :delete | :head
@mix_env Mix.env()
@doc """
Performs GET request.
@ -59,40 +57,44 @@ def post(url, body, headers \\ [], options \\ []),
@spec request(method(), Request.url(), String.t(), Request.headers(), keyword()) ::
{:ok, Env.t()} | {:error, any()}
def request(method, url, body, headers, options) when is_binary(url) do
uri = URI.parse(url)
adapter_opts = AdapterHelper.options(uri, options || [])
adapter_opts =
if uri.scheme == :https do
AdapterHelper.maybe_add_cacerts(adapter_opts, :public_key.cacerts_get())
else
adapter_opts
end
options = put_in(options[:adapter], adapter_opts)
params = options[:params] || []
request = build_request(method, headers, options, url, body, params)
client = Tesla.client([Tesla.Middleware.FollowRedirects, Tesla.Middleware.Telemetry])
options = options |> Keyword.delete(:params)
headers = maybe_add_user_agent(headers)
client =
Tesla.client([
Tesla.Middleware.FollowRedirects,
Pleroma.HTTP.Middleware.HTTPSignature,
Tesla.Middleware.Telemetry
])
Logger.debug("Outbound: #{method} #{url}")
request(client, request)
Tesla.request(client,
method: method,
url: url,
query: params,
headers: headers,
body: body,
opts: options
)
rescue
e ->
Logger.error("Failed to fetch #{url}: #{inspect(e)}")
Logger.error("Failed to fetch #{url}: #{Exception.format(:error, e, __STACKTRACE__)}")
{:error, :fetch_error}
end
@spec request(Client.t(), keyword()) :: {:ok, Env.t()} | {:error, any()}
def request(client, request), do: Tesla.request(client, request)
defp build_request(method, headers, options, url, body, params) do
Builder.new()
|> Builder.method(method)
|> Builder.headers(headers)
|> Builder.opts(options)
|> Builder.url(url)
|> Builder.add_param(:body, :body, body)
|> Builder.add_param(:query, :query, params)
|> Builder.convert_to_keyword()
if @mix_env == :test do
defp maybe_add_user_agent(headers) do
with true <- Pleroma.Config.get([:http, :send_user_agent]) do
[{"user-agent", Pleroma.Application.user_agent()} | headers]
else
_ ->
headers
end
end
else
defp maybe_add_user_agent(headers),
do: [{"user-agent", Pleroma.Application.user_agent()} | headers]
end
end

View file

@ -6,141 +6,84 @@ defmodule Pleroma.HTTP.AdapterHelper do
@moduledoc """
Configure Tesla.Client with default and customized adapter options.
"""
@defaults [name: MyFinch, pool_timeout: 5_000, receive_timeout: 5_000]
@type proxy_type() :: :socks4 | :socks5
@type host() :: charlist() | :inet.ip_address()
alias Pleroma.HTTP.AdapterHelper
alias Pleroma.Config
require Logger
@type proxy :: {Connection.proxy_type(), Connection.host(), pos_integer(), list()}
@callback options(keyword(), URI.t()) :: keyword()
@doc """
Merge default connection & adapter options with received ones.
"""
@spec options(Keyword.t()) :: Keyword.t()
def options(opts \\ []) do
[
name: MyFinch,
pools: %{
default: [
size: Config.get!([:http, :pool_size]),
pool_max_idle_time: Config.get!([:http, :pool_timeout]),
conn_max_idle_time: Config.get!([:http, :receive_timeout]),
protocols: Config.get!([:http, :protocols]),
conn_opts: [
# Do NOT add cacerts here as this will cause issues for plain HTTP connections!
# (when we upgrade our deps to Mint >= 1.6.0 we can also explicitly enable "inet4: true")
transport_opts: [inet6: true],
# up to at least version 0.20.0, Finch leaves server_push enabled by default for HTTP2,
# but will actually raise an exception when receiving such a response. Tell servers we don't want it.
# see: https://github.com/sneako/finch/issues/325
client_settings: [enable_push: false]
]
]
}
]
|> maybe_add_proxy_pool(Config.get([:http, :proxy_url]))
|> nested_merge(opts)
# Ensure name is not overwritten
|> Keyword.put(:name, MyFinch)
end
@spec nested_merge(Keyword.t(), Keyword.t()) :: Keyword.t()
defp nested_merge(k1, k2) do
Keyword.merge(k1, k2, &nested_merge/3)
end
defp nested_merge(_key, v1, v2) when is_list(v1) and is_list(v2) do
if Keyword.keyword?(v1) and Keyword.keyword?(v2) do
nested_merge(v1, v2)
else
v2
end
end
defp nested_merge(_key, v1, v2) when is_map(v1) and is_map(v2) do
Map.merge(v1, v2, &nested_merge/3)
end
defp nested_merge(_key, _v1, v2), do: v2
defp maybe_add_proxy_pool(opts, proxy_config) do
case format_proxy(proxy_config) do
nil ->
opts
proxy ->
Logger.info("Using HTTP Proxy: #{inspect(proxy)}")
put_in(opts, [:pools, :default, :conn_opts, :proxy], proxy)
end
end
@spec format_proxy(String.t() | tuple() | nil) :: proxy() | nil
def format_proxy(nil), do: nil
def format_proxy(proxy_url) do
defp format_proxy(proxy_url) do
case parse_proxy(proxy_url) do
{:ok, type, host, port} -> {type, host, port, []}
_ -> nil
end
end
@spec maybe_add_proxy(keyword(), proxy() | nil) :: keyword()
def maybe_add_proxy(opts, nil), do: opts
def maybe_add_proxy(opts, proxy) do
Keyword.put(opts, :proxy, proxy)
end
def maybe_add_proxy_pool(opts, nil), do: opts
def maybe_add_proxy_pool(opts, proxy) do
Logger.info("Using HTTP Proxy: #{inspect(proxy)}")
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> maybe_add_conn_opts()
|> put_in([:pools, :default, :conn_opts, :proxy], proxy)
end
def maybe_add_cacerts(opts, nil), do: opts
def maybe_add_cacerts(opts, cacerts) do
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> maybe_add_conn_opts()
|> maybe_add_transport_opts()
|> put_in([:pools, :default, :conn_opts, :transport_opts, :cacerts], cacerts)
end
def add_pool_size(opts, pool_size) do
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> put_in([:pools, :default, :size], pool_size)
end
def ensure_ipv6(opts) do
# Default transport opts already enable IPv6, so just ensure they're loaded
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> maybe_add_conn_opts()
|> maybe_add_transport_opts()
end
defp maybe_add_pools(opts) do
if Keyword.has_key?(opts, :pools) do
opts
else
Keyword.put(opts, :pools, %{})
end
end
defp maybe_add_default_pool(opts) do
pools = Keyword.get(opts, :pools)
if Map.has_key?(pools, :default) do
opts
else
put_in(opts, [:pools, :default], [])
end
end
defp maybe_add_conn_opts(opts) do
conn_opts = get_in(opts, [:pools, :default, :conn_opts])
unless is_nil(conn_opts) do
opts
else
put_in(opts, [:pools, :default, :conn_opts], [])
end
end
defp maybe_add_transport_opts(opts) do
transport_opts = get_in(opts, [:pools, :default, :conn_opts, :transport_opts])
opts =
unless is_nil(transport_opts) do
opts
else
put_in(opts, [:pools, :default, :conn_opts, :transport_opts], [])
end
# IPv6 is disabled and IPv4 enabled by default; ensure we can use both
put_in(opts, [:pools, :default, :conn_opts, :transport_opts, :inet6], true)
end
def add_default_pool_max_idle_time(opts, pool_timeout) do
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> put_in([:pools, :default, :pool_max_idle_time], pool_timeout)
end
def add_default_conn_max_idle_time(opts, connection_timeout) do
opts
|> maybe_add_pools()
|> maybe_add_default_pool()
|> put_in([:pools, :default, :conn_max_idle_time], connection_timeout)
end
@doc """
Merge default connection & adapter options with received ones.
"""
@spec options(URI.t(), keyword()) :: keyword()
def options(%URI{} = uri, opts \\ []) do
@defaults
|> Keyword.merge(opts)
|> AdapterHelper.Default.options(uri)
end
defp proxy_type("http"), do: {:ok, :http}
defp proxy_type("https"), do: {:ok, :https}
defp proxy_type(_), do: {:error, :unknown}
@ -149,10 +92,10 @@ defp proxy_type(_), do: {:error, :unknown}
{:ok, proxy_type(), host(), pos_integer()}
| {:error, atom()}
| nil
def parse_proxy(nil), do: nil
def parse_proxy(""), do: nil
defp parse_proxy(nil), do: nil
defp parse_proxy(""), do: nil
def parse_proxy(proxy) when is_binary(proxy) do
defp parse_proxy(proxy) when is_binary(proxy) do
with %URI{} = uri <- URI.parse(proxy),
{:ok, type} <- proxy_type(uri.scheme) do
{:ok, type, uri.host, uri.port}
@ -163,7 +106,7 @@ def parse_proxy(proxy) when is_binary(proxy) do
end
end
def parse_proxy(proxy) when is_tuple(proxy) do
defp parse_proxy(proxy) when is_tuple(proxy) do
with {type, host, port} <- proxy do
{:ok, type, host, port}
else
@ -172,30 +115,4 @@ def parse_proxy(proxy) when is_tuple(proxy) do
{:error, :invalid_proxy}
end
end
@spec parse_host(String.t() | atom() | charlist()) :: charlist() | :inet.ip_address()
def parse_host(host) when is_list(host), do: host
def parse_host(host) when is_atom(host), do: to_charlist(host)
def parse_host(host) when is_binary(host) do
host = to_charlist(host)
case :inet.parse_address(host) do
{:error, :einval} -> host
{:ok, ip} -> ip
end
end
@spec format_host(String.t()) :: charlist()
def format_host(host) do
host_charlist = to_charlist(host)
case :inet.parse_address(host_charlist) do
{:error, :einval} ->
:idna.encode(host_charlist)
{:ok, _ip} ->
host_charlist
end
end
end

View file

@ -1,24 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTTP.AdapterHelper.Default do
alias Pleroma.HTTP.AdapterHelper
@behaviour Pleroma.HTTP.AdapterHelper
@spec options(keyword(), URI.t()) :: keyword()
def options(opts, _uri) do
proxy = Pleroma.Config.get([:http, :proxy_url])
pool_timeout = Pleroma.Config.get([:http, :pool_timeout], 5000)
receive_timeout = Pleroma.Config.get([:http, :receive_timeout], 15_000)
opts
|> AdapterHelper.maybe_add_proxy(AdapterHelper.format_proxy(proxy))
|> Keyword.put(:pool_timeout, pool_timeout)
|> Keyword.put(:receive_timeout, receive_timeout)
end
@spec get_conn(URI.t(), keyword()) :: {:ok, keyword()}
def get_conn(_uri, opts), do: {:ok, opts}
end

View file

@ -77,6 +77,12 @@ defp next_backoff_timestamp(%{headers: headers}) when is_list(headers) do
defp next_backoff_timestamp(_), do: DateTime.utc_now() |> Timex.shift(seconds: 5 * 60)
defp log_ratelimit(429, host, time),
do: Logger.error("Rate limited on #{host}! Backing off until #{time}...")
defp log_ratelimit(503, host, time),
do: Logger.warning("#{host} temporarily unavailable! Backing off until #{time}...")
# utility function to check the HTTP response for potential backoff headers
# will check if we get a 429 or 503 response, and if we do, will back off for a bit
@spec check_backoff({:ok | :error, HTTP.Env.t()}, binary()) ::
@ -84,11 +90,11 @@ defp next_backoff_timestamp(_), do: DateTime.utc_now() |> Timex.shift(seconds: 5
defp check_backoff({:ok, env}, host) do
case env.status do
status when status in [429, 503] ->
Logger.error("Rate limited on #{host}! Backing off...")
timestamp = next_backoff_timestamp(env)
log_ratelimit(status, host, timestamp)
ttl = Timex.diff(timestamp, DateTime.utc_now(), :seconds)
# we will cache the host for 5 minutes
@cachex.put(@backoff_cache, host, true, ttl: ttl)
@cachex.put(@backoff_cache, host, true, expire: ttl)
{:error, :ratelimit}
_ ->

View file

@ -0,0 +1,121 @@
# Akkoma: Magically expressive social media
# Copyright © 2025 Akkoma Authors <https://akkoma.dev/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTTP.Middleware.HTTPSignature do
alias Pleroma.User.SigningKey
alias Pleroma.Signature
require Logger
@behaviour Tesla.Middleware
@moduledoc """
Adds a HTTP signature and related headers to requests, if a signing key is set in the request env.
If any other middleware can update the target location (e.g. redirects) this MUST be placed after all of them!
(Note: the third argument holds static middleware options from client creation)
"""
@impl true
def call(env, next, _options) do
env = maybe_sign(env)
Tesla.run(env, next)
end
defp maybe_sign(env) do
case Keyword.get(env.opts, :httpsig) do
%{signing_key: %SigningKey{} = key} ->
set_signature_headers(env, key)
_ ->
env
end
end
defp set_signature_headers(env, key) do
Logger.debug("Signing request to: #{env.url}")
{http_headers, signing_headers} = collect_headers_for_signature(env)
signature = Signature.sign(key, signing_headers, has_body: has_body(env))
set_headers(env, [{"signature", signature} | http_headers])
end
defp has_body(%{body: body}) when body in [nil, ""], do: false
defp has_body(_), do: true
defp set_headers(env, []), do: env
defp set_headers(env, [{key, val} | rest]) do
headers = :proplists.delete(key, env.headers)
headers = [{key, val} | headers]
set_headers(%{env | headers: headers}, rest)
end
# Returns tuple.
# First element is headers+values which need to be added to the HTTP request.
# Second element are all headers to be used for signing, including already existing and pseudo headers.
defp collect_headers_for_signature(env) do
{request_target, host} = get_request_target_and_host(env)
date = http_date()
# content-length is always automatically set later on
# since they are needed to establish working connection.
# Similarly host will always be set for HTTP/1, and technically may be omitted for HTTP/2+
# but Tesla doesnt handle it well if we preset it ourselves (and seems to set it even for HTTP/2 anyway)
http_headers = [{"date", date}]
signing_headers = %{
"(request-target)" => request_target,
"host" => host,
"date" => date
}
if has_body(env) do
append_body_headers(env, http_headers, signing_headers)
else
{http_headers, signing_headers}
end
end
defp append_body_headers(env, http_headers, signing_headers) do
content_length = byte_size(env.body)
digest = digest_value(env)
http_headers = [{"digest", digest} | http_headers]
signing_headers =
Map.merge(signing_headers, %{
"digest" => digest,
"content-length" => content_length
})
{http_headers, signing_headers}
end
defp get_request_target_and_host(env) do
uri = URI.parse(env.url)
rt = "#{env.method} #{uri.path}"
host = host_from_uri(uri)
{rt, host}
end
defp digest_value(env) do
# case Tesla.get_header(env, "digest")
encoded_hash = :crypto.hash(:sha256, env.body) |> Base.encode64()
"SHA-256=" <> encoded_hash
end
defp host_from_uri(%URI{port: port, scheme: scheme, host: host}) do
# https://httpwg.org/specs/rfc9110.html#field.host
# https://www.rfc-editor.org/rfc/rfc3986.html#section-3.2.3
if port == URI.default_port(scheme) do
host
else
"#{host}:#{port}"
end
end
defp http_date() do
now = NaiveDateTime.utc_now()
Timex.lformat!(now, "{WDshort}, {0D} {Mshort} {YYYY} {h24}:{m}:{s} GMT", "en")
end
end

View file

@ -1,23 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTTP.Request do
@moduledoc """
Request struct.
"""
defstruct method: :get, url: "", query: [], headers: [], body: "", opts: []
@type method :: :head | :get | :delete | :trace | :options | :post | :put | :patch
@type url :: String.t()
@type headers :: [{String.t(), String.t()}]
@type t :: %__MODULE__{
method: method(),
url: url(),
query: keyword(),
headers: headers(),
body: String.t(),
opts: keyword()
}
end

View file

@ -1,102 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTTP.RequestBuilder do
@moduledoc """
Helper functions for building Tesla requests
"""
alias Pleroma.HTTP.Request
alias Tesla.Multipart
@mix_env Mix.env()
@doc """
Creates new request
"""
@spec new(Request.t()) :: Request.t()
def new(%Request{} = request \\ %Request{}), do: request
@doc """
Specify the request method when building a request
"""
@spec method(Request.t(), Request.method()) :: Request.t()
def method(request, m), do: %{request | method: m}
@doc """
Specify the request method when building a request
"""
@spec url(Request.t(), Request.url()) :: Request.t()
def url(request, u), do: %{request | url: u}
@doc """
Add headers to the request
"""
@spec headers(Request.t(), Request.headers()) :: Request.t()
def headers(request, headers) do
headers_list = maybe_add_user_agent(headers, @mix_env)
%{request | headers: headers_list}
end
@doc """
Add custom, per-request middleware or adapter options to the request
"""
@spec opts(Request.t(), keyword()) :: Request.t()
def opts(request, options), do: %{request | opts: options}
@doc """
Add optional parameters to the request
"""
@spec add_param(Request.t(), atom(), atom(), any()) :: Request.t()
def add_param(request, :query, :query, values), do: %{request | query: values}
def add_param(request, :body, :body, value), do: %{request | body: value}
def add_param(request, :body, key, value) do
request
|> Map.put(:body, Multipart.new())
|> Map.update!(
:body,
&Multipart.add_field(
&1,
key,
Jason.encode!(value),
headers: [{"content-type", "application/json"}]
)
)
end
def add_param(request, :file, name, path) do
request
|> Map.put(:body, Multipart.new())
|> Map.update!(:body, &Multipart.add_file(&1, path, name: name))
end
def add_param(request, :form, name, value) do
Map.update(request, :body, %{name => value}, &Map.put(&1, name, value))
end
def add_param(request, location, key, value) do
Map.update(request, location, [{key, value}], &(&1 ++ [{key, value}]))
end
def convert_to_keyword(request) do
request
|> Map.from_struct()
|> Enum.into([])
end
defp maybe_add_user_agent(headers, :test) do
with true <- Pleroma.Config.get([:http, :send_user_agent]) do
[{"user-agent", Pleroma.Application.user_agent()} | headers]
else
_ ->
headers
end
end
defp maybe_add_user_agent(headers, _),
do: [{"user-agent", Pleroma.Application.user_agent()} | headers]
end

View file

@ -10,15 +10,15 @@ defmodule Pleroma.HTTP.Tzdata do
alias Pleroma.HTTP
@impl true
def get(url, headers, options) do
with {:ok, %Tesla.Env{} = env} <- HTTP.get(url, headers, options) do
def get(url, headers, _options) do
with {:ok, %Tesla.Env{} = env} <- HTTP.get(url, headers) do
{:ok, {env.status, env.headers, env.body}}
end
end
@impl true
def head(url, headers, options) do
with {:ok, %Tesla.Env{} = env} <- HTTP.head(url, headers, options) do
def head(url, headers, _options) do
with {:ok, %Tesla.Env{} = env} <- HTTP.head(url, headers) do
{:ok, {env.status, env.headers}}
end
end

View file

@ -5,8 +5,8 @@
defmodule Pleroma.HTTP.WebPush do
@moduledoc false
def post(url, payload, headers, options \\ []) do
def post(url, payload, headers, _options) do
list_headers = Map.to_list(headers)
Pleroma.HTTP.post(url, payload, list_headers, options)
Pleroma.HTTP.post(url, payload, list_headers)
end
end

View file

@ -43,6 +43,4 @@ def host(url_or_host) when is_binary(url_or_host) do
url_or_host
end
end
defdelegate set_request_signatures(url_or_host), to: Instance
end

View file

@ -26,7 +26,6 @@ defmodule Pleroma.Instances.Instance do
field(:favicon, :string)
field(:metadata_updated_at, :naive_datetime)
field(:nodeinfo, :map, default: %{})
field(:has_request_signatures, :boolean)
timestamps()
end
@ -40,8 +39,7 @@ def changeset(struct, params \\ %{}) do
:unreachable_since,
:favicon,
:nodeinfo,
:metadata_updated_at,
:has_request_signatures
:metadata_updated_at
])
|> validate_required([:host])
|> unique_constraint(:host)
@ -244,7 +242,7 @@ defp scrape_nodeinfo(%URI{} = instance_uri) do
{:ok,
Enum.find(links, &(&1["rel"] == "http://nodeinfo.diaspora.software/ns/schema/2.0"))},
{:ok, %Tesla.Env{body: data}} <-
Pleroma.HTTP.get(href, [{"accept", "application/json"}], []),
Pleroma.HTTP.get(href, [{"accept", "application/json"}]),
{:length, true} <- {:length, String.length(data) < 50_000},
{:ok, nodeinfo} <- Jason.decode(data) do
nodeinfo
@ -272,7 +270,7 @@ defp scrape_favicon(%URI{} = instance_uri) do
with true <- Pleroma.Config.get([:instances_favicons, :enabled]),
{_, true} <- {:reachable, reachable?(instance_uri.host)},
{:ok, %Tesla.Env{body: html}} <-
Pleroma.HTTP.get(to_string(instance_uri), [{"accept", "text/html"}], []),
Pleroma.HTTP.get(to_string(instance_uri), [{"accept", "text/html"}]),
{_, [favicon_rel | _]} when is_binary(favicon_rel) <-
{:parse, html |> Floki.parse_document!() |> Floki.attribute("link[rel=icon]", "href")},
{_, favicon} when is_binary(favicon) <-
@ -332,24 +330,4 @@ def get_cached_by_url(url_or_host) do
end)
end
end
def set_request_signatures(url_or_host) when is_binary(url_or_host) do
host = host(url_or_host)
existing_record = Repo.get_by(Instance, %{host: host})
changes = %{has_request_signatures: true}
cond do
is_nil(existing_record) ->
%Instance{}
|> changeset(Map.put(changes, :host, host))
|> Repo.insert()
true ->
existing_record
|> changeset(changes)
|> Repo.update()
end
end
def set_request_signatures(_), do: {:error, :invalid_input}
end

View file

@ -1,95 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.JobQueueMonitor do
use GenServer
@initial_state %{workers: %{}, queues: %{}, processed_jobs: 0}
@queue %{processed_jobs: 0, success: 0, failure: 0}
@operation %{processed_jobs: 0, success: 0, failure: 0}
def start_link(_) do
GenServer.start_link(__MODULE__, @initial_state, name: __MODULE__)
end
@impl true
def init(state) do
:telemetry.attach(
"oban-monitor-failure",
[:oban, :job, :exception],
&Pleroma.JobQueueMonitor.handle_event/4,
nil
)
:telemetry.attach(
"oban-monitor-success",
[:oban, :job, :stop],
&Pleroma.JobQueueMonitor.handle_event/4,
nil
)
{:ok, state}
end
def stats do
GenServer.call(__MODULE__, :stats)
end
def handle_event([:oban, :job, event], %{duration: duration}, meta, _) do
GenServer.cast(
__MODULE__,
{:process_event, mapping_status(event), duration, meta}
)
end
@impl true
def handle_call(:stats, _from, state) do
{:reply, state, state}
end
@impl true
def handle_cast({:process_event, status, duration, meta}, state) do
state =
state
|> Map.update!(:workers, fn workers ->
workers
|> Map.put_new(meta.worker, %{})
|> Map.update!(meta.worker, &update_worker(&1, status, meta, duration))
end)
|> Map.update!(:queues, fn workers ->
workers
|> Map.put_new(meta.queue, @queue)
|> Map.update!(meta.queue, &update_queue(&1, status, meta, duration))
end)
|> Map.update!(:processed_jobs, &(&1 + 1))
{:noreply, state}
end
defp update_worker(worker, status, meta, duration) do
worker
|> Map.put_new(meta.args["op"], @operation)
|> Map.update!(meta.args["op"], &update_op(&1, status, meta, duration))
end
defp update_op(op, :enqueue, _meta, _duration) do
op
|> Map.update!(:enqueued, &(&1 + 1))
end
defp update_op(op, status, _meta, _duration) do
op
|> Map.update!(:processed_jobs, &(&1 + 1))
|> Map.update!(status, &(&1 + 1))
end
defp update_queue(queue, status, _meta, _duration) do
queue
|> Map.update!(:processed_jobs, &(&1 + 1))
|> Map.update!(status, &(&1 + 1))
end
defp mapping_status(:stop), do: :success
defp mapping_status(:exception), do: :failure
end

View file

@ -1,7 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Logging do
@callback error(String.t()) :: any()
end

View file

@ -144,7 +144,7 @@ defp warn_on_no_object_preloaded(ap_id) do
Logger.debug("Backtrace: #{inspect(Process.info(:erlang.self(), :current_stacktrace))}")
end
def normalize(_, options \\ [fetch: false, id_only: false])
def normalize(_, options \\ [fetch: false])
# If we pass an Activity to Object.normalize(), we can try to use the preloaded object.
# Use this whenever possible, especially when walking graphs in an O(N) loop!
@ -173,9 +173,6 @@ def normalize(%{"id" => ap_id}, options), do: normalize(ap_id, options)
def normalize(ap_id, options) when is_binary(ap_id) do
cond do
Keyword.get(options, :id_only) ->
ap_id
Keyword.get(options, :fetch) ->
case Fetcher.fetch_object_from_id(ap_id, options) do
{:ok, object} -> object
@ -365,28 +362,6 @@ def local?(%Object{data: %{"id" => id}}) do
String.starts_with?(id, Pleroma.Web.Endpoint.url() <> "/")
end
@doc """
Builds an Ecto query (not executed here) for all objects whose `inReplyTo`
points at the given object's AP id, ordered oldest-first by database id.

With `self_only: true` in `opts`, the result is restricted to replies
authored by the same actor as the parent object.
"""
def replies(object, opts \\ []) do
  # Resolve to the local Object record; never fetch from remote here.
  object = Object.normalize(object, fetch: false)

  query =
    Object
    |> where(
      [o],
      fragment("(?)->>'inReplyTo' = ?", o.data, ^object.data["id"])
    )
    |> order_by([o], asc: o.id)

  if opts[:self_only] do
    # Only replies from the parent object's own author.
    actor = object.data["actor"]
    where(query, [o], fragment("(?)->>'actor' = ?", o.data, ^actor))
  else
    query
  end
end
@doc "Same as `replies/2` but restricted to replies by the object's own author."
def self_replies(object, opts \\ []),
  do: replies(object, Keyword.put(opts, :self_only, true))
def tags(%Object{data: %{"tag" => tags}}) when is_list(tags), do: tags
def tags(_), do: []

View file

@ -9,7 +9,6 @@ defmodule Pleroma.Object.Fetcher do
alias Pleroma.Object
alias Pleroma.Object.Containment
alias Pleroma.Repo
alias Pleroma.Signature
alias Pleroma.Web.ActivityPub.InternalFetchActor
alias Pleroma.Web.ActivityPub.ObjectValidator
alias Pleroma.Web.ActivityPub.Transmogrifier
@ -227,36 +226,6 @@ defp prepare_activity_params(data) do
|> Maps.put_if_present("bcc", data["bcc"])
end
defp make_signature(id, date) do
uri = URI.parse(id)
signature =
InternalFetchActor.get_actor()
|> Signature.sign(%{
"(request-target)": "get #{uri.path}",
host: uri.host,
date: date
})
{"signature", signature}
end
defp sign_fetch(headers, id, date) do
if Pleroma.Config.get([:activitypub, :sign_object_fetches]) do
[make_signature(id, date) | headers]
else
headers
end
end
defp maybe_date_fetch(headers, date) do
if Pleroma.Config.get([:activitypub, :sign_object_fetches]) do
[{"date", date} | headers]
else
headers
end
end
@doc """
Fetches arbitrary remote object and performs basic safety and authenticity checks.
When the fetch URL is known to already be a canonical AP id, checks are stricter.
@ -402,20 +371,25 @@ defp get_final_id(final_url, _intial_url) do
@doc "Do NOT use; only public for use in tests"
def get_object(id) do
date = Pleroma.Signature.signed_date()
headers =
[
# The first is required by spec, the second provided as a fallback for buggy implementations
{"accept", "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\""},
{"accept", "application/activity+json"}
]
|> maybe_date_fetch(date)
|> sign_fetch(id, date)
http_opts =
if Pleroma.Config.get([:activitypub, :sign_object_fetches]) do
signing_actor = InternalFetchActor.get_actor() |> Pleroma.User.SigningKey.load_key()
signing_key = signing_actor.signing_key
[httpsig: %{signing_key: signing_key}]
else
[]
end
with {:ok, %{body: body, status: code, headers: headers, url: final_url}}
when code in 200..299 <-
HTTP.Backoff.get(id, headers),
HTTP.Backoff.get(id, headers, http_opts),
{:has_content_type, {_, content_type}} <-
{:has_content_type, List.keyfind(headers, "content-type", 0)},
{:parse_content_type, {:ok, "application", subtype, type_params}} <-
@ -443,6 +417,13 @@ def get_object(id) do
{:ok, %{status: code}} when code in [404, 410] ->
{:error, :not_found}
{:ok, %{status: code, headers: headers}} ->
{:error, {:http_error, code, headers}}
# connection/protocol-related error
{:ok, %Tesla.Env{} = env} ->
{:error, {:http_error, :connect, env}}
{:error, e} ->
{:error, e}

View file

@ -86,6 +86,16 @@ def paginate(query, options, :offset, table_binding) do
|> restrict(:limit, options, table_binding)
end
@doc """
Unwraps a result list for a query paginated by a foreign id.
Usually you want to keep those foreign ids around until after pagination Link headers got generated.
"""
@spec unwrap([%{id: any(), entry: any()}]) :: [any()]
def unwrap(list) when is_list(list) do
  # Matching fn head keeps the original contract: any element without an
  # :entry key raises FunctionClauseError, order is preserved.
  Enum.map(list, fn %{entry: entry} -> entry end)
end
defp cast_params(params) do
param_types = %{
min_id: params[:id_type] || :string,
@ -94,13 +104,31 @@ defp cast_params(params) do
offset: :integer,
limit: :integer,
skip_extra_order: :boolean,
skip_order: :boolean
skip_order: :boolean,
order_asc: :boolean
}
params = Map.delete(params, :id_type)
changeset = cast({%{}, param_types}, params, Map.keys(param_types))
changeset.changes
end
defp order_statement(query, table_binding, :asc) do
order_by(
query,
[{u, table_position(query, table_binding)}],
fragment("? asc nulls last", u.id)
)
end
defp order_statement(query, table_binding, :desc) do
order_by(
query,
[{u, table_position(query, table_binding)}],
fragment("? desc nulls last", u.id)
)
end
defp restrict(query, :min_id, %{min_id: min_id}, table_binding) do
where(query, [{q, table_position(query, table_binding)}], q.id > ^min_id)
end
@ -118,19 +146,16 @@ defp restrict(query, :order, %{skip_order: true}, _), do: query
defp restrict(%{order_bys: [_ | _]} = query, :order, %{skip_extra_order: true}, _), do: query
defp restrict(query, :order, %{min_id: _}, table_binding) do
order_by(
query,
[{u, table_position(query, table_binding)}],
fragment("? asc nulls last", u.id)
)
order_statement(query, table_binding, :asc)
end
defp restrict(query, :order, _options, table_binding) do
order_by(
query,
[{u, table_position(query, table_binding)}],
fragment("? desc nulls last", u.id)
)
defp restrict(query, :order, %{max_id: _}, table_binding) do
order_statement(query, table_binding, :desc)
end
defp restrict(query, :order, options, table_binding) do
dir = if options[:order_asc], do: :asc, else: :desc
order_statement(query, table_binding, dir)
end
defp restrict(query, :offset, %{offset: offset}, _table_binding) do
@ -150,11 +175,9 @@ defp restrict(query, :limit, options, _table_binding) do
defp restrict(query, _, _, _), do: query
defp enforce_order(result, %{min_id: _}) do
result
|> Enum.reverse()
end
defp enforce_order(result, %{min_id: _, order_asc: true}), do: result
defp enforce_order(result, %{min_id: _}), do: Enum.reverse(result)
defp enforce_order(result, %{max_id: _, order_asc: true}), do: Enum.reverse(result)
defp enforce_order(result, _), do: result
defp table_position(%Ecto.Query{} = query, binding_name) do

View file

@ -109,7 +109,9 @@ def call(conn = %{method: method}, url, opts) when method in @methods do
with {:ok, nil} <- @cachex.get(:failed_proxy_url_cache, url),
{:ok, status, headers, body} <- request(method, url, req_headers, client_opts),
:ok <-
header_length_constraint(
check_length_constraint(
method,
body,
headers,
Keyword.get(opts, :max_body_length, @max_body_length)
) do
@ -342,7 +344,9 @@ defp build_csp_headers(headers) do
List.keystore(headers, "content-security-policy", 0, {"content-security-policy", "sandbox"})
end
defp header_length_constraint(headers, limit) when is_integer(limit) and limit > 0 do
defp check_length_constraint(_, _, _, limit) when not is_integer(limit) or limit <= 0, do: :ok
defp check_length_constraint(:head, _, headers, limit) do
with {_, size} <- List.keyfind(headers, "content-length", 0),
{size, _} <- Integer.parse(size),
true <- size <= limit do
@ -356,7 +360,15 @@ defp header_length_constraint(headers, limit) when is_integer(limit) and limit >
end
end
defp header_length_constraint(_, _), do: :ok
defp check_length_constraint(_, body, _, limit) when is_binary(body) do
if byte_size(body) <= limit do
:ok
else
{:error, :body_too_large}
end
end
defp check_length_constraint(_, _, _, _), do: :ok
defp track_failed_url(url, error, opts) do
ttl =
@ -366,6 +378,6 @@ defp track_failed_url(url, error, opts) do
nil
end
@cachex.put(:failed_proxy_url_cache, url, true, ttl: ttl)
@cachex.put(:failed_proxy_url_cache, url, true, expire: ttl)
end
end

View file

@ -1,20 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.ReverseProxy.Client do
  @moduledoc """
  Behaviour for HTTP clients used by the reverse proxy.

  A client performs the upstream request, optionally streams the response
  body chunk by chunk, and releases the connection when done.
  """

  @type status :: pos_integer()
  @type header_name :: String.t()
  @type header_value :: String.t()
  @type headers :: [{header_name(), header_value()}]

  # Perform an HTTP request: (method, url, headers, body, opts).
  # A 4-tuple / reference return carries a handle for chunked body reads.
  @callback request(atom(), String.t(), headers(), String.t(), list()) ::
              {:ok, status(), headers(), reference() | map()}
              | {:ok, status(), headers()}
              | {:ok, reference()}
              | {:error, term()}

  # Read the next body chunk; :done signals the stream is exhausted.
  @callback stream_body(map()) :: {:ok, binary(), map()} | :done | {:error, atom() | String.t()}

  # Release the underlying connection/resources.
  @callback close(reference() | pid() | map()) :: :ok
end

View file

@ -1,77 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.ReverseProxy.Client.Tesla do
  @moduledoc """
  Tesla-backed implementation of the `Pleroma.ReverseProxy.Client`
  behaviour, delivering response bodies as a chunk stream.
  """

  @behaviour Pleroma.ReverseProxy.Client

  @type headers() :: [{String.t(), String.t()}]
  @type status() :: pos_integer()

  @spec request(atom(), String.t(), headers(), String.t(), keyword()) ::
          {:ok, status(), headers}
          | {:ok, status(), headers, map()}
          | {:error, atom() | String.t()}
          | no_return()
  @impl true
  def request(method, url, headers, body, opts \\ []) do
    check_adapter()
    # Ask the HTTP layer for the body as a chunk stream instead of a fully
    # buffered binary.
    opts = Keyword.put(opts, :body_as, :chunks)

    with {:ok, response} <-
           Pleroma.HTTP.request(
             method,
             url,
             body,
             headers,
             opts
           ) do
      # A map body is the chunk-stream handle; HEAD responses carry no body,
      # so only the status and headers are returned for them.
      if is_map(response.body) and method != :head do
        {:ok, response.status, response.headers, response.body}
      else
        {:ok, response.status, response.headers}
      end
    else
      {:error, error} -> {:error, error}
    end
  end

  @impl true
  @spec stream_body(map()) ::
          {:ok, binary(), map()} | {:error, atom() | String.t()} | :done | no_return()
  # Once the final chunk was delivered (fin: true) the stream is exhausted.
  def stream_body(%{pid: _pid, fin: true}) do
    :done
  end

  def stream_body(client) do
    case read_chunk!(client) do
      {:fin, body} ->
        # Last chunk: deliver it and mark the stream as finished for the
        # next call.
        {:ok, body, Map.put(client, :fin, true)}

      {:nofin, part} ->
        {:ok, part, client}

      {:error, error} ->
        {:error, error}
    end
  end

  # Delegate reading one chunk to the configured adapter's read_chunk/3.
  defp read_chunk!(%{pid: pid, stream: stream, opts: opts}) do
    adapter = check_adapter()
    adapter.read_chunk(pid, stream, opts)
  end

  @impl true
  @spec close(map) :: :ok | no_return()
  # NOTE(review): closing is a no-op here; presumably the adapter manages
  # connection lifetime itself — confirm.
  def close(%{pid: _pid}) do
    :ok
  end

  # NOTE(review): despite the name this performs no validation — it just
  # returns the configured Tesla adapter.
  defp check_adapter do
    adapter = Application.get_env(:tesla, :adapter)

    adapter
  end
end

View file

@ -1,28 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.ReverseProxy.Client.Wrapper do
  @moduledoc "Meta-client that calls the appropriate client from the config."
  @behaviour Pleroma.ReverseProxy.Client

  @impl true
  def request(method, url, headers, body \\ "", opts \\ []) do
    impl = client()
    impl.request(method, url, headers, body, opts)
  end

  @impl true
  def stream_body(ref) do
    client().stream_body(ref)
  end

  @impl true
  def close(ref) do
    client().close(ref)
  end

  # Resolve the concrete client module from the configured Tesla adapter.
  defp client do
    client(Application.get_env(:tesla, :adapter))
  end

  # Finch-backed Tesla adapter -> use our Tesla client implementation.
  defp client({Tesla.Adapter.Finch, _}), do: Pleroma.ReverseProxy.Client.Tesla
  # Anything else: fall back to the explicitly configured client module.
  defp client(_), do: Pleroma.Config.get!(Pleroma.ReverseProxy.Client)
end

View file

@ -5,58 +5,73 @@
defmodule Pleroma.Signature do
@behaviour HTTPSignatures.Adapter
alias HTTPSignatures.HTTPKey
alias Pleroma.User
alias Pleroma.User.SigningKey
require Logger
def fetch_public_key(conn) do
with {_, %{"keyId" => kid}} <- {:keyid, HTTPSignatures.signature_for_conn(conn)},
{_, {:ok, %SigningKey{} = sk}, _} <-
{:fetch, SigningKey.get_or_fetch_by_key_id(kid), kid},
def fetch_public_key(kid, _) do
with {_, {:ok, %SigningKey{} = sk}} <- {:fetch, SigningKey.get_or_fetch_by_key_id(kid)},
{_, {%User{} = key_user, _}} <- {:user, {User.get_by_id(sk.user_id), sk.user_id}},
{_, {:ok, decoded_key}} <- {:decode, SigningKey.public_key_decoded(sk)} do
{:ok, decoded_key}
{:ok, %HTTPKey{key: decoded_key, user_data: %{"key_user" => key_user}}}
else
{:fetch, error, kid} ->
Logger.error("Failed to acquire key from signature: #{kid} #{inspect(error)}")
{:error, {:fetch, error}}
e ->
{:error, e}
handle_common_errors(e, kid, "acquire")
end
end
def refetch_public_key(conn) do
with {_, %{"keyId" => kid}} <- {:keyid, HTTPSignatures.signature_for_conn(conn)},
{_, {:ok, %SigningKey{} = sk}, _} <- {:fetch, SigningKey.refresh_by_key_id(kid), kid},
def refetch_public_key(kid, _) do
with {_, {:ok, %SigningKey{} = sk}} <- {:fetch, SigningKey.refresh_by_key_id(kid)},
{_, {%User{} = key_user, _}} <- {:user, {User.get_by_id(sk.user_id), sk.user_id}},
{_, {:ok, decoded_key}} <- {:decode, SigningKey.public_key_decoded(sk)} do
{:ok, decoded_key}
{:ok, %HTTPKey{key: decoded_key, user_data: %{"key_user" => key_user}}}
else
{:fetch, {:error, :too_young}, kid} ->
{:fetch, {:error, :too_young}} ->
Logger.debug("Refusing to refetch recently updated key: #{kid}")
{:error, {:fetch, :too_young}}
{:error, {:too_young, kid}}
{:fetch, {:error, :unknown}, kid} ->
{:fetch, {:error, :unknown}} ->
Logger.warning("Attempted to refresh unknown key; this should not happen: #{kid}")
{:error, {:fetch, :unknown}}
{:error, {:unknown, kid}}
{:fetch, error, kid} ->
Logger.error("Failed to refresh stale key from signature: #{kid} #{inspect(error)}")
e ->
handle_common_errors(e, kid, "refresh stale")
end
end
defp handle_common_errors(error, kid, action_name) do
case error do
{:fetch, {:error, :not_found}} ->
{:halt, {:error, :gone}}
{:fetch, {:reject, reason}} ->
{:halt, {:error, {:reject, reason}}}
{:fetch, error} ->
Logger.error("Failed to #{action_name} key from signature: #{kid} #{inspect(error)}")
{:error, {:fetch, error}}
{:user, {_, uid}} ->
Logger.warning(
"Failed to resolve user (id=#{uid}) for retrieved signing key. Race condition?"
)
e ->
{:error, e}
end
end
def sign(%User{} = user, headers) do
with {:ok, private_key} <- SigningKey.private_key(user) do
HTTPSignatures.sign(private_key, SigningKey.local_key_id(user.ap_id), headers)
def sign(%SigningKey{} = key, headers, opts \\ []) do
with {:ok, private_key_binary} <- SigningKey.private_key_binary(key) do
HTTPSignatures.sign(
%HTTPKey{key: private_key_binary},
key.key_id,
headers,
opts
)
else
_ -> raise "Tried to sign with #{key.key_id} but it has no private key!"
end
end
def signed_date, do: signed_date(NaiveDateTime.utc_now())
def signed_date(%NaiveDateTime{} = date) do
Timex.lformat!(date, "{WDshort}, {0D} {Mshort} {YYYY} {h24}:{m}:{s} GMT", "en")
end
end

View file

@ -7,7 +7,6 @@ defmodule Pleroma.Stats do
import Ecto.Query
alias Pleroma.CounterCache
alias Pleroma.Repo
alias Pleroma.User
alias Pleroma.Instances.Instance
@ -107,15 +106,6 @@ def calculate_stat_data do
}
end
@spec get_status_visibility_count(String.t() | nil) :: map()
def get_status_visibility_count(instance \\ nil) do
if is_nil(instance) do
CounterCache.get_sum()
else
CounterCache.get_by_instance(instance)
end
end
@impl true
def handle_continue(:calculate_stats, _) do
stats = calculate_stat_data()

View file

@ -3,7 +3,8 @@
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Uploaders.Uploader do
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
@mix_env Mix.env()

View file

@ -91,6 +91,9 @@ defmodule Pleroma.User do
@cachex Pleroma.Config.get([:cachex, :provider], Cachex)
# hide sensitive data from logs
@derive {Inspect, except: [:password, :password_hash, :email]}
schema "users" do
field(:bio, :string, default: "")
field(:raw_bio, :string)
@ -270,13 +273,13 @@ def unquote(:"#{outgoing_relation_target}_ap_ids")(user, restrict_deactivated? \
def cached_blocked_users_ap_ids(user) do
@cachex.fetch!(:user_cache, "blocked_users_ap_ids:#{user.ap_id}", fn _ ->
blocked_users_ap_ids(user)
{:commit, blocked_users_ap_ids(user)}
end)
end
def cached_muted_users_ap_ids(user) do
@cachex.fetch!(:user_cache, "muted_users_ap_ids:#{user.ap_id}", fn _ ->
muted_users_ap_ids(user)
{:commit, muted_users_ap_ids(user)}
end)
end
@ -286,11 +289,6 @@ def cached_muted_users_ap_ids(user) do
defdelegate following_ap_ids(user), to: FollowingRelationship
defdelegate get_follow_requests_query(user), to: FollowingRelationship
def get_follow_requests(user) do
get_follow_requests_query(user)
|> Repo.all()
end
defdelegate search(query, opts \\ []), to: User.Search
@doc """
@ -720,13 +718,6 @@ def force_password_reset(user), do: update_password_reset_pending(user, true)
# Used to auto-register LDAP accounts which won't have a password hash stored locally
def register_changeset_ldap(struct, params = %{password: password})
when is_nil(password) do
params =
if Map.has_key?(params, :email) do
Map.put_new(params, :email, params[:email])
else
params
end
struct
|> cast(params, [
:name,
@ -1174,7 +1165,7 @@ def get_user_friends_ap_ids(user) do
@spec get_cached_user_friends_ap_ids(User.t()) :: [String.t()]
def get_cached_user_friends_ap_ids(user) do
@cachex.fetch!(:user_cache, "friends_ap_ids:#{user.ap_id}", fn _ ->
get_user_friends_ap_ids(user)
{:commit, get_user_friends_ap_ids(user)}
end)
end
@ -1484,17 +1475,17 @@ def get_recipients_from_activity(%Activity{recipients: to, actor: actor}) do
{:ok, list(UserRelationship.t())} | {:error, String.t()}
def mute(%User{} = muter, %User{} = mutee, params \\ %{}) do
notifications? = Map.get(params, :notifications, true)
expires_in = Map.get(params, :expires_in, 0)
duration = Map.get(params, :duration, 0)
with {:ok, user_mute} <- UserRelationship.create_mute(muter, mutee),
{:ok, user_notification_mute} <-
(notifications? && UserRelationship.create_notification_mute(muter, mutee)) ||
{:ok, nil} do
if expires_in > 0 do
if duration > 0 do
Pleroma.Workers.MuteExpireWorker.enqueue(
"unmute_user",
%{"muter_id" => muter.id, "mutee_id" => mutee.id},
schedule_in: expires_in
schedule_in: duration
)
end

View file

@ -7,7 +7,9 @@ defmodule Pleroma.User.Backup do
import Ecto.Changeset
import Ecto.Query
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
require Pleroma.Constants

View file

@ -8,6 +8,7 @@ defmodule Pleroma.User.SigningKey do
require Logger
@derive {Inspect, only: [:user_id, :key_id]}
@primary_key false
schema "signing_keys" do
belongs_to(:user, Pleroma.User, type: FlakeId.Ecto.CompatType)
@ -136,24 +137,22 @@ def public_key_pem(_e) do
{:error, "key not found"}
end
@spec private_key(User.t()) :: {:ok, binary()} | {:error, String.t()}
@spec private_key_binary(__MODULE__) :: {:ok, binary()} | {:error, String.t()}
@doc """
Given a user, return the private key for that user in binary format.
Given a key, return the corresponding private key in binary format.
"""
def private_key(%User{} = user) do
case Repo.preload(user, :signing_key) do
%{signing_key: %__MODULE__{private_key: private_key_pem}} ->
key =
private_key_pem
|> :public_key.pem_decode()
|> hd()
|> :public_key.pem_entry_decode()
def private_key_binary(%__MODULE__{private_key: private_key_pem}) do
key =
private_key_pem
|> :public_key.pem_decode()
|> hd()
|> :public_key.pem_entry_decode()
{:ok, key}
{:ok, key}
end
_ ->
{:error, "key not found"}
end
def private_key_binary(%__MODULE__{} = key) do
{:error, "key #{key.key_id} has no private key"}
end
@spec get_or_fetch_by_key_id(String.t()) :: {:ok, __MODULE__} | {:error, String.t()}
@ -208,7 +207,12 @@ def fetch_remote_key(key_id) do
else
e ->
Logger.debug("Failed to fetch remote key: #{inspect(e)}")
{:error, "Could not fetch key"}
case e do
{:error, e} -> {:error, e}
{:reject, reason} -> {:reject, reason}
_ -> {:error, {"Could not fetch key", e}}
end
end
end

View file

@ -67,7 +67,7 @@ def create(relationship_type, %User{} = source, %User{} = target) do
target_id: target.id
})
|> Repo.insert(
on_conflict: {:replace_all_except, [:id, :inserted_at]},
on_conflict: {:replace, [:relationship_type, :source_id, :target_id]},
conflict_target: [:source_id, :relationship_type, :target_id],
returning: true
)

View file

@ -31,21 +31,19 @@ defmodule Pleroma.Web do
def controller do
quote do
use Phoenix.Controller, namespace: Pleroma.Web
use Phoenix.Controller,
formats: [html: "View", json: "View"],
layouts: [html: Pleroma.Web.LayoutView]
import Plug.Conn
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
import Pleroma.Web.TranslationHelpers
unquote(verified_routes())
plug(:set_put_layout)
defp set_put_layout(conn, _) do
put_layout(conn, Pleroma.Config.get(:app_layout, "app.html"))
end
# Marks plugs intentionally skipped and blocks their execution if present in plugs chain
defp skip_plug(conn, plug_modules) do
plug_modules
@ -233,14 +231,18 @@ def router do
def channel do
quote do
use Phoenix.Channel
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
end
end
defp view_helpers do
quote do
# Use all HTML functionality (forms, tags, etc)
use Phoenix.HTML
import Phoenix.HTML
import Phoenix.HTML.Form
use PhoenixHTMLHelpers
# Import LiveView and .heex helpers (live_render, live_patch, <.form>, etc)
import Phoenix.LiveView.Helpers
@ -249,7 +251,10 @@ defp view_helpers do
import Phoenix.View
import Pleroma.Web.ErrorHelpers
import Pleroma.Web.Gettext
use Gettext,
backend: Pleroma.Web.Gettext
unquote(verified_routes())
end
end

View file

@ -509,6 +509,28 @@ def fetch_activities_for_context(context, opts \\ %{}) do
|> Repo.all()
end
def fetch_objects_for_replies_collection(parent_ap_id, opts \\ %{}) do
opts =
opts
|> Map.put(:order_asc, true)
|> Map.put(:id_type, :integer)
from(o in Object,
where:
fragment("?->>'inReplyTo' = ?", o.data, ^parent_ap_id) and
fragment(
"(?->'to' \\? ?::text OR ?->'cc' \\? ?::text)",
o.data,
^Pleroma.Constants.as_public(),
o.data,
^Pleroma.Constants.as_public()
) and
fragment("?->>'type' <> 'Answer'", o.data),
select: %{id: o.id, ap_id: fragment("?->>'id'", o.data)}
)
|> Pagination.fetch_paginated(opts, :keyset)
end
@spec fetch_latest_direct_activity_id_for_context(String.t(), keyword() | map()) ::
FlakeId.Ecto.CompatType.t() | nil
def fetch_latest_direct_activity_id_for_context(context, opts \\ %{}) do
@ -1445,21 +1467,18 @@ def fetch_activities_query(recipients, opts \\ %{}) do
end
@doc """
Fetch favorites activities of user with order by sort adds to favorites
Fetch posts liked by the given user wrapped in a paginated list with IDs taken from the like activity
"""
@spec fetch_favourites(User.t(), map(), Pagination.type()) :: list(Activity.t())
def fetch_favourites(user, params \\ %{}, pagination \\ :keyset) do
@spec fetch_favourited_with_fav_id(User.t(), map()) ::
list(%{id: binary(), entry: Activity.t()})
def fetch_favourited_with_fav_id(user, params \\ %{}) do
user.ap_id
|> Activity.Queries.by_actor()
|> Activity.Queries.by_type("Like")
|> Activity.with_joined_object()
|> Object.with_joined_activity()
|> select([like, object, activity], %{activity | object: object, pagination_id: like.id})
|> order_by([like, _, _], desc_nulls_last: like.id)
|> Pagination.fetch_paginated(
Map.merge(params, %{skip_order: true}),
pagination
)
|> select([like, object, create], %{id: like.id, entry: %{create | object: object}})
|> Pagination.fetch_paginated(params, :keyset)
end
defp maybe_update_cc(activities, [_ | _] = list_memberships, %User{ap_id: user_ap_id}) do
@ -1586,8 +1605,11 @@ defp object_to_user_data(data, additional) do
invisible = data["invisible"] || false
actor_type = data["type"] || "Person"
featured_address = data["featured"]
{:ok, pinned_objects} = fetch_and_prepare_featured_from_ap_id(featured_address)
{featured_address, pinned_objects} =
case process_featured_collection(data["featured"]) do
{:ok, featured_address, pinned_objects} -> {featured_address, pinned_objects}
_ -> {nil, %{}}
end
# first, check that the owner is correct
signing_key =
@ -1786,57 +1808,35 @@ def maybe_handle_clashing_nickname(data) do
end
end
def pin_data_from_featured_collection(%{
"type" => "OrderedCollection",
"first" => first
}) do
with {:ok, page} <- Fetcher.fetch_and_contain_remote_object_from_id(first) do
page
|> Map.get("orderedItems")
|> Map.new(fn %{"id" => object_ap_id} -> {object_ap_id, NaiveDateTime.utc_now()} end)
else
e ->
Logger.error("Could not decode featured collection at fetch #{first}, #{inspect(e)}")
%{}
end
end
def process_featured_collection(nil), do: {:ok, nil, %{}}
def process_featured_collection(""), do: {:ok, nil, %{}}
def pin_data_from_featured_collection(
%{
"type" => type
} = collection
)
when type in ["OrderedCollection", "Collection"] do
with {:ok, objects} <- Collections.Fetcher.fetch_collection(collection) do
# Items can either be a map _or_ a string
objects
|> Map.new(fn
ap_id when is_binary(ap_id) -> {ap_id, NaiveDateTime.utc_now()}
%{"id" => object_ap_id} -> {object_ap_id, NaiveDateTime.utc_now()}
end)
else
e ->
Logger.warning("Failed to fetch featured collection #{collection}, #{inspect(e)}")
%{}
end
end
def process_featured_collection(featured_collection) do
featured_address =
case get_ap_id(featured_collection) do
id when is_binary(id) -> id
_ -> nil
end
def pin_data_from_featured_collection(obj) do
Logger.error("Could not parse featured collection #{inspect(obj)}")
%{}
end
# TODO: allow passing item/page limit as function opt and use here
case Collections.Fetcher.fetch_collection(featured_collection) do
{:ok, items} ->
now = NaiveDateTime.utc_now()
dated_obj_ids = Map.new(items, fn obj -> {get_ap_id(obj), now} end)
{:ok, featured_address, dated_obj_ids}
def fetch_and_prepare_featured_from_ap_id(nil) do
{:ok, %{}}
end
error ->
Logger.error(
"Could not decode featured collection at fetch #{inspect(featured_collection)}: #{inspect(error)}"
)
def fetch_and_prepare_featured_from_ap_id(ap_id) do
with {:ok, data} <- Fetcher.fetch_and_contain_remote_object_from_id(ap_id) do
{:ok, pin_data_from_featured_collection(data)}
else
e ->
Logger.error("Could not decode featured collection at fetch #{ap_id}, #{inspect(e)}")
{:ok, %{}}
error =
case error do
{:error, e} -> e
e -> e
end
{:error, error}
end
end

View file

@ -22,8 +22,6 @@ defmodule Pleroma.Web.ActivityPub.ActivityPubController do
alias Pleroma.Web.Plugs.EnsureAuthenticatedPlug
alias Pleroma.Web.Plugs.FederatingPlug
require Logger
action_fallback(:errors)
@federating_only_actions [:internal_fetch, :relay, :relay_following, :relay_followers]
@ -64,7 +62,7 @@ defp relay_active?(conn, _) do
Render the user's AP data
WARNING: we cannot actually check if the request has a fragment! so let's play defensively
- IF we have a valid signature, serve full user
- IF we do not, and authorized_fetch_mode is enabled, serve the key only
- IF we do not, and authorized_fetch_mode is enabled, serve only the key and bare minimum info
- OTHERWISE, serve the full actor (since we don't need to worry about the signature)
"""
def user(%{assigns: %{valid_signature: true}} = conn, params) do
@ -96,7 +94,7 @@ def render_key_only_user(conn, %{"nickname" => nickname}) do
conn
|> put_resp_content_type("application/activity+json")
|> put_view(UserView)
|> render("keys.json", %{user: user})
|> render("stripped_user.json", %{user: user})
else
nil -> {:error, :not_found}
%{local: false} -> {:error, :not_found}
@ -121,6 +119,35 @@ def object(%{assigns: assigns} = conn, _) do
end
end
def object_replies(%{assigns: assigns, query_params: params} = conn, _all_params) do
object_ap_id = conn.path_info |> Enum.reverse() |> tl() |> Enum.reverse()
object_ap_id = Endpoint.url() <> "/" <> Enum.join(object_ap_id, "/")
# Most other API params are converted to atoms by OpenAPISpex 3.x
# and therefore helper functions assume atoms. For consistency,
# also convert our params to atoms here.
params =
params
|> Map.new(fn {k, v} -> {String.to_existing_atom(k), v} end)
|> Map.put(:object_ap_id, object_ap_id)
|> Map.put(:order_asc, true)
|> Map.put(:conn, conn)
with %Object{} = object <- Object.get_cached_by_ap_id(object_ap_id),
user <- Map.get(assigns, :user, nil),
{_, true} <- {:visible?, Visibility.visible_for_user?(object, user)} do
conn
|> maybe_skip_cache(user)
|> set_cache_ttl_for(object)
|> put_resp_content_type("application/activity+json")
|> put_view(ObjectView)
|> render("object_replies.json", render_params: params)
else
{:visible?, false} -> {:error, :not_found}
nil -> {:error, :not_found}
end
end
def track_object_fetch(conn, nil), do: conn
def track_object_fetch(conn, object_id) do
@ -287,8 +314,7 @@ def outbox(
|> put_view(UserView)
|> render("activity_collection_page.json", %{
activities: activities,
pagination: ControllerHelper.get_pagination_fields(conn, activities),
iri: "#{user.ap_id}/outbox"
pagination: ControllerHelper.get_pagination_fields(conn, activities)
})
end
end
@ -304,8 +330,6 @@ def outbox(conn, %{"nickname" => nickname}) do
def inbox(%{assigns: %{valid_signature: true}} = conn, %{"nickname" => nickname} = params) do
with %User{} = recipient <- User.get_cached_by_nickname(nickname),
{:ok, %User{} = actor} <- User.get_or_fetch_by_ap_id(params["actor"]),
true <- Utils.recipient_in_message(recipient, actor, params),
params <- Utils.maybe_splice_recipient(recipient.ap_id, params) do
Federator.incoming_ap_doc(params)
json(conn, "ok")
@ -370,8 +394,7 @@ def read_inbox(
|> put_view(UserView)
|> render("activity_collection_page.json", %{
activities: activities,
pagination: ControllerHelper.get_pagination_fields(conn, activities),
iri: "#{user.ap_id}/inbox"
pagination: ControllerHelper.get_pagination_fields(conn, activities)
})
end

View file

@ -316,21 +316,18 @@ def block(blocker, blocked) do
@spec announce(User.t(), Object.t(), keyword()) :: {:ok, map(), keyword()}
def announce(actor, object, options \\ []) do
public? = Keyword.get(options, :public, false)
visibility = Keyword.get(options, :visibility, "public")
to =
cond do
actor.ap_id == Relay.ap_id() ->
[actor.follower_address]
public? and Visibility.is_local_public?(object) ->
[actor.follower_address, object.data["actor"], Utils.as_local_public()]
public? ->
[actor.follower_address, object.data["actor"], Pleroma.Constants.as_public()]
true ->
[actor.follower_address, object.data["actor"]]
{to, cc} =
if actor.ap_id == Relay.ap_id() do
{[actor.follower_address], []}
else
Pleroma.Web.CommonAPI.Utils.get_to_and_cc_for_visibility(
visibility,
actor.follower_address,
nil,
[object.data["actor"]]
)
end
{:ok,
@ -339,6 +336,7 @@ def announce(actor, object, options \\ []) do
"actor" => actor.ap_id,
"object" => object.data["id"],
"to" => to,
"cc" => cc,
"context" => object.data["context"],
"type" => "Announce",
"published" => Utils.make_date()

View file

@ -11,8 +11,8 @@ defmodule Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy do
require Logger
@adapter_options [
receive_timeout: 10_000
@http_options [
adapter: [receive_timeout: 10_000]
]
@impl true
@ -36,7 +36,7 @@ defp prefetch(url) do
end
end
defp fetch(url), do: HTTP.get(url, [], @adapter_options)
defp fetch(url), do: HTTP.get(url, [], @http_options)
defp preload(%{"object" => %{"attachment" => attachments}} = _message) do
Enum.each(attachments, fn

View file

@ -11,17 +11,39 @@ defmodule Pleroma.Web.ActivityPub.MRF.NormalizeMarkup do
@impl true
def history_awareness, do: :auto
@doc """
Runs `HTML.filter_tags/2` with the given scrubber over `obj[field]`.

Handles both a plain string value and a map of language -> string values
(e.g. `contentMap`-style fields): in the map case only binary values are
scrubbed, other values pass through untouched. If the field is absent or
of any other shape, `obj` is returned unchanged.
"""
def scrub_if_present(obj, field, scrubber) do
  case obj[field] do
    text when is_binary(text) ->
      update_in(obj[field], &HTML.filter_tags(&1, scrubber))

    map when is_map(map) ->
      map =
        Enum.into(map, %{}, fn
          {k, v} when is_binary(v) ->
            {k, HTML.filter_tags(v, scrubber)}

          # non-string values are left as-is
          {k, v} ->
            {k, v}
        end)

      put_in(obj[field], map)

    _ ->
      obj
  end
end
@impl true
def filter(%{"type" => type, "object" => child_object} = object)
when type in ["Create", "Update"] do
scrub_policy = Pleroma.Config.get([:mrf_normalize_markup, :scrub_policy])
content =
child_object["content"]
|> HTML.filter_tags(scrub_policy)
object = put_in(object, ["object", "content"], content)
child_object =
child_object
|> scrub_if_present("content", scrub_policy)
|> scrub_if_present("contentMap", scrub_policy)
object = put_in(object["object"], child_object)
{:ok, object}
end

View file

@ -112,7 +112,7 @@ defp get_int_header(headers, header_name, default \\ nil) do
defp is_remote_size_within_limit?(url) do
with {:ok, %{status: status, headers: headers} = _response} when status in 200..299 <-
Pleroma.HTTP.request(:head, url, nil, [], []) do
Pleroma.HTTP.head(url) do
content_length = get_int_header(headers, "content-length")
size_limit = Config.get([:mrf_steal_emoji, :size_limit], @size_limit)
@ -165,7 +165,6 @@ def filter(%{"object" => %{"emoji" => foreign_emojis, "actor" => actor}} = messa
if !Enum.empty?(new_emojis) do
Logger.info("Stole new emojis: #{inspect(new_emojis)}")
Pleroma.Emoji.reload()
end
end

View file

@ -63,10 +63,16 @@ def validate(%{"type" => "Undo"} = object, meta) do
|> Ecto.Changeset.apply_action(:insert) do
object = stringify_keys(object)
undone_object = Activity.get_by_ap_id(object["object"])
outgoing_blocks = Pleroma.Config.get([:activitypub, :outgoing_blocks])
# if we're undoing a block, and do not permit federating that:
do_not_federate =
Keyword.get(meta, :do_not_federate) ||
(Map.get(undone_object.data, "type") == "Block" && !outgoing_blocks)
meta =
meta
|> Keyword.put(:object_data, undone_object.data)
|> Keyword.put(:do_not_federate, do_not_federate)
{:ok, object, meta}
end

View file

@ -69,6 +69,7 @@ defp fix_tag(%{"tag" => tag} = data) when is_list(tag) do
defp fix_tag(%{"tag" => tag} = data) when is_map(tag), do: Map.put(data, "tag", [tag])
defp fix_tag(data), do: Map.drop(data, ["tag"])
# legacy internal *oma format
defp fix_replies(%{"replies" => replies} = data) when is_list(replies), do: data
defp fix_replies(%{"replies" => %{"first" => first}} = data) when is_binary(first) do
@ -85,9 +86,16 @@ defp fix_replies(%{"replies" => %{"first" => %{"items" => replies}}} = data)
when is_list(replies),
do: Map.put(data, "replies", replies)
defp fix_replies(%{"replies" => %{"first" => %{"orderedItems" => replies}}} = data)
when is_list(replies),
do: Map.put(data, "replies", replies)
defp fix_replies(%{"replies" => %{"items" => replies}} = data) when is_list(replies),
do: Map.put(data, "replies", replies)
defp fix_replies(%{"replies" => %{"orderedItems" => replies}} = data) when is_list(replies),
do: Map.put(data, "replies", replies)
defp fix_replies(data), do: Map.delete(data, "replies")
defp remote_mention_resolver(
@ -116,6 +124,8 @@ defp remote_mention_resolver(
end
end
defp fix_misskey_content(object = %{"htmlMfm" => true}), do: object
# See https://akkoma.dev/FoundKeyGang/FoundKey/issues/343
# Misskey/Foundkey drops some of the custom formatting when it sends remotely
# So this basically reprocesses the MFM source
@ -136,20 +146,13 @@ defp fix_misskey_content(
# See https://github.com/misskey-dev/misskey/pull/8787
# This is for compatibility with older Misskey instances
defp fix_misskey_content(%{"_misskey_content" => content} = object) when is_binary(content) do
mention_handler = fn nick, buffer, opts, acc ->
remote_mention_resolver(object, nick, buffer, opts, acc)
end
{linked, _, _} =
Utils.format_input(content, "text/x.misskeymarkdown", mention_handler: mention_handler)
object
|> Map.put("source", %{
"content" => content,
"mediaType" => "text/x.misskeymarkdown"
})
|> Map.put("content", linked)
|> Map.delete("_misskey_content")
|> fix_misskey_content()
end
defp fix_misskey_content(data), do: data

View file

@ -15,6 +15,7 @@ defmodule Pleroma.Web.ActivityPub.ObjectValidators.AttachmentValidator do
field(:type, :string)
field(:mediaType, :string, default: "application/octet-stream")
field(:name, :string)
field(:summary, :string)
field(:blurhash, :string)
embeds_many :url, UrlObjectValidator, primary_key: false do
@ -44,7 +45,7 @@ def changeset(struct, data) do
|> fix_url()
struct
|> cast(data, [:id, :type, :mediaType, :name, :blurhash])
|> cast(data, [:id, :type, :mediaType, :name, :summary, :blurhash])
|> cast_embed(:url, with: &url_changeset/2, required: true)
|> validate_inclusion(:type, ~w[Link Document Audio Image Video])
|> validate_required([:type, :mediaType])

View file

@ -31,6 +31,7 @@ defmacro activity_fields do
defmacro object_fields do
quote bind_quoted: binding() do
field(:content, :string)
field(:htmlMfm, :boolean)
field(:published, ObjectValidators.DateTime)
field(:updated, ObjectValidators.DateTime)

View file

@ -57,6 +57,8 @@ defp fix(data) do
|> fix_emoji_qualification()
|> CommonFixes.fix_actor()
|> CommonFixes.fix_activity_addressing()
|> prune_tags()
|> drop_remote_indicator()
data =
if Map.has_key?(data, "tag") do
@ -133,4 +135,54 @@ defp validate_data(data_cng) do
|> validate_emoji()
|> maybe_validate_tag_presence()
end
# All tags but the single emoji tag corresponding to the used custom emoji (if any)
# are ignored anyway. Having a known single-element array makes further processing easier.
# Also ensures the Emoji tag uses a pre-stripped name
defp prune_tags(%{"content" => emoji, "tag" => tags} = data) do
clean_emoji = Emoji.stripped_name(emoji)
pruned_tags =
Enum.reduce_while(tags, [], fn
%{"type" => "Emoji", "name" => name} = tag, res ->
clean_name = Emoji.stripped_name(name)
if clean_name == clean_emoji do
{:halt, [%{tag | "name" => clean_name}]}
else
{:cont, res}
end
_, res ->
{:cont, res}
end)
%{data | "tag" => pruned_tags}
end
defp prune_tags(data), do: data
# some software, like Iceshrimp.NET, federates emoji reaction with (from its POV) remote emoji
# with the source instance added to the name in AP as an @ postfix, similar to how its handled
# in Akkomas REST API.
# However, this leads to duplicated remote indicators being presented to our clients an can cause
# issues when trying to split the values we receive from REST API. Thus just drop them here.
defp drop_remote_indicator(%{"content" => emoji, "tag" => tag} = data) when is_list(tag) do
if String.contains?(emoji, "@") do
stripped_emoji = Emoji.stripped_name(emoji)
[clean_emoji | _] = String.split(stripped_emoji, "@", parts: 2)
clean_tag =
Enum.map(tag, fn
%{"name" => ^stripped_emoji} = t -> %{t | "name" => clean_emoji}
t -> t
end)
%{data | "content" => ":" <> clean_emoji <> ":", "tag" => clean_tag}
else
data
end
end
defp drop_remote_indicator(data), do: data
end

View file

@ -14,6 +14,7 @@ defmodule Pleroma.Web.ActivityPub.ObjectValidators.UserValidator do
@behaviour Pleroma.Web.ActivityPub.ObjectValidator.Validating
alias Pleroma.Object.Containment
alias Pleroma.Web.ActivityPub.Utils
require Pleroma.Constants
@ -62,6 +63,7 @@ defp validate_inbox(%{"id" => id, "inbox" => inbox}) do
defp validate_inbox(_), do: {:error, "No inbox"}
defp check_field_value(%{"id" => id} = _data, value) do
value = Utils.get_ap_id(value)
Containment.same_origin(id, value)
end

View file

@ -51,30 +51,15 @@ def publish_one(
%{"inbox" => inbox, "json" => json, "actor" => %User{} = actor, "id" => id} = params
) do
Logger.debug("Federating #{id} to #{inbox}")
uri = %{path: path} = URI.parse(inbox)
digest = "SHA-256=" <> (:crypto.hash(:sha256, json) |> Base.encode64())
date = Pleroma.Signature.signed_date()
signature =
Pleroma.Signature.sign(actor, %{
"(request-target)": "post #{path}",
host: signature_host(uri),
"content-length": byte_size(json),
digest: digest,
date: date
})
signing_key = Pleroma.User.SigningKey.load_key(actor).signing_key
with {:ok, %{status: code}} = result when code in 200..299 <-
HTTP.post(
inbox,
json,
[
{"Content-Type", "application/activity+json"},
{"Date", date},
{"signature", signature},
{"digest", digest}
]
[{"content-type", "application/activity+json"}],
httpsig: %{signing_key: signing_key}
) do
if not Map.has_key?(params, "unreachable_since") || params["unreachable_since"] do
Instances.set_reachable(inbox)
@ -84,7 +69,7 @@ def publish_one(
else
{_post_result, response} ->
unless params["unreachable_since"], do: Instances.set_unreachable(inbox)
{:error, response}
{:error, format_error_response(response)}
end
end
@ -97,13 +82,13 @@ def publish_one(%{"actor_id" => actor_id} = params) do
|> publish_one()
end
defp signature_host(%URI{port: port, scheme: scheme, host: host}) do
if port == URI.default_port(scheme) do
host
else
"#{host}:#{port}"
end
end
defp format_error_response(%Tesla.Env{status: code, headers: headers}),
do: {:http_error, code, headers}
defp format_error_response(%Tesla.Env{} = env),
do: {:http_error, :connect, env}
defp format_error_response(response), do: response
defp blocked_instances do
Config.get([:instance, :quarantined_instances], []) ++

View file

@ -19,6 +19,7 @@ defmodule Pleroma.Web.ActivityPub.SideEffects do
alias Pleroma.Web.ActivityPub.Builder
alias Pleroma.Web.ActivityPub.Pipeline
alias Pleroma.Web.ActivityPub.Utils
alias Pleroma.Web.ActivityPub.Visibility
alias Pleroma.Web.Push
alias Pleroma.Web.Streamer
alias Pleroma.Workers.PollWorker
@ -26,8 +27,6 @@ defmodule Pleroma.Web.ActivityPub.SideEffects do
require Pleroma.Constants
require Logger
@logger Pleroma.Config.get([:side_effects, :logger], Logger)
@behaviour Pleroma.Web.ActivityPub.SideEffects.Handling
defp ap_streamer, do: Pleroma.Config.get([:side_effects, :ap_streamer], ActivityPub)
@ -204,7 +203,9 @@ def handle(%{data: %{"type" => "Create"}} = activity, meta) do
{:ok, _user} = ActivityPub.increase_note_count_if_public(user, object)
{:ok, _user} = ActivityPub.update_last_status_at_if_public(user, object)
if in_reply_to = object.data["type"] != "Answer" && object.data["inReplyTo"] do
if in_reply_to =
object.data["type"] != "Answer" && Visibility.is_public?(object.data) &&
object.data["inReplyTo"] do
Object.increase_replies_count(in_reply_to)
end
@ -307,7 +308,8 @@ def handle(%{data: %{"type" => "Delete", "object" => deleted_object}} = object,
{:ok, user} = ActivityPub.decrease_note_count_if_public(user, deleted_object)
if in_reply_to = deleted_object.data["inReplyTo"] do
if in_reply_to =
Visibility.is_public?(deleted_object.data) && deleted_object.data["inReplyTo"] do
Object.decrease_replies_count(in_reply_to)
end
@ -316,7 +318,7 @@ def handle(%{data: %{"type" => "Delete", "object" => deleted_object}} = object,
:ok
else
{:actor, _} ->
@logger.error("The object doesn't have an actor: #{inspect(deleted_object)}")
Logger.error("The object doesn't have an actor: #{inspect(deleted_object)}")
:no_object_actor
end

View file

@ -22,8 +22,6 @@ defmodule Pleroma.Web.ActivityPub.Transmogrifier do
alias Pleroma.Web.ActivityPub.ObjectValidators.CommonFixes
alias Pleroma.Web.Federator
import Ecto.Query
require Pleroma.Constants
require Logger
@ -341,6 +339,7 @@ def fix_attachments(%{"attachment" => attachment} = object) when is_list(attachm
}
|> Maps.put_if_present("mediaType", media_type)
|> Maps.put_if_present("name", data["name"])
|> Maps.put_if_present("summary", data["summary"])
|> Maps.put_if_present("blurhash", data["blurhash"])
else
nil
@ -790,49 +789,24 @@ def set_quote_url(%{"quoteUri" => quote} = object) when is_binary(quote) do
def set_quote_url(obj), do: obj
@doc """
Serialized Mastodon-compatible `replies` collection containing _self-replies_.
Based on Mastodon's ActivityPub::NoteSerializer#replies.
Inline first page of the `replies` collection,
containing any replies in chronological order.
"""
def set_replies(obj_data) do
replies_uris =
with limit when limit > 0 <-
Pleroma.Config.get([:activitypub, :note_replies_output_limit], 0),
%Object{} = object <- Object.get_cached_by_ap_id(obj_data["id"]) do
object
|> Object.self_replies()
|> select([o], fragment("?->>'id'", o.data))
|> limit(^limit)
|> Repo.all()
else
_ -> []
end
set_replies(obj_data, replies_uris)
with obj_ap_id when obj_ap_id != nil <- obj_data["id"],
limit when limit > 0 <-
Pleroma.Config.get([:activitypub, :note_replies_output_limit], 0),
collection <-
Pleroma.Web.ActivityPub.ObjectView.render("object_replies.json", %{
render_params: %{object_ap_id: obj_data["id"], limit: limit, skip_ap_ctx: true}
}) do
Map.put(obj_data, "replies", collection)
else
0 -> Map.put(obj_data, "replies", obj_data["id"] <> "/replies")
_ -> obj_data
end
end
defp set_replies(obj, []) do
obj
end
defp set_replies(obj, replies_uris) do
replies_collection = %{
"type" => "Collection",
"items" => replies_uris
}
Map.merge(obj, %{"replies" => replies_collection})
end
def replies(%{"replies" => %{"first" => %{"items" => items}}}) when not is_nil(items) do
items
end
def replies(%{"replies" => %{"items" => items}}) when not is_nil(items) do
items
end
def replies(_), do: []
# Prepares the object of an outgoing create activity.
def prepare_object(object) do
object
@ -905,6 +879,10 @@ def prepare_outgoing(%{"type" => "Update", "object" => %{"type" => objtype} = ob
{:ok, data}
end
def prepare_outgoing(%{"type" => "Update", "object" => %{}} = data) do
raise "Requested to serve an Update for non-updateable object type: #{inspect(data)}"
end
def prepare_outgoing(%{"type" => "Announce", "actor" => ap_id, "object" => object_id} = data) do
object =
object_id

View file

@ -76,18 +76,6 @@ def label_in_message?(label, params),
[params["to"], params["cc"], params["bto"], params["bcc"]]
|> Enum.any?(&label_in_collection?(label, &1))
@spec unaddressed_message?(map()) :: boolean()
def unaddressed_message?(params),
do:
[params["to"], params["cc"], params["bto"], params["bcc"]]
|> Enum.all?(&is_nil(&1))
@spec recipient_in_message(User.t(), User.t(), map()) :: boolean()
def recipient_in_message(%User{ap_id: ap_id} = recipient, %User{} = actor, params),
do:
label_in_message?(ap_id, params) || unaddressed_message?(params) ||
User.following?(recipient, actor)
defp extract_list(target) when is_binary(target), do: [target]
defp extract_list(lst) when is_list(lst), do: lst
defp extract_list(_), do: []
@ -114,7 +102,8 @@ def make_json_ld_header do
"https://www.w3.org/ns/activitystreams",
"#{Endpoint.url()}/schemas/litepub-0.1.jsonld",
%{
"@language" => "und"
"@language" => "und",
"htmlMfm" => "https://w3id.org/fep/c16b#htmlMfm"
}
]
}

View file

@ -0,0 +1,59 @@
# Akkoma: Magically expressive social media
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# Copyright © 2025 Akkoma Authors <https://akkoma.dev/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Web.ActivityPub.CollectionViewHelper do
alias Pleroma.Web.ActivityPub.Utils
def collection_page_offset(collection, iri, page, show_items \\ true, total \\ nil) do
offset = (page - 1) * 10
items = Enum.slice(collection, offset, 10)
items = Enum.map(items, fn user -> user.ap_id end)
total = total || length(collection)
map = %{
"id" => "#{iri}?page=#{page}",
"type" => "OrderedCollectionPage",
"partOf" => iri,
"totalItems" => total,
"orderedItems" => if(show_items, do: items, else: [])
}
if offset + 10 < total do
Map.put(map, "next", "#{iri}?page=#{page + 1}")
else
map
end
end
defp maybe_omit_next(pagination, _items, nil), do: pagination
defp maybe_omit_next(pagination, items, limit) when is_binary(limit) do
case Integer.parse(limit) do
{limit, ""} -> maybe_omit_next(pagination, items, limit)
_ -> maybe_omit_next(pagination, items, nil)
end
end
defp maybe_omit_next(pagination, items, limit) when is_number(limit) do
if Enum.count(items) < limit, do: Map.delete(pagination, "next"), else: pagination
end
def collection_page_keyset(
display_items,
pagination,
limit \\ nil,
skip_ap_context \\ false
) do
%{
"type" => "OrderedCollectionPage",
"orderedItems" => display_items
}
|> Map.merge(pagination)
|> maybe_omit_next(display_items, limit)
|> then(fn m ->
if skip_ap_context, do: m, else: Map.merge(m, Utils.make_json_ld_header())
end)
end
end

View file

@ -6,7 +6,10 @@ defmodule Pleroma.Web.ActivityPub.ObjectView do
use Pleroma.Web, :view
alias Pleroma.Activity
alias Pleroma.Object
alias Pleroma.Web.ActivityPub.CollectionViewHelper
alias Pleroma.Web.ControllerHelper
alias Pleroma.Web.ActivityPub.Transmogrifier
alias Pleroma.Web.ActivityPub.ActivityPub
def render("object.json", %{object: %Object{} = object}) do
base = Pleroma.Web.ActivityPub.Utils.make_json_ld_header()
@ -15,26 +18,93 @@ def render("object.json", %{object: %Object{} = object}) do
Map.merge(base, additional)
end
def render("object.json", %{object: %Activity{data: %{"type" => activity_type}} = activity})
when activity_type in ["Create"] do
base = Pleroma.Web.ActivityPub.Utils.make_json_ld_header()
object = Object.normalize(activity, fetch: false)
additional =
Transmogrifier.prepare_object(activity.data)
|> Map.put("object", Transmogrifier.prepare_object(object.data))
Map.merge(base, additional)
def render("object.json", %{object: %Activity{} = activity}) do
{:ok, ap_data} = Transmogrifier.prepare_outgoing(activity.data)
ap_data
end
def render("object.json", %{object: %Activity{} = activity}) do
base = Pleroma.Web.ActivityPub.Utils.make_json_ld_header()
object_id = Object.normalize(activity, id_only: true)
def render("object_replies.json", %{
conn: conn,
render_params: %{object_ap_id: object_ap_id, page: "true"} = params
}) do
params = Map.put_new(params, :limit, 40)
additional =
Transmogrifier.prepare_object(activity.data)
|> Map.put("object", object_id)
items = ActivityPub.fetch_objects_for_replies_collection(object_ap_id, params)
display_items = map_reply_collection_items(items)
Map.merge(base, additional)
pagination = ControllerHelper.get_pagination_fields(conn, items, %{}, :asc)
CollectionViewHelper.collection_page_keyset(display_items, pagination, params[:limit])
end
def render(
"object_replies.json",
%{
render_params: %{object_ap_id: object_ap_id} = params
} = opts
) do
params =
params
|> Map.drop([:max_id, :min_id, :since_id, :object_ap_id])
|> Map.put_new(:limit, 40)
|> Map.put(:total, true)
%{total: total, items: items} =
ActivityPub.fetch_objects_for_replies_collection(object_ap_id, params)
display_items = map_reply_collection_items(items)
first_pagination = reply_collection_first_pagination(items, opts)
col_ap =
%{
"id" => object_ap_id <> "/replies",
"type" => "OrderedCollection",
"totalItems" => total
}
col_ap =
if total > 0 do
first_page =
CollectionViewHelper.collection_page_keyset(
display_items,
first_pagination,
params[:limit],
true
)
Map.put(col_ap, "first", first_page)
else
col_ap
end
if params[:skip_ap_ctx] do
col_ap
else
Map.merge(col_ap, Pleroma.Web.ActivityPub.Utils.make_json_ld_header())
end
end
defp map_reply_collection_items(items), do: Enum.map(items, fn %{ap_id: ap_id} -> ap_id end)
defp reply_collection_first_pagination(items, %{conn: %Plug.Conn{} = conn}) do
ControllerHelper.get_pagination_fields(conn, items, %{"page" => true}, :asc)
end
defp reply_collection_first_pagination(items, %{render_params: %{object_ap_id: object_ap_id}}) do
%{
"id" => object_ap_id <> "/replies?page=true",
"partOf" => object_ap_id <> "/replies"
}
|> then(fn m ->
case items do
[] ->
m
i ->
next_id = object_ap_id <> "/replies?page=true&min_id=#{List.last(i)[:id]}"
Map.put(m, "next", next_id)
end
end)
end
end

View file

@ -8,6 +8,7 @@ defmodule Pleroma.Web.ActivityPub.UserView do
alias Pleroma.Object
alias Pleroma.Repo
alias Pleroma.User
alias Pleroma.Web.ActivityPub.CollectionViewHelper
alias Pleroma.Web.ActivityPub.ObjectView
alias Pleroma.Web.ActivityPub.Transmogrifier
alias Pleroma.Web.ActivityPub.Utils
@ -112,7 +113,10 @@ def render("user.json", %{user: user}) do
|> Map.merge(Utils.make_json_ld_header())
end
def render("keys.json", %{user: user}) do
# For unauthenticated requests when authfetch is enabled.
# Still serve the key and the bare minimum of required fields
# to avoid being stuck in an infinite "cannot verify" loop with remotes.
def render("stripped_user.json", %{user: user}) do
{:ok, public_key} = User.SigningKey.public_key_pem(user)
%{
@ -121,7 +125,14 @@ def render("keys.json", %{user: user}) do
"id" => User.SigningKey.key_id_of_local_user(user),
"owner" => user.ap_id,
"publicKeyPem" => public_key
}
},
# REQUIRED fields per AP spec
"inbox" => "#{user.ap_id}/inbox",
"outbox" => "#{user.ap_id}/outbox",
# allow type-based processing
"type" => user.actor_type,
# since Mastodon requires a WebFinger address for all users, this seems like a good idea
"preferredUsername" => user.nickname
}
|> Map.merge(Utils.make_json_ld_header())
end
@ -141,7 +152,13 @@ def render("following.json", %{user: user, page: page} = opts) do
0
end
collection(following, "#{user.ap_id}/following", page, showing_items, total)
CollectionViewHelper.collection_page_offset(
following,
"#{user.ap_id}/following",
page,
showing_items,
total
)
|> Map.merge(Utils.make_json_ld_header())
end
@ -166,7 +183,12 @@ def render("following.json", %{user: user} = opts) do
"totalItems" => total,
"first" =>
if showing_items do
collection(following, "#{user.ap_id}/following", 1, !user.hide_follows)
CollectionViewHelper.collection_page_offset(
following,
"#{user.ap_id}/following",
1,
!user.hide_follows
)
else
"#{user.ap_id}/following?page=1"
end
@ -189,7 +211,13 @@ def render("followers.json", %{user: user, page: page} = opts) do
0
end
collection(followers, "#{user.ap_id}/followers", page, showing_items, total)
CollectionViewHelper.collection_page_offset(
followers,
"#{user.ap_id}/followers",
page,
showing_items,
total
)
|> Map.merge(Utils.make_json_ld_header())
end
@ -213,7 +241,13 @@ def render("followers.json", %{user: user} = opts) do
"type" => "OrderedCollection",
"first" =>
if showing_items do
collection(followers, "#{user.ap_id}/followers", 1, showing_items, total)
CollectionViewHelper.collection_page_offset(
followers,
"#{user.ap_id}/followers",
1,
showing_items,
total
)
else
"#{user.ap_id}/followers?page=1"
end
@ -233,22 +267,15 @@ def render("activity_collection.json", %{iri: iri}) do
def render("activity_collection_page.json", %{
activities: activities,
iri: iri,
pagination: pagination
}) do
collection =
display_items =
Enum.map(activities, fn activity ->
{:ok, data} = Transmogrifier.prepare_outgoing(activity.data)
data
end)
%{
"type" => "OrderedCollectionPage",
"partOf" => iri,
"orderedItems" => collection
}
|> Map.merge(Utils.make_json_ld_header())
|> Map.merge(pagination)
CollectionViewHelper.collection_page_keyset(display_items, pagination)
end
def render("featured.json", %{
@ -276,27 +303,6 @@ defp maybe_put_total_items(map, true, total) do
Map.put(map, "totalItems", total)
end
def collection(collection, iri, page, show_items \\ true, total \\ nil) do
offset = (page - 1) * 10
items = Enum.slice(collection, offset, 10)
items = Enum.map(items, fn user -> user.ap_id end)
total = total || length(collection)
map = %{
"id" => "#{iri}?page=#{page}",
"type" => "OrderedCollectionPage",
"partOf" => iri,
"totalItems" => total,
"orderedItems" => if(show_items, do: items, else: [])
}
if offset < total do
Map.put(map, "next", "#{iri}?page=#{page + 1}")
else
map
end
end
defp maybe_make_image(func, key, user) do
image = func.(user, no_default: true)
maybe_insert_image(key, image)

View file

@ -53,25 +53,11 @@ def is_direct?(activity) do
!is_public?(activity) && !is_private?(activity)
end
def is_list?(%{data: %{"listMessage" => _}}), do: true
def is_list?(_), do: false
@spec visible_for_user?(Object.t() | Activity.t() | nil, User.t() | nil) :: boolean()
def visible_for_user?(%Object{data: %{"type" => "Tombstone"}}, _), do: false
def visible_for_user?(%Activity{actor: ap_id}, %User{ap_id: ap_id}), do: true
def visible_for_user?(%Object{data: %{"actor" => ap_id}}, %User{ap_id: ap_id}), do: true
def visible_for_user?(nil, _), do: false
def visible_for_user?(%Activity{data: %{"listMessage" => _}}, nil), do: false
def visible_for_user?(
%Activity{data: %{"listMessage" => list_ap_id}} = activity,
%User{} = user
) do
user.ap_id in activity.data["to"] ||
list_ap_id
|> Pleroma.List.get_by_ap_id()
|> Pleroma.List.member?(user)
end
def visible_for_user?(%{__struct__: module} = message, nil)
when module in [Activity, Object] do
@ -141,9 +127,6 @@ def get_visibility(object) do
object.data["directMessage"] == true ->
"direct"
is_binary(object.data["listMessage"]) ->
"list"
length(cc) > 0 ->
"private"

View file

@ -11,7 +11,6 @@ defmodule Pleroma.Web.AdminAPI.AdminAPIController do
alias Pleroma.Config
alias Pleroma.MFA
alias Pleroma.ModerationLog
alias Pleroma.Stats
alias Pleroma.User
alias Pleroma.Web.ActivityPub.ActivityPub
alias Pleroma.Web.AdminAPI
@ -399,10 +398,17 @@ def resend_confirmation_email(%{assigns: %{user: admin}} = conn, %{"nicknames" =
json(conn, "")
end
def stats(conn, params) do
counters = Stats.get_status_visibility_count(params["instance"])
json(conn, %{"status_visibility" => counters})
# Legacy endpoint, stubbed out for a transition period before removal
# (atm only used by admin-fe)
def stats(conn, _params) do
json(conn, %{
"status_visibility" => %{
"direct" => 0,
"private" => 0,
"public" => 0,
"unlisted" => 0
}
})
end
def create_backup(%{assigns: %{user: admin}} = conn, %{"nickname" => nickname}) do

Some files were not shown because too many files have changed in this diff Show more