From 7998a0034691722ae2f7d78932c212f66385f58d Mon Sep 17 00:00:00 2001 From: Oneric Date: Fri, 14 Feb 2025 21:57:28 +0100 Subject: [PATCH 01/26] cosmetic/rich_media/parser: fix typo --- lib/pleroma/web/rich_media/parser.ex | 2 +- test/pleroma/web/rich_media/parser_test.exs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/pleroma/web/rich_media/parser.ex b/lib/pleroma/web/rich_media/parser.ex index 7c5fed2bf..15eaaf735 100644 --- a/lib/pleroma/web/rich_media/parser.ex +++ b/lib/pleroma/web/rich_media/parser.ex @@ -96,7 +96,7 @@ defp parse_uri(true, url) do |> validate_page_url end - defp parse_uri(_, _), do: {:error, "not an URL"} + defp parse_uri(_, _), do: {:error, "not a URL"} defp get_tld(host) do host diff --git a/test/pleroma/web/rich_media/parser_test.exs b/test/pleroma/web/rich_media/parser_test.exs index bf7864aa7..77e37784b 100644 --- a/test/pleroma/web/rich_media/parser_test.exs +++ b/test/pleroma/web/rich_media/parser_test.exs @@ -121,13 +121,13 @@ test "refuses to crawl plain HTTP and other scheme URL" do res = Parser.parse(url) assert {:error, {:url, "scheme mismatch"}} == res or - {:error, {:url, "not an URL"}} == res + {:error, {:url, "not a URL"}} == res end) end test "refuses to crawl malformed URLs" do url = "example.com[]/ogp" - assert {:error, {:url, "not an URL"}} == Parser.parse(url) + assert {:error, {:url, "not a URL"}} == Parser.parse(url) end test "refuses to crawl URLs of private network from posts" do From 1c2eb4d79963baef822cdb313ac68b2d9c84cbe2 Mon Sep 17 00:00:00 2001 From: Oneric Date: Fri, 14 Feb 2025 21:59:22 +0100 Subject: [PATCH 02/26] cosmetic/object: drop is_ prefix from is_tombstone_object? The question mark suffix already implies it being an indicator function --- lib/pleroma/object.ex | 6 +++--- lib/pleroma/web/activity_pub/transmogrifier.ex | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/pleroma/object.ex b/lib/pleroma/object.ex index 40c1534b9..d496cb23f 100644 --- a/lib/pleroma/object.ex +++ b/lib/pleroma/object.ex @@ -216,9 +216,9 @@ def get_cached_by_ap_id(ap_id) do end # Intentionally accepts non-Object arguments! - @spec is_tombstone_object?(term()) :: boolean() - def is_tombstone_object?(%Object{data: %{"type" => "Tombstone"}}), do: true - def is_tombstone_object?(_), do: false + @spec tombstone_object?(term()) :: boolean() + def tombstone_object?(%Object{data: %{"type" => "Tombstone"}}), do: true + def tombstone_object?(_), do: false def make_tombstone(%Object{data: %{"id" => id, "type" => type}}, deleted \\ DateTime.utc_now()) do %ObjectTombstone{ diff --git a/lib/pleroma/web/activity_pub/transmogrifier.ex b/lib/pleroma/web/activity_pub/transmogrifier.ex index 6e6244b7c..ecf6d9cd8 100644 --- a/lib/pleroma/web/activity_pub/transmogrifier.ex +++ b/lib/pleroma/web/activity_pub/transmogrifier.ex @@ -560,7 +560,7 @@ defp handle_incoming_normalised( with {_, {:ok, object_id}} <- {:object_id, oid_result}, object <- Object.get_cached_by_ap_id(object_id), - {_, false} <- {:tombstone, Object.is_tombstone_object?(object) && !data["actor"]}, + {_, false} <- {:tombstone, Object.tombstone_object?(object) && !data["actor"]}, {:ok, activity, _} <- Pipeline.common_pipeline(data, local: false) do {:ok, activity} else From 98c7e9534ec6ba1f6ab61f0db8f847b160390836 Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 23 Dec 2024 14:14:47 +0100 Subject: [PATCH 03/26] Drop obsolete APNG mime override Commit 9d2c558f64fd7853f9317e74821d6687eb7a8886 bumped to a mime package version including the upstream fix. 
--- config/config.exs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/config/config.exs b/config/config.exs index 5a6169506..48ebe91d2 100644 --- a/config/config.exs +++ b/config/config.exs @@ -166,10 +166,7 @@ "application/xrd+xml" => ["xrd+xml"], "application/jrd+json" => ["jrd+json"], "application/activity+json" => ["activity+json"], - "application/ld+json" => ["activity+json"], - # Can be removed when bumping MIME past 2.0.5 - # see https://akkoma.dev/AkkomaGang/akkoma/issues/657 - "image/apng" => ["apng"] + "application/ld+json" => ["activity+json"] } config :mime, :extensions, %{ From c8e0f7848bd72434a30e450a5af8c356edb7fbcb Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 23 Dec 2024 14:19:30 +0100 Subject: [PATCH 04/26] Migrate back to upstream Plug.Static Commit a924e117fd3059a047dc49348ac4de19b4db5ddd bumped the plug package to 1.16.0 which includes our upstream patch. Resolves: https://akkoma.dev/AkkomaGang/akkoma/issues/734 --- lib/pleroma/web/plugs/instance_static.ex | 4 +- .../web/plugs/static_no_content_type.ex | 469 ------------------ lib/pleroma/web/plugs/uploaded_media.ex | 4 +- 3 files changed, 4 insertions(+), 473 deletions(-) delete mode 100644 lib/pleroma/web/plugs/static_no_content_type.ex diff --git a/lib/pleroma/web/plugs/instance_static.ex b/lib/pleroma/web/plugs/instance_static.ex index b72b604a1..ec188986b 100644 --- a/lib/pleroma/web/plugs/instance_static.ex +++ b/lib/pleroma/web/plugs/instance_static.ex @@ -62,10 +62,10 @@ defp call_static(%{request_path: request_path} = conn, opts, from) do opts = opts |> Map.put(:from, from) - |> Map.put(:set_content_type, false) + |> Map.put(:content_type, false) conn |> set_static_content_type(request_path) - |> Pleroma.Web.Plugs.StaticNoCT.call(opts) + |> Plug.Static.call(opts) end end diff --git a/lib/pleroma/web/plugs/static_no_content_type.ex b/lib/pleroma/web/plugs/static_no_content_type.ex deleted file mode 100644 index ea00a2d5d..000000000 --- a/lib/pleroma/web/plugs/static_no_content_type.ex +++ /dev/null @@ -1,469 +0,0 @@ -# This is almost identical to Plug.Static from Plug 1.15.3 (2024-01-16) -# It being copied is a temporary measure to fix an urgent bug without -# needing to wait for merge of a suitable patch upstream -# The differences are: -# - this leading comment -# - renaming of the module from 'Plug.Static' to 'Pleroma.Web.Plugs.StaticNoCT' -# - additon of set_content_type option - -defmodule Pleroma.Web.Plugs.StaticNoCT do - @moduledoc """ - A plug for serving static assets. - - It requires two options: - - * `:at` - the request path to reach for static assets. - It must be a string. - - * `:from` - the file system path to read static assets from. - It can be either: a string containing a file system path, an - atom representing the application name (where assets will - be served from `priv/static`), a tuple containing the - application name and the directory to serve assets from (besides - `priv/static`), or an MFA tuple. - - The preferred form is to use `:from` with an atom or tuple, since - it will make your application independent from the starting directory. - For example, if you pass: - - plug Plug.Static, from: "priv/app/path" - - Plug.Static will be unable to serve assets if you build releases - or if you change the current directory. Instead do: - - plug Plug.Static, from: {:app_name, "priv/app/path"} - - If a static asset cannot be found, `Plug.Static` simply forwards - the connection to the rest of the pipeline. 
- - ## Cache mechanisms - - `Plug.Static` uses etags for HTTP caching. This means browsers/clients - should cache assets on the first request and validate the cache on - following requests, not downloading the static asset once again if it - has not changed. The cache-control for etags is specified by the - `cache_control_for_etags` option and defaults to `"public"`. - - However, `Plug.Static` also supports direct cache control by using - versioned query strings. If the request query string starts with - "?vsn=", `Plug.Static` assumes the application is versioning assets - and does not set the `ETag` header, meaning the cache behaviour will - be specified solely by the `cache_control_for_vsn_requests` config, - which defaults to `"public, max-age=31536000"`. - - ## Options - - * `:encodings` - list of 2-ary tuples where first value is value of - the `Accept-Encoding` header and second is extension of the file to - be served if given encoding is accepted by client. Entries will be tested - in order in list, so entries higher in list will be preferred. Defaults - to: `[]`. - - In addition to setting this value directly it supports 2 additional - options for compatibility reasons: - - + `:brotli` - will append `{"br", ".br"}` to the encodings list. - + `:gzip` - will append `{"gzip", ".gz"}` to the encodings list. - - Additional options will be added in the above order (Brotli takes - preference over Gzip) to reflect older behaviour which was set due - to fact that Brotli in general provides better compression ratio than - Gzip. - - * `:cache_control_for_etags` - sets the cache header for requests - that use etags. Defaults to `"public"`. - - * `:etag_generation` - specify a `{module, function, args}` to be used - to generate an etag. The `path` of the resource will be passed to - the function, as well as the `args`. If this option is not supplied, - etags will be generated based off of file size and modification time. - Note it is [recommended for the etag value to be quoted](https://tools.ietf.org/html/rfc7232#section-2.3), - which Plug won't do automatically. - - * `:cache_control_for_vsn_requests` - sets the cache header for - requests starting with "?vsn=" in the query string. Defaults to - `"public, max-age=31536000"`. - - * `:only` - filters which requests to serve. This is useful to avoid - file system access on every request when this plug is mounted - at `"/"`. For example, if `only: ["images", "favicon.ico"]` is - specified, only files in the "images" directory and the - "favicon.ico" file will be served by `Plug.Static`. - Note that `Plug.Static` matches these filters against request - uri and not against the filesystem. When requesting - a file with name containing non-ascii or special characters, - you should use urlencoded form. For example, you should write - `only: ["file%20name"]` instead of `only: ["file name"]`. - Defaults to `nil` (no filtering). - - * `:only_matching` - a relaxed version of `:only` that will - serve any request as long as one of the given values matches the - given path. For example, `only_matching: ["images", "favicon"]` - will match any request that starts at "images" or "favicon", - be it "/images/foo.png", "/images-high/foo.png", "/favicon.ico" - or "/favicon-high.ico". Such matches are useful when serving - digested files at the root. Defaults to `nil` (no filtering). - - * `:headers` - other headers to be set when serving static assets. Specify either - an enum of key-value pairs or a `{module, function, args}` to return an enum. 
The - `conn` will be passed to the function, as well as the `args`. - - * `:content_types` - custom MIME type mapping. As a map with filename as key - and content type as value. For example: - `content_types: %{"apple-app-site-association" => "application/json"}`. - - * `:set_content_type` - by default Plug.Static (re)sets the content type header - using auto-detection and the `:content_types` map. But when set to `false` - no content-type header will be inserted instead retaining the original - value or lack thereof. This can be useful when custom logic for appropiate - content types is needed which cannot be reasonably expressed as a static - filename map. - - ## Examples - - This plug can be mounted in a `Plug.Builder` pipeline as follows: - - defmodule MyPlug do - use Plug.Builder - - plug Plug.Static, - at: "/public", - from: :my_app, - only: ~w(images robots.txt) - plug :not_found - - def not_found(conn, _) do - send_resp(conn, 404, "not found") - end - end - - """ - - @behaviour Plug - @allowed_methods ~w(GET HEAD) - - import Plug.Conn - alias Plug.Conn - - # In this module, the `:prim_file` Erlang module along with the `:file_info` - # record are used instead of the more common and Elixir-y `File` module and - # `File.Stat` struct, respectively. The reason behind this is performance: all - # the `File` operations pass through a single process in order to support node - # operations that we simply don't need when serving assets. - - require Record - Record.defrecordp(:file_info, Record.extract(:file_info, from_lib: "kernel/include/file.hrl")) - - defmodule InvalidPathError do - defexception message: "invalid path for static asset", plug_status: 400 - end - - @impl true - def init(opts) do - from = - case Keyword.fetch!(opts, :from) do - {_, _} = from -> from - {_, _, _} = from -> from - from when is_atom(from) -> {from, "priv/static"} - from when is_binary(from) -> from - _ -> raise ArgumentError, ":from must be an atom, a binary or a tuple" - end - - encodings = - opts - |> Keyword.get(:encodings, []) - |> maybe_add("br", ".br", Keyword.get(opts, :brotli, false)) - |> maybe_add("gzip", ".gz", Keyword.get(opts, :gzip, false)) - - %{ - encodings: encodings, - only_rules: {Keyword.get(opts, :only, []), Keyword.get(opts, :only_matching, [])}, - qs_cache: Keyword.get(opts, :cache_control_for_vsn_requests, "public, max-age=31536000"), - et_cache: Keyword.get(opts, :cache_control_for_etags, "public"), - et_generation: Keyword.get(opts, :etag_generation, nil), - headers: Keyword.get(opts, :headers, %{}), - content_types: Keyword.get(opts, :content_types, %{}), - set_content_type: Keyword.get(opts, :set_content_type, true), - from: from, - at: opts |> Keyword.fetch!(:at) |> Plug.Router.Utils.split() - } - end - - @impl true - def call( - conn = %Conn{method: meth}, - %{at: at, only_rules: only_rules, from: from, encodings: encodings} = options - ) - when meth in @allowed_methods do - segments = subset(at, conn.path_info) - - if allowed?(only_rules, segments) do - segments = Enum.map(segments, &uri_decode/1) - - if invalid_path?(segments) do - raise InvalidPathError, "invalid path for static asset: #{conn.request_path}" - end - - path = path(from, segments) - range = get_req_header(conn, "range") - encoding = file_encoding(conn, path, range, encodings) - serve_static(encoding, conn, segments, range, options) - else - conn - end - end - - def call(conn, _options) do - conn - end - - defp uri_decode(path) do - # TODO: Remove rescue as this can't fail from Elixir v1.13 - try do - 
URI.decode(path) - rescue - ArgumentError -> - raise InvalidPathError - end - end - - defp allowed?(_only_rules, []), do: false - defp allowed?({[], []}, _list), do: true - - defp allowed?({full, prefix}, [h | _]) do - h in full or (prefix != [] and match?({0, _}, :binary.match(h, prefix))) - end - - defp maybe_put_content_type(conn, false, _, _), do: conn - - defp maybe_put_content_type(conn, _, types, filename) do - content_type = Map.get(types, filename) || MIME.from_path(filename) - - conn - |> put_resp_header("content-type", content_type) - end - - defp serve_static({content_encoding, file_info, path}, conn, segments, range, options) do - %{ - qs_cache: qs_cache, - et_cache: et_cache, - et_generation: et_generation, - headers: headers, - content_types: types, - set_content_type: set_content_type - } = options - - case put_cache_header(conn, qs_cache, et_cache, et_generation, file_info, path) do - {:stale, conn} -> - filename = List.last(segments) - - conn - |> maybe_put_content_type(set_content_type, types, filename) - |> put_resp_header("accept-ranges", "bytes") - |> maybe_add_encoding(content_encoding) - |> merge_headers(headers) - |> serve_range(file_info, path, range, options) - - {:fresh, conn} -> - conn - |> maybe_add_vary(options) - |> send_resp(304, "") - |> halt() - end - end - - defp serve_static(:error, conn, _segments, _range, _options) do - conn - end - - defp serve_range(conn, file_info, path, [range], options) do - file_info(size: file_size) = file_info - - with %{"bytes" => bytes} <- Plug.Conn.Utils.params(range), - {range_start, range_end} <- start_and_end(bytes, file_size) do - send_range(conn, path, range_start, range_end, file_size, options) - else - _ -> send_entire_file(conn, path, options) - end - end - - defp serve_range(conn, _file_info, path, _range, options) do - send_entire_file(conn, path, options) - end - - defp start_and_end("-" <> rest, file_size) do - case Integer.parse(rest) do - {last, ""} when last > 0 and last <= file_size -> {file_size - last, file_size - 1} - _ -> :error - end - end - - defp start_and_end(range, file_size) do - case Integer.parse(range) do - {first, "-"} when first >= 0 -> - {first, file_size - 1} - - {first, "-" <> rest} when first >= 0 -> - case Integer.parse(rest) do - {last, ""} when last >= first -> {first, min(last, file_size - 1)} - _ -> :error - end - - _ -> - :error - end - end - - defp send_range(conn, path, 0, range_end, file_size, options) when range_end == file_size - 1 do - send_entire_file(conn, path, options) - end - - defp send_range(conn, path, range_start, range_end, file_size, _options) do - length = range_end - range_start + 1 - - conn - |> put_resp_header("content-range", "bytes #{range_start}-#{range_end}/#{file_size}") - |> send_file(206, path, range_start, length) - |> halt() - end - - defp send_entire_file(conn, path, options) do - conn - |> maybe_add_vary(options) - |> send_file(200, path) - |> halt() - end - - defp maybe_add_encoding(conn, nil), do: conn - defp maybe_add_encoding(conn, ce), do: put_resp_header(conn, "content-encoding", ce) - - defp maybe_add_vary(conn, %{encodings: encodings}) do - # If we serve gzip or brotli at any moment, we need to set the proper vary - # header regardless of whether we are serving gzip content right now. 
- # See: http://www.fastly.com/blog/best-practices-for-using-the-vary-header/ - if encodings != [] do - update_in(conn.resp_headers, &[{"vary", "Accept-Encoding"} | &1]) - else - conn - end - end - - defp put_cache_header( - %Conn{query_string: "vsn=" <> _} = conn, - qs_cache, - _et_cache, - _et_generation, - _file_info, - _path - ) - when is_binary(qs_cache) do - {:stale, put_resp_header(conn, "cache-control", qs_cache)} - end - - defp put_cache_header(conn, _qs_cache, et_cache, et_generation, file_info, path) - when is_binary(et_cache) do - etag = etag_for_path(file_info, et_generation, path) - - conn = - conn - |> put_resp_header("cache-control", et_cache) - |> put_resp_header("etag", etag) - - if etag in get_req_header(conn, "if-none-match") do - {:fresh, conn} - else - {:stale, conn} - end - end - - defp put_cache_header(conn, _, _, _, _, _) do - {:stale, conn} - end - - defp etag_for_path(file_info, et_generation, path) do - case et_generation do - {module, function, args} -> - apply(module, function, [path | args]) - - nil -> - file_info(size: size, mtime: mtime) = file_info - < :erlang.phash2() |> Integer.to_string(16)::binary, ?">> - end - end - - defp file_encoding(conn, path, [_range], _encodings) do - # We do not support compression for range queries. - file_encoding(conn, path, nil, []) - end - - defp file_encoding(conn, path, _range, encodings) do - encoded = - Enum.find_value(encodings, fn {encoding, ext} -> - if file_info = accept_encoding?(conn, encoding) && regular_file_info(path <> ext) do - {encoding, file_info, path <> ext} - end - end) - - cond do - not is_nil(encoded) -> - encoded - - file_info = regular_file_info(path) -> - {nil, file_info, path} - - true -> - :error - end - end - - defp regular_file_info(path) do - case :prim_file.read_file_info(path) do - {:ok, file_info(type: :regular) = file_info} -> - file_info - - _ -> - nil - end - end - - defp accept_encoding?(conn, encoding) do - encoding? = &String.contains?(&1, [encoding, "*"]) - - Enum.any?(get_req_header(conn, "accept-encoding"), fn accept -> - accept |> Plug.Conn.Utils.list() |> Enum.any?(encoding?) 
-    end)
-  end
-
-  defp maybe_add(list, key, value, true), do: list ++ [{key, value}]
-  defp maybe_add(list, _key, _value, false), do: list
-
-  defp path({module, function, arguments}, segments)
-       when is_atom(module) and is_atom(function) and is_list(arguments),
-       do: Enum.join([apply(module, function, arguments) | segments], "/")
-
-  defp path({app, from}, segments) when is_atom(app) and is_binary(from),
-    do: Enum.join([Application.app_dir(app), from | segments], "/")
-
-  defp path(from, segments),
-    do: Enum.join([from | segments], "/")
-
-  defp subset([h | expected], [h | actual]), do: subset(expected, actual)
-  defp subset([], actual), do: actual
-  defp subset(_, _), do: []
-
-  defp invalid_path?(list) do
-    invalid_path?(list, :binary.compile_pattern(["/", "\\", ":", "\0"]))
-  end
-
-  defp invalid_path?([h | _], _match) when h in [".", "..", ""], do: true
-  defp invalid_path?([h | t], match), do: String.contains?(h, match) or invalid_path?(t)
-  defp invalid_path?([], _match), do: false
-
-  defp merge_headers(conn, {module, function, args}) do
-    merge_headers(conn, apply(module, function, [conn | args]))
-  end
-
-  defp merge_headers(conn, headers) do
-    merge_resp_headers(conn, headers)
-  end
-end
diff --git a/lib/pleroma/web/plugs/uploaded_media.ex b/lib/pleroma/web/plugs/uploaded_media.ex
index 746203087..357fcb432 100644
--- a/lib/pleroma/web/plugs/uploaded_media.ex
+++ b/lib/pleroma/web/plugs/uploaded_media.ex
@@ -88,12 +88,12 @@ defp get_media(conn, {:static_dir, directory}, opts) do
       Map.get(opts, :static_plug_opts)
       |> Map.put(:at, [@path])
       |> Map.put(:from, directory)
-      |> Map.put(:set_content_type, false)
+      |> Map.put(:content_type, false)
 
     conn =
       conn
       |> set_content_type(opts, conn.request_path)
-      |> Pleroma.Web.Plugs.StaticNoCT.call(static_opts)
+      |> Plug.Static.call(static_opts)
 
     if conn.halted do
       conn

From 7151ef4718e0d096d587829bf872ccfd7fa4810b Mon Sep 17 00:00:00 2001
From: Oneric
Date: Wed, 30 Oct 2024 23:18:10 +0100
Subject: [PATCH 05/26] Add SafeZip module

This will replace all the slightly different safety workarounds at
different ZIP handling sites and ensure safety is actually consistently
enforced everywhere while also making code cleaner and easier to follow.
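For illustration, a minimal usage sketch of the new module (the paths are
made-up placeholders; the calls mirror the API added below):

    # list entries after validating the whole archive table up front
    {:ok, entries} = Pleroma.SafeZip.list_dir_file("/tmp/pack.zip")

    # extraction likewise refuses unsafe member types and traversal paths
    {:ok, files} = Pleroma.SafeZip.unzip_file("/tmp/pack.zip", "/tmp/unpacked")

    # an archive containing e.g. "../../evil" yields {:error, _} instead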
+ """ + + @type text() :: String.t() | [char()] + + defp is_safe_path?(path) do + # Path accepts elixir’s chardata() + case Path.safe_relative(path) do + {:ok, _} -> true + _ -> false + end + end + + defp is_safe_type?(file_type) do + if file_type in [:regular, :directory] do + true + else + false + end + end + + defp maybe_add_file(_type, _path_charlist, nil), do: nil + + defp maybe_add_file(:regular, path_charlist, file_list), + do: [to_string(path_charlist) | file_list] + + defp maybe_add_file(_type, _path_charlist, file_list), do: file_list + + @spec check_safe_archive_and_maybe_list_files(binary() | [char()], [term()], boolean()) :: + {:ok, [String.t()]} | {:error, reason :: term()} + defp check_safe_archive_and_maybe_list_files(archive, opts, list) do + acc = if list, do: [], else: nil + + with {:ok, table} <- :zip.table(archive, opts) do + Enum.reduce_while(table, {:ok, acc}, fn + # ZIP comment + {:zip_comment, _}, acc -> + {:cont, acc} + + # File entry + {:zip_file, path, info, _comment, _offset, _comp_size}, {:ok, fl} -> + with {_, type} <- {:get_type, elem(info, 2)}, + {_, true} <- {:type, is_safe_type?(type)}, + {_, true} <- {:safe_path, is_safe_path?(path)} do + {:cont, {:ok, maybe_add_file(type, path, fl)}} + else + {:get_type, e} -> + {:halt, + {:error, "Couldn't determine file type of ZIP entry at #{path} (#{inspect(e)})"}} + + {:type, _} -> + {:halt, {:error, "Potentially unsafe file type in ZIP at: #{path}"}} + + {:safe_path, _} -> + {:halt, {:error, "Unsafe path in ZIP: #{path}"}} + end + + # new OTP version? + _, _acc -> + {:halt, {:error, "Unknown ZIP record type"}} + end) + end + end + + @spec check_safe_archive_and_list_files(binary() | [char()], [term()]) :: + {:ok, [String.t()]} | {:error, reason :: term()} + defp check_safe_archive_and_list_files(archive, opts \\ []) do + check_safe_archive_and_maybe_list_files(archive, opts, true) + end + + @spec check_safe_archive(binary() | [char()], [term()]) :: :ok | {:error, reason :: term()} + defp check_safe_archive(archive, opts \\ []) do + case check_safe_archive_and_maybe_list_files(archive, opts, false) do + {:ok, _} -> :ok + error -> error + end + end + + @spec check_safe_file_list([text()], text()) :: :ok | {:error, term()} + defp check_safe_file_list([], _), do: :ok + + defp check_safe_file_list([path | tail], cwd) do + with {_, true} <- {:path, is_safe_path?(path)}, + {_, {:ok, fstat}} <- {:stat, File.stat(Path.expand(path, cwd))}, + {_, true} <- {:type, is_safe_type?(fstat.type)} do + check_safe_file_list(tail, cwd) + else + {:path, _} -> + {:error, "Unsafe path escaping cwd: #{path}"} + + {:stat, e} -> + {:error, "Unable to check file type of #{path}: #{inspect(e)}"} + + {:type, _} -> + {:error, "Unsafe type at #{path}"} + end + end + + defp check_safe_file_list(_, _), do: {:error, "Malformed file_list"} + + @doc """ + Checks whether the archive data contais file entries for all paths from fset + + Note this really only accepts entries corresponding to regular _files_, + if a path is contained as for example an directory, this does not count as a match. 
+ """ + @spec contains_all_data?(binary(), MapSet.t()) :: true | false + def contains_all_data?(archive_data, fset) do + with {:ok, table} <- :zip.table(archive_data) do + remaining = + Enum.reduce(table, fset, fn + {:zip_file, path, info, _comment, _offset, _comp_size}, fset -> + if elem(info, 2) == :regular do + MapSet.delete(fset, path) + else + fset + end + + _, _ -> + fset + end) + |> MapSet.size() + + if remaining == 0, do: true, else: false + else + _ -> false + end + end + + @doc """ + List all file entries in ZIP, or error if invalid or unsafe. + + Note this really only lists regular files, no directories, ZIP comments or other types! + """ + @spec list_dir_file(text()) :: {:ok, [String.t()]} | {:error, reason :: term()} + def list_dir_file(archive) do + path = to_charlist(archive) + check_safe_archive_and_list_files(path) + end + + defp stringify_zip({:ok, {fname, data}}), do: {:ok, {to_string(fname), data}} + defp stringify_zip({:ok, fname}), do: {:ok, to_string(fname)} + defp stringify_zip(ret), do: ret + + @spec zip(text(), text(), [text()], boolean()) :: + {:ok, file_name :: String.t()} + | {:ok, {file_name :: String.t(), file_data :: binary()}} + | {:error, reason :: term()} + def zip(name, file_list, cwd, memory \\ false) do + opts = [{:cwd, to_charlist(cwd)}] + opts = if memory, do: [:memory | opts], else: opts + + with :ok <- check_safe_file_list(file_list, cwd) do + file_list = for f <- file_list, do: to_charlist(f) + name = to_charlist(name) + stringify_zip(:zip.zip(name, file_list, opts)) + end + end + + @spec unzip_file(text(), text(), [text()] | nil) :: + {:ok, [String.t()]} + | {:error, reason :: term()} + | {:error, {name :: text(), reason :: term()}} + def unzip_file(archive, target_dir, file_list \\ nil) do + do_unzip(to_charlist(archive), to_charlist(target_dir), file_list) + end + + @spec unzip_data(binary(), text(), [text()] | nil) :: + {:ok, [String.t()]} + | {:error, reason :: term()} + | {:error, {name :: text(), reason :: term()}} + def unzip_data(archive, target_dir, file_list \\ nil) do + do_unzip(archive, to_charlist(target_dir), file_list) + end + + defp stringify_unzip({:ok, [{_fname, _data} | _] = filebinlist}), + do: {:ok, Enum.map(filebinlist, fn {fname, data} -> {to_string(fname), data} end)} + + defp stringify_unzip({:ok, [_fname | _] = filelist}), + do: {:ok, Enum.map(filelist, fn fname -> to_string(fname) end)} + + defp stringify_unzip({:error, {fname, term}}), do: {:error, {to_string(fname), term}} + defp stringify_unzip(ret), do: ret + + @spec do_unzip(binary() | [char()], text(), [text()] | nil) :: + {:ok, [String.t()]} + | {:error, reason :: term()} + | {:error, {name :: text(), reason :: term()}} + defp do_unzip(archive, target_dir, file_list) do + opts = + if file_list != nil do + [ + file_list: for(f <- file_list, do: to_charlist(f)), + cwd: target_dir + ] + else + [cwd: target_dir] + end + + with :ok <- check_safe_archive(archive) do + stringify_unzip(:zip.unzip(archive, opts)) + end + end +end From 96fe080e6eea08aeedf0cd982e997168d3d10d48 Mon Sep 17 00:00:00 2001 From: Oneric Date: Wed, 30 Oct 2024 23:24:05 +0100 Subject: [PATCH 06/26] Convert all raw :zip usage to SafeZip Notably at least two instances were not properly guarded from path traversal attack before and are only now fixed by using SafeZip: - frontend installation did never check for malicious paths. But given a malicious froontend could already, e.g. 
---
 lib/mix/tasks/pleroma/emoji.ex | 15 ++-----
 lib/pleroma/emoji/pack.ex      | 75 +++++++++++++++-------------------
 lib/pleroma/frontend.ex        | 23 +++--------
 lib/pleroma/user/backup.ex     |  7 ++--
 4 files changed, 43 insertions(+), 77 deletions(-)

diff --git a/lib/mix/tasks/pleroma/emoji.ex b/lib/mix/tasks/pleroma/emoji.ex
index aa8131254..2b13b3257 100644
--- a/lib/mix/tasks/pleroma/emoji.ex
+++ b/lib/mix/tasks/pleroma/emoji.ex
@@ -93,6 +93,7 @@ def run(["get-packs" | args]) do
         )
 
       files = fetch_and_decode!(files_loc)
+      files_to_unzip = for({_, f} <- files, do: f)
 
       shell_info(IO.ANSI.format(["Unpacking ", :bright, pack_name]))
 
@@ -103,17 +104,7 @@ def run(["get-packs" | args]) do
           pack_name
         ])
 
-      files_to_unzip =
-        Enum.map(
-          files,
-          fn {_, f} -> to_charlist(f) end
-        )
-
-      {:ok, _} =
-        :zip.unzip(binary_archive,
-          cwd: to_charlist(pack_path),
-          file_list: files_to_unzip
-        )
+      {:ok, _} = Pleroma.SafeZip.unzip_data(binary_archive, pack_path, files_to_unzip)
 
       shell_info(IO.ANSI.format(["Writing pack.json for ", :bright, pack_name]))
 
@@ -202,7 +193,7 @@ def run(["gen-pack" | args]) do
 
     tmp_pack_dir = Path.join(System.tmp_dir!(), "emoji-pack-#{name}")
 
-    {:ok, _} = :zip.unzip(binary_archive, cwd: String.to_charlist(tmp_pack_dir))
+    {:ok, _} = Pleroma.SafeZip.unzip_data(binary_archive, tmp_pack_dir)
 
     emoji_map = Pleroma.Emoji.Loader.make_shortcode_to_file_map(tmp_pack_dir, exts)
 
diff --git a/lib/pleroma/emoji/pack.ex b/lib/pleroma/emoji/pack.ex
index e95320a07..fdd31d165 100644
--- a/lib/pleroma/emoji/pack.ex
+++ b/lib/pleroma/emoji/pack.ex
@@ -25,6 +25,7 @@ defmodule Pleroma.Emoji.Pack do
   alias Pleroma.Emoji
   alias Pleroma.Emoji.Pack
   alias Pleroma.Utils
+  alias Pleroma.SafeZip
 
   # Invalid/Malicious names are supposed to be filtered out before path joining,
   # but there are many entrypoints to affected functions so as the code changes
@@ -95,22 +96,20 @@ def delete(name) do
     end
   end
 
-  @spec unpack_zip_emojies(list(tuple())) :: list(map())
-  defp unpack_zip_emojies(zip_files) do
-    Enum.reduce(zip_files, [], fn
-      {_, path, s, _, _, _}, acc when elem(s, 2) == :regular ->
-        with(
-          filename <- Path.basename(path),
-          shortcode <- Path.basename(filename, Path.extname(filename)),
-          false <- Emoji.exist?(shortcode)
-        ) do
-          [%{path: path, filename: path, shortcode: shortcode} | acc]
-        else
-          _ -> acc
-        end
-
-      _, acc ->
-        acc
+  @spec map_zip_emojies(list(String.t())) :: list(map())
+  defp map_zip_emojies(zip_files) do
+    Enum.reduce(zip_files, [], fn path, acc ->
+      with(
+        filename <- Path.basename(path),
+        shortcode <- Path.basename(filename, Path.extname(filename)),
+        # note: this only checks the shortcode; if an emoji already exists on the same path, but
+        # with a different shortcode, the existing one will be degraded to an alias of the new one
+        false <- Emoji.exist?(shortcode)
+      ) do
+        [%{path: path, filename: path, shortcode: shortcode} | acc]
+      else
+        _ -> acc
+      end
     end)
   end
 
   @spec add_file(t(), String.t(), Path.t(), Plug.Upload.t()) ::
           {:ok, t()}
           | {:error, File.posix() | atom()}
   def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"} = file) do
-    with {:ok,
zip_files} <- :zip.table(to_charlist(file.path)), - [_ | _] = emojies <- unpack_zip_emojies(zip_files), + with {:ok, zip_files} <- SafeZip.list_dir_file(file.path), + [_ | _] = emojies <- map_zip_emojies(zip_files), {:ok, tmp_dir} <- Utils.tmp_dir("emoji") do try do {:ok, _emoji_files} = - :zip.unzip( - to_charlist(file.path), - [{:file_list, Enum.map(emojies, & &1[:path])}, {:cwd, to_charlist(tmp_dir)}] - ) + SafeZip.unzip_file(file.path, tmp_dir, Enum.map(emojies, & &1[:path])) {_, updated_pack} = Enum.map_reduce(emojies, pack, fn item, emoji_pack -> @@ -446,16 +442,9 @@ defp downloadable?(pack) do end defp create_archive_and_cache(pack, hash) do - files = [ - ~c"pack.json" - | Enum.map(pack.files, fn {_, file} -> - {:ok, file} = Path.safe_relative(file) - to_charlist(file) - end) - ] - - {:ok, {_, result}} = - :zip.zip(~c"#{pack.name}.zip", files, [:memory, cwd: to_charlist(pack.path)]) + pack_file_list = Enum.into(pack.files, [], fn {_, f} -> f end) + files = ["pack.json" | pack_file_list] + {:ok, {_, result}} = SafeZip.zip("#{pack.name}.zip", files, pack.path, true) ttl_per_file = Pleroma.Config.get!([:emoji, :shared_pack_cache_seconds_per_file]) overall_ttl = :timer.seconds(ttl_per_file * Enum.count(files)) @@ -626,11 +615,10 @@ defp copy_as(remote_pack, local_name) do defp unzip(archive, pack_info, remote_pack, local_pack) do with :ok <- File.mkdir_p!(local_pack.path) do - files = Enum.map(remote_pack["files"], fn {_, path} -> to_charlist(path) end) + files = Enum.map(remote_pack["files"], fn {_, path} -> path end) # Fallback cannot contain a pack.json file - files = if pack_info[:fallback], do: files, else: [~c"pack.json" | files] - - :zip.unzip(archive, cwd: to_charlist(local_pack.path), file_list: files) + files = if pack_info[:fallback], do: files, else: ["pack.json" | files] + SafeZip.unzip_data(archive, local_pack.path, files) end end @@ -693,13 +681,14 @@ defp update_sha_and_save_metadata(pack, data) do end defp validate_has_all_files(pack, zip) do - with {:ok, f_list} <- :zip.unzip(zip, [:memory]) do - # Check if all files from the pack.json are in the archive - pack.files - |> Enum.all?(fn {_, from_manifest} -> - List.keyfind(f_list, to_charlist(from_manifest), 0) + # Check if all files from the pack.json are in the archive + eset = + Enum.reduce(pack.files, MapSet.new(), fn + {_, file}, s -> MapSet.put(s, to_charlist(file)) end) - |> if(do: :ok, else: {:error, :incomplete}) - end + + if SafeZip.contains_all_data?(zip, eset), + do: :ok, + else: {:error, :incomplete} end end diff --git a/lib/pleroma/frontend.ex b/lib/pleroma/frontend.ex index a309d8467..3ce7910cf 100644 --- a/lib/pleroma/frontend.ex +++ b/lib/pleroma/frontend.ex @@ -70,25 +70,12 @@ defp download_or_unzip(_frontend_info, temp_dir, file) do end def unzip(zip, dest) do - with {:ok, unzipped} <- :zip.unzip(zip, [:memory]) do - File.rm_rf!(dest) - File.mkdir_p!(dest) + File.rm_rf!(dest) + File.mkdir_p!(dest) - Enum.each(unzipped, fn {filename, data} -> - path = filename - - new_file_path = Path.join(dest, path) - - new_file_path - |> Path.dirname() - |> File.rm() - - new_file_path - |> Path.dirname() - |> File.mkdir_p!() - - File.write!(new_file_path, data) - end) + case Pleroma.SafeZip.unzip_data(zip, dest) do + {:ok, _} -> :ok + error -> error end end diff --git a/lib/pleroma/user/backup.ex b/lib/pleroma/user/backup.ex index de967abe3..21c2948a0 100644 --- a/lib/pleroma/user/backup.ex +++ b/lib/pleroma/user/backup.ex @@ -119,7 +119,7 @@ def process(%__MODULE__{} = backup) do end end - @files [~c"actor.json", 
~c"outbox.json", ~c"likes.json", ~c"bookmarks.json"] + @files ["actor.json", "outbox.json", "likes.json", "bookmarks.json"] def export(%__MODULE__{} = backup) do backup = Repo.preload(backup, :user) name = String.trim_trailing(backup.file_name, ".zip") @@ -130,10 +130,9 @@ def export(%__MODULE__{} = backup) do :ok <- statuses(dir, backup.user), :ok <- likes(dir, backup.user), :ok <- bookmarks(dir, backup.user), - {:ok, zip_path} <- - :zip.create(String.to_charlist(dir <> ".zip"), @files, cwd: String.to_charlist(dir)), + {:ok, zip_path} <- Pleroma.SafeZip.zip(dir <> ".zip", @files, dir), {:ok, _} <- File.rm_rf(dir) do - {:ok, to_string(zip_path)} + {:ok, zip_path} end end From 4231345f4e44438aca8f01bf49cf3419a21440a2 Mon Sep 17 00:00:00 2001 From: Oneric Date: Fri, 14 Feb 2025 21:52:29 +0100 Subject: [PATCH 07/26] cosmetic/emoji/pack: fix spelling There might be further debate about "emoji" vs "emojis" for the plural but a grep shows the latter is already widely used in our codebase. --- lib/pleroma/emoji/pack.ex | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/pleroma/emoji/pack.ex b/lib/pleroma/emoji/pack.ex index fdd31d165..a841f5ac6 100644 --- a/lib/pleroma/emoji/pack.ex +++ b/lib/pleroma/emoji/pack.ex @@ -96,8 +96,8 @@ def delete(name) do end end - @spec map_zip_emojies(list(String.t())) :: list(map()) - defp map_zip_emojies(zip_files) do + @spec map_zip_emojis(list(String.t())) :: list(map()) + defp map_zip_emojis(zip_files) do Enum.reduce(zip_files, [], fn path, acc -> with( filename <- Path.basename(path), @@ -118,14 +118,14 @@ defp map_zip_emojies(zip_files) do | {:error, File.posix() | atom()} def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"} = file) do with {:ok, zip_files} <- SafeZip.list_dir_file(file.path), - [_ | _] = emojies <- map_zip_emojies(zip_files), + [_ | _] = emojis <- map_zip_emojis(zip_files), {:ok, tmp_dir} <- Utils.tmp_dir("emoji") do try do {:ok, _emoji_files} = - SafeZip.unzip_file(file.path, tmp_dir, Enum.map(emojies, & &1[:path])) + SafeZip.unzip_file(file.path, tmp_dir, Enum.map(emojis, & &1[:path])) {_, updated_pack} = - Enum.map_reduce(emojies, pack, fn item, emoji_pack -> + Enum.map_reduce(emojis, pack, fn item, emoji_pack -> emoji_file = %Plug.Upload{ filename: item[:filename], path: path_join_safe(tmp_dir, item[:path]) From d68a5f6c56c2cb68456a3a375460ae1a189e9417 Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 30 Dec 2024 17:59:21 +0100 Subject: [PATCH 08/26] Protected against counterfeit local docs being posted Only possible if actor keys leaked first thus log with alert level --- lib/pleroma/web/federator.ex | 8 ++++ .../activity_pub_controller_test.exs | 8 ++-- test/pleroma/workers/receiver_worker_test.exs | 37 ++++++++++++++++++- 3 files changed, 48 insertions(+), 5 deletions(-) diff --git a/lib/pleroma/web/federator.ex b/lib/pleroma/web/federator.ex index ce1dd8fa8..ab0cabb56 100644 --- a/lib/pleroma/web/federator.ex +++ b/lib/pleroma/web/federator.ex @@ -94,6 +94,7 @@ def perform(:incoming_ap_doc, params) do with nil <- Activity.normalize(params["id"]), {_, :ok} <- {:correct_origin?, Containment.contain_origin_from_id(actor, params)}, + {_, :ok, _} <- {:local, Containment.contain_local_fetch(actor), actor}, {:ok, activity} <- Transmogrifier.handle_incoming(params) do {:ok, activity} else @@ -101,6 +102,13 @@ def perform(:incoming_ap_doc, params) do Logger.debug("Origin containment failure for #{params["id"]}") {:error, :origin_containment_failed} + {:local, _, actor} -> + 
Logger.alert( + "Received incoming AP doc with valid signature for local actor #{actor}! Likely key leak!\n#{inspect(params)}" + ) + + {:error, :origin_containment_failed} + %Activity{} -> Logger.debug("Already had #{params["id"]}") {:error, :already_present} diff --git a/test/pleroma/web/activity_pub/activity_pub_controller_test.exs b/test/pleroma/web/activity_pub/activity_pub_controller_test.exs index 4cc7f93f5..7cf280a17 100644 --- a/test/pleroma/web/activity_pub/activity_pub_controller_test.exs +++ b/test/pleroma/web/activity_pub/activity_pub_controller_test.exs @@ -16,7 +16,6 @@ defmodule Pleroma.Web.ActivityPub.ActivityPubControllerTest do alias Pleroma.Web.ActivityPub.ObjectView alias Pleroma.Web.ActivityPub.Relay alias Pleroma.Web.ActivityPub.UserView - alias Pleroma.Web.ActivityPub.Utils alias Pleroma.Web.CommonAPI alias Pleroma.Web.Endpoint alias Pleroma.Workers.ReceiverWorker @@ -1113,7 +1112,8 @@ test "it clears `unreachable` federation status of the sender", %{conn: conn, da end test "it removes all follower collections but actor's", %{conn: conn} do - [actor, recipient] = insert_pair(:user) + actor = insert(:user, local: false) + recipient = insert(:user, local: true) actor = with_signing_key(actor) to = [ @@ -1127,7 +1127,7 @@ test "it removes all follower collections but actor's", %{conn: conn} do data = %{ "@context" => ["https://www.w3.org/ns/activitystreams"], "type" => "Create", - "id" => Utils.generate_activity_id(), + "id" => actor.ap_id <> "/create/12345", "to" => to, "cc" => cc, "actor" => actor.ap_id, @@ -1137,7 +1137,7 @@ test "it removes all follower collections but actor's", %{conn: conn} do "cc" => cc, "content" => "It's a note", "attributedTo" => actor.ap_id, - "id" => Utils.generate_object_id() + "id" => actor.ap_id <> "/note/12345" } } diff --git a/test/pleroma/workers/receiver_worker_test.exs b/test/pleroma/workers/receiver_worker_test.exs index e816d8db5..9fd0e1fa8 100644 --- a/test/pleroma/workers/receiver_worker_test.exs +++ b/test/pleroma/workers/receiver_worker_test.exs @@ -7,13 +7,16 @@ defmodule Pleroma.Workers.ReceiverWorkerTest do use Oban.Testing, repo: Pleroma.Repo @moduletag :mocked + import ExUnit.CaptureLog import Mock import Pleroma.Factory + alias Pleroma.Web.ActivityPub.Utils alias Pleroma.Workers.ReceiverWorker test "it ignores MRF reject" do - params = insert(:note).data + user = insert(:user, local: false) + params = insert(:note, user: user, data: %{"id" => user.ap_id <> "/note/1"}).data with_mock Pleroma.Web.ActivityPub.Transmogrifier, handle_incoming: fn _ -> {:reject, "MRF"} end do @@ -23,4 +26,36 @@ test "it ignores MRF reject" do }) end end + + test "it errors on receiving local documents" do + actor = insert(:user, local: true) + recipient = insert(:user, local: true) + + to = [recipient.ap_id] + cc = [] + + params = %{ + "@context" => ["https://www.w3.org/ns/activitystreams"], + "type" => "Create", + "id" => Utils.generate_activity_id(), + "to" => to, + "cc" => cc, + "actor" => actor.ap_id, + "object" => %{ + "type" => "Note", + "to" => to, + "cc" => cc, + "content" => "It's a note", + "attributedTo" => actor.ap_id, + "id" => Utils.generate_object_id() + } + } + + assert capture_log(fn -> + assert {:discard, :origin_containment_failed} == + ReceiverWorker.perform(%Oban.Job{ + args: %{"op" => "incoming_ap_doc", "params" => params} + }) + end) =~ "[alert] Received incoming AP doc with valid signature for local actor" + end end From b5fa8c6d09af4c19b7b75bd932de1576c7cb070f Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 30 Dec 2024 
19:11:51 +0100
Subject: [PATCH 09/26] readme: drop mention of YunoHost package
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It’s no longer listed in the catalogue and
the git repo wasn't updated in over a year

---
 README.md                             | 3 ---
 docs/docs/installation/yunohost_en.md | 9 ---------
 2 files changed, 12 deletions(-)
 delete mode 100644 docs/docs/installation/yunohost_en.md

diff --git a/README.md b/README.md
index e4aa25715..8d35212aa 100644
--- a/README.md
+++ b/README.md
@@ -54,9 +54,6 @@ If your platform is not supported, or you just want to be able to edit the sourc
 ### Docker
 Docker installation is supported via [this setup](https://docs.akkoma.dev/stable/installation/docker_en/)
 
-### Packages
-Akkoma is packaged for [YunoHost](https://yunohost.org) and can be found and installed from the [YunoHost app catalogue](https://yunohost.org/#/apps).
-
 ### Compilation Troubleshooting
 If you ever encounter compilation issues during the updating of Akkoma, you can try these commands and see if they fix things:

diff --git a/docs/docs/installation/yunohost_en.md b/docs/docs/installation/yunohost_en.md
deleted file mode 100644
index 0d3adb4fe..000000000
--- a/docs/docs/installation/yunohost_en.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Installing on Yunohost
-
-[YunoHost](https://yunohost.org) is a server operating system aimed at self-hosting. The YunoHost community maintains a package of Akkoma which allows you to install Akkoma on YunoHost. You can install it via the normal way through the admin web interface, or through the CLI. More information can be found at [the repo of the package](https://github.com/YunoHost-Apps/akkoma_ynh).
-
-## Questions
-
-Questions and problems related to the YunoHost parts can be done through the [YunoHost channels](https://yunohost.org/en/help).
-
-For questions about Akkoma, check out the [Akkoma community channels](../../#community-channels).

From 366065c0f635fc14b6b086783ffb844485f73d52 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 10/26] fetcher: split out core object fetch validation

To allow reuse for adapted key validation logic
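The contract of the new check callback, spelled out for clarity (this
mirrors the strict-ID verifier the existing object fetch path passes in
below; all names are as in the diff):

    # The callback receives the final URI (after redirects) and the
    # decoded document. It returns {check_result, fallback_id}: on a
    # failed strict check the fetch may be retried once via fallback_id.
    verify_id = fn final_uri, data ->
      {Containment.contain_id_to_fetch(final_uri, data), data["id"]}
    end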
---
 lib/pleroma/object/fetcher.ex | 41 +++++++++++++++++++++++++----------
 1 file changed, 29 insertions(+), 12 deletions(-)

diff --git a/lib/pleroma/object/fetcher.ex b/lib/pleroma/object/fetcher.ex
index 11ed57ed5..0f129f318 100644
--- a/lib/pleroma/object/fetcher.ex
+++ b/lib/pleroma/object/fetcher.ex
@@ -265,6 +265,28 @@ def fetch_and_contain_remote_object_from_id(%{"id" => id}, is_ap_id),
   def fetch_and_contain_remote_object_from_id(id, is_ap_id) when is_binary(id) do
     Logger.debug("Fetching object #{id} via AP [ap_id=#{is_ap_id}]")
 
+    fetch_and_contain_remote_ap_doc(
+      id,
+      is_ap_id,
+      fn final_uri, data -> {Containment.contain_id_to_fetch(final_uri, data), data["id"]} end
+    )
+  end
+
+  def fetch_and_contain_remote_object_from_id(_id, _is_ap_id),
+    do: {:error, :invalid_id}
+
+  # Fetches an AP document and performs variable security checks on it.
+  #
+  # Note that the received documents "id" matching the final host domain
+  # is always enforced before the custom ID check runs.
+  @spec fetch_and_contain_remote_ap_doc(
+          String.t(),
+          boolean(),
+          (String.t(), Map.t() -> {:ok | :error, String.t() | term()})
+        ) :: {:ok, Map.t()} | {:reject, term()} | {:error, term()}
+  defp fetch_and_contain_remote_ap_doc(id, is_ap_id, verify_id) do
+    Logger.debug("Dereferencing AP doc #{id}")
+
     with {:valid_uri_scheme, true} <- {:valid_uri_scheme, String.starts_with?(id, "http")},
          %URI{} = uri <- URI.parse(id),
          {:mrf_reject_check, {:ok, nil}} <-
@@ -277,7 +299,7 @@ def fetch_and_contain_remote_object_from_id(id, is_ap_id) when is_binary(id) do
          true <- !is_ap_id || final_id == id,
          {:ok, data} <- safe_json_decode(body),
          {_, :ok} <- {:containment, Containment.contain_origin(final_id, data)},
-         {_, _, :ok} <- {:strict_id, data["id"], Containment.contain_id_to_fetch(final_id, data)} do
+         {_, {:ok, _}} <- {:strict_id, verify_id.(final_id, data)} do
       unless Instances.reachable?(final_id) do
         Instances.set_reachable(final_id)
       end
@@ -289,14 +311,12 @@
       # Similarly keys, either use a fragment ID and are a subobjects or a distinct ID
       # but for compatibility are still a subobject presenting their owning actors ID at the toplevel.
       # Refetching _once_ from the listed id, should yield a strict match afterwards.
-      {:strict_id, ap_id, _} = e ->
-        case is_ap_id do
-          false ->
-            fetch_and_contain_remote_object_from_id(ap_id, true)
-
-          true ->
-            log_fetch_error(id, e)
-            {:error, :id_mismatch}
+      {:strict_id, {_error, ap_id}} = e ->
+        if !is_ap_id and is_binary(ap_id) do
+          fetch_and_contain_remote_ap_doc(ap_id, true, verify_id)
+        else
+          log_fetch_error(id, e)
+          {:error, :id_mismatch}
         end
 
       {:mrf_reject_check, _} = e ->
@@ -327,9 +347,6 @@
     end
   end
 
-  def fetch_and_contain_remote_object_from_id(_id, _is_ap_id),
-    do: {:error, :invalid_id}
-
   # HOPEFULLY TEMPORARY
   # Basically none of our Tesla mocks in tests set the (supposed to
   # exist for Tesla proper) url parameter for their responses

From 4231345f4e44438aca8f01bf49cf3419a21440a2 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 11/26] Prevent key-actor mapping poisoning and key takeovers

Previously there were mainly two attack vectors:
- for raw keys the owner <-> key mapping wasn't verified at all
- keys were retrieved with refetching allowed and only the top-level ID
  was sanitised while usually keys are but a subobject

This reintroduces public key checks in the user actor,
previously removed in 9728e2f8f71e3a33f9f6ae60da79145c09b08e06
but now adapted to account for the new mapping mechanism.
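A sketch of the first vector for concreteness (hypothetical domains,
modelled on the new test fixture): an attacker-controlled actor document
claims a foreign key, which contain_key_user/2 now rejects because key id
and actor id live on different domains:

    %{
      "id" => "https://attacker.example/users/evil",
      "publicKey" => %{
        "id" => "https://victim.example/users/admin#main-key",
        "owner" => "https://attacker.example/users/evil",
        "publicKeyPem" => "..."
      }
    }
    # Containment.contain_key_user(
    #   "https://victim.example/users/admin#main-key",
    #   "https://attacker.example/users/evil"
    # ) #=> :error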
---
 lib/pleroma/object/containment.ex             | 12 +++++
 lib/pleroma/object/fetcher.ex                 | 36 +++++++++++++
 lib/pleroma/user/signing_key.ex               |  2 +-
 .../object_validators/user_validator.ex       | 18 ++++++-
 ..._keyid_from_admin@mastdon.example.org.json | 54 +++++++++++++++++++
 test/pleroma/user_test.exs                    | 30 +++++++++++
 .../activity_pub_controller_test.exs          | 20 +++----
 test/support/http_request_mock.ex             | 15 +++++-
 8 files changed, 174 insertions(+), 13 deletions(-)
 create mode 100644 test/fixtures/tesla_mock/evil@remote.example_with_keyid_from_admin@mastdon.example.org.json

diff --git a/lib/pleroma/object/containment.ex b/lib/pleroma/object/containment.ex
index 7b1cc37bd..b0944e782 100644
--- a/lib/pleroma/object/containment.ex
+++ b/lib/pleroma/object/containment.ex
@@ -157,4 +157,16 @@ def same_origin(id1, id2) do
     compare_uris(uri1, uri2)
   end
+
+  @doc """
+  Checks whether a key_id - owner_id pair is acceptable.
+
+  While in theory keys and actors on different domains could be verified
+  by fetching both and checking the links on both ends (if different at all),
+  this requires extra fetches and there are no known implementations with split
+  actor and key domains, thus atm this simply requires the same domain.
+  """
+  def contain_key_user(key_id, user_id) do
+    same_origin(key_id, user_id)
+  end
 end
diff --git a/lib/pleroma/object/fetcher.ex b/lib/pleroma/object/fetcher.ex
index 0f129f318..c76974adc 100644
--- a/lib/pleroma/object/fetcher.ex
+++ b/lib/pleroma/object/fetcher.ex
@@ -275,6 +275,42 @@ def fetch_and_contain_remote_object_from_id(id, is_ap_id) when is_binary(id) do
   def fetch_and_contain_remote_object_from_id(_id, _is_ap_id),
     do: {:error, :invalid_id}
 
+  def fetch_and_contain_remote_key(id) do
+    Logger.debug("Fetching remote actor key #{id}")
+
+    fetch_and_contain_remote_ap_doc(
+      id,
+      # key IDs are always AP IDs which should resolve directly and exactly
+      true,
+      fn
+        final_uri, %{"id" => user_id, "publicKey" => %{"id" => key_id}} ->
+          # For non-fragment keys as used e.g. by GTS, the "id" won't match the fetch URI,
+          # but the key ID will. Thus do NOT strict check the top-level id, but byte-exact
+          # check the key ID (since for later lookups we need byte-exact matches).
+          # This relies on fetching already enforcing a domain match for toplevel id and host
+          with :ok <- Containment.contain_key_user(key_id, user_id),
+               true <- key_id == final_uri do
+            {:ok, key_id}
+          else
+            _ -> {:error, key_id}
+          end
+
+        final_uri,
+        %{"type" => "CryptographicKey", "id" => key_id, "owner" => user_id, "publicKeyPem" => _} ->
+          # XXX: refactor into separate function instead of duplicating
+          with :ok <- Containment.contain_key_user(key_id, user_id),
+               true <- key_id == final_uri do
+            {:ok, key_id}
+          else
+            _ -> {:error, nil}
+          end
+
+        _, _ ->
+          {:error, nil}
+      end
+    )
+  end
+
   # Fetches an AP document and performs variable security checks on it.
# # Note that the received documents "id" matching the final host domain diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex index 87149aa58..04cdced59 100644 --- a/lib/pleroma/user/signing_key.ex +++ b/lib/pleroma/user/signing_key.ex @@ -196,7 +196,7 @@ def fetch_remote_key(key_id) do Logger.debug("Fetching remote key: #{key_id}") with {:ok, _body} = resp <- - Pleroma.Object.Fetcher.fetch_and_contain_remote_object_from_id(key_id), + Pleroma.Object.Fetcher.fetch_and_contain_remote_key(key_id), {:ok, ap_id, public_key_pem} <- handle_signature_response(resp) do Logger.debug("Fetched remote key: #{ap_id}") # fetch the user diff --git a/lib/pleroma/web/activity_pub/object_validators/user_validator.ex b/lib/pleroma/web/activity_pub/object_validators/user_validator.ex index b80068e37..f75e2f87e 100644 --- a/lib/pleroma/web/activity_pub/object_validators/user_validator.ex +++ b/lib/pleroma/web/activity_pub/object_validators/user_validator.ex @@ -22,7 +22,8 @@ def validate(object, meta) def validate(%{"type" => type, "id" => _id} = data, meta) when type in Pleroma.Constants.actor_types() do - with :ok <- validate_inbox(data), + with :ok <- validate_pubkey(data), + :ok <- validate_inbox(data), :ok <- contain_collection_origin(data) do {:ok, data, meta} else @@ -33,6 +34,21 @@ def validate(%{"type" => type, "id" => _id} = data, meta) def validate(_, _), do: {:error, "Not a user object"} + defp validate_pubkey(%{"id" => user_id, "publicKey" => %{"id" => pk_id, "publicKeyPem" => _key}}) do + with {_, true} <- {:user, is_binary(user_id)}, + {_, true} <- {:key, is_binary(pk_id)}, + :ok <- Containment.contain_key_user(pk_id, user_id) do + :ok + else + {:user, _} -> {:error, "Invalid user id: #{inspect(user_id)}"} + {:key, _} -> {:error, "Invalid key id: #{inspect(pk_id)}"} + :error -> {:error, "Problematic actor-key pairing: #{user_id} - #{pk_id}"} + end + end + + # pubkey is optional atm + defp validate_pubkey(_data), do: :ok + defp validate_inbox(%{"id" => id, "inbox" => inbox}) do case Containment.same_origin(id, inbox) do :ok -> :ok diff --git a/test/fixtures/tesla_mock/evil@remote.example_with_keyid_from_admin@mastdon.example.org.json b/test/fixtures/tesla_mock/evil@remote.example_with_keyid_from_admin@mastdon.example.org.json new file mode 100644 index 000000000..f6c801096 --- /dev/null +++ b/test/fixtures/tesla_mock/evil@remote.example_with_keyid_from_admin@mastdon.example.org.json @@ -0,0 +1,54 @@ +{ + "@context": [ + "https://www.w3.org/ns/activitystreams", + "https://w3id.org/security/v1", + { + "manuallyApprovesFollowers": "as:manuallyApprovesFollowers", + "sensitive": "as:sensitive", + "movedTo": "as:movedTo", + "Hashtag": "as:Hashtag", + "ostatus": "http://ostatus.org#", + "atomUri": "ostatus:atomUri", + "inReplyToAtomUri": "ostatus:inReplyToAtomUri", + "conversation": "ostatus:conversation", + "toot": "http://joinmastodon.org/ns#", + "Emoji": "toot:Emoji", + "alsoKnownAs": { + "@id": "as:alsoKnownAs", + "@type": "@id" + } + } + ], + "id": "http://remote.example/users/with_key_id_of_admin-mastodon.example.org", + "type": "Person", + "following": "http://remote.example/users/evil/following", + "followers": "http://remote.example/users/evil/followers", + "inbox": "http://remote.example/users/evil/inbox", + "outbox": "http://remote.example/users/evil/outbox", + "preferredUsername": "evil", + "name": null, + "discoverable": "true", + "summary": "hii", + "url": "http://remote.example/@evil", + "manuallyApprovesFollowers": false, + "publicKey": { + "id": 
"http://mastodon.example.org/users/admin#main-key", + "owner": "http://remote.example/users/evil", + "publicKeyPem": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtc4Tir+3ADhSNF6VKrtW\nOU32T01w7V0yshmQei38YyiVwVvFu8XOP6ACchkdxbJ+C9mZud8qWaRJKVbFTMUG\nNX4+6Q+FobyuKrwN7CEwhDALZtaN2IPbaPd6uG1B7QhWorrY+yFa8f2TBM3BxnUy\nI4T+bMIZIEYG7KtljCBoQXuTQmGtuffO0UwJksidg2ffCF5Q+K//JfQagJ3UzrR+\nZXbKMJdAw4bCVJYs4Z5EhHYBwQWiXCyMGTd7BGlmMkY6Av7ZqHKC/owp3/0EWDNz\nNqF09Wcpr3y3e8nA10X40MJqp/wR+1xtxp+YGbq/Cj5hZGBG7etFOmIpVBrDOhry\nBwIDAQAB\n-----END PUBLIC KEY-----\n" + }, + "attachment": [], + "endpoints": { + "sharedInbox": "http://remote.example/inbox" + }, + "icon": { + "type": "Image", + "mediaType": "image/jpeg", + "url": "https://cdn.niu.moe/accounts/avatars/000/033/323/original/fd7f8ae0b3ffedc9.jpeg" + }, + "image": { + "type": "Image", + "mediaType": "image/png", + "url": "https://cdn.niu.moe/accounts/headers/000/033/323/original/850b3448fa5fd477.png" + }, + "alsoKnownAs": [] +} diff --git a/test/pleroma/user_test.exs b/test/pleroma/user_test.exs index 0d5c9faec..b280a269c 100644 --- a/test/pleroma/user_test.exs +++ b/test/pleroma/user_test.exs @@ -10,6 +10,7 @@ defmodule Pleroma.UserTest do alias Pleroma.Repo alias Pleroma.Tests.ObanHelpers alias Pleroma.User + alias Pleroma.User.SigningKey alias Pleroma.Web.ActivityPub.ActivityPub alias Pleroma.Web.CommonAPI @@ -908,6 +909,35 @@ test "it doesn't fail on invalid alsoKnownAs entries" do assert {:ok, %User{also_known_as: []}} = User.get_or_fetch_by_ap_id("https://mbp.example.com/") end + + test "doesn't allow key_id poisoning" do + {:error, {:validate, {:error, "Problematic actor-key pairing:" <> _}}} = + User.fetch_by_ap_id( + "http://remote.example/users/with_key_id_of_admin-mastodon.example.org" + ) + + used_key_id = "http://mastodon.example.org/users/admin#main-key" + refute Repo.get_by(SigningKey, key_id: used_key_id) + + {:ok, user} = User.fetch_by_ap_id("http://mastodon.example.org/users/admin") + user = SigningKey.load_key(user) + + # ensure we checked for the right key before + assert user.signing_key.key_id == used_key_id + end + + test "doesn't allow key_id takeovers" do + {:ok, user} = User.fetch_by_ap_id("http://mastodon.example.org/users/admin") + user = SigningKey.load_key(user) + + {:error, {:validate, {:error, "Problematic actor-key pairing:" <> _}}} = + User.fetch_by_ap_id( + "http://remote.example/users/with_key_id_of_admin-mastodon.example.org" + ) + + refreshed_sk = Repo.get_by(SigningKey, key_id: user.signing_key.key_id) + assert refreshed_sk.user_id == user.id + end end test "returns an ap_id for a user" do diff --git a/test/pleroma/web/activity_pub/activity_pub_controller_test.exs b/test/pleroma/web/activity_pub/activity_pub_controller_test.exs index 7cf280a17..49bbb9d63 100644 --- a/test/pleroma/web/activity_pub/activity_pub_controller_test.exs +++ b/test/pleroma/web/activity_pub/activity_pub_controller_test.exs @@ -561,7 +561,7 @@ test "it inserts an incoming activity into the database", %{conn: conn} do |> assign(:valid_signature, true) |> put_req_header( "signature", - "keyId=\"http://mastodon.example.org/users/admin/main-key\"" + "keyId=\"http://mastodon.example.org/users/admin#main-key\"" ) |> put_req_header("content-type", "application/activity+json") |> post("/inbox", data) @@ -679,7 +679,7 @@ test "accepts Add/Remove activities", %{conn: conn} do |> String.replace("{{nickname}}", "lain") actor = "https://example.com/users/lain" - key_id = "#{actor}/main-key" + key_id = "#{actor}#main-key" 
       insert(:user,
         ap_id: actor,
@@ -741,7 +741,7 @@ test "accepts Add/Remove activities", %{conn: conn} do
     assert "ok" ==
              conn
              |> assign(:valid_signature, true)
-             |> put_req_header("signature", "keyId=\"#{actor}/main-key\"")
+             |> put_req_header("signature", "keyId=\"#{actor}#main-key\"")
              |> put_req_header("content-type", "application/activity+json")
              |> post("/inbox", data)
              |> json_response(200)
@@ -764,7 +764,7 @@ test "accepts Add/Remove activities", %{conn: conn} do
     assert "ok" ==
              conn
              |> assign(:valid_signature, true)
-             |> put_req_header("signature", "keyId=\"#{actor}/main-key\"")
+             |> put_req_header("signature", "keyId=\"#{actor}#main-key\"")
              |> put_req_header("content-type", "application/activity+json")
              |> post("/inbox", data)
              |> json_response(200)
@@ -790,7 +790,7 @@ test "mastodon pin/unpin", %{conn: conn} do
       |> String.replace("{{nickname}}", "lain")

     actor = "https://example.com/users/lain"
-    key_id = "#{actor}/main-key"
+    key_id = "#{actor}#main-key"

     sender =
       insert(:user,
@@ -883,7 +883,7 @@ test "mastodon pin/unpin", %{conn: conn} do
     assert "ok" ==
              conn
              |> assign(:valid_signature, true)
-             |> put_req_header("signature", "keyId=\"#{actor}/main-key\"")
+             |> put_req_header("signature", "keyId=\"#{actor}#main-key\"")
              |> put_req_header("content-type", "application/activity+json")
              |> post("/inbox", data)
              |> json_response(200)
@@ -915,7 +915,7 @@ test "it inserts an incoming activity into the database", %{conn: conn, data: da
     conn =
       conn
       |> assign(:valid_signature, true)
-      |> put_req_header("signature", "keyId=\"#{data["actor"]}/main-key\"")
+      |> put_req_header("signature", "keyId=\"#{data["actor"]}#main-key\"")
       |> put_req_header("content-type", "application/activity+json")
       |> post("/users/#{user.nickname}/inbox", data)

@@ -939,7 +939,7 @@ test "it accepts messages with to as string instead of array", %{conn: conn, dat
     conn =
       conn
       |> assign(:valid_signature, true)
-      |> put_req_header("signature", "keyId=\"#{data["actor"]}/main-key\"")
+      |> put_req_header("signature", "keyId=\"#{data["actor"]}#main-key\"")
       |> put_req_header("content-type", "application/activity+json")
       |> post("/users/#{user.nickname}/inbox", data)

@@ -961,7 +961,7 @@ test "it accepts messages with cc as string instead of array", %{conn: conn, dat
     conn =
       conn
       |> assign(:valid_signature, true)
-      |> put_req_header("signature", "keyId=\"#{data["actor"]}/main-key\"")
+      |> put_req_header("signature", "keyId=\"#{data["actor"]}#main-key\"")
       |> put_req_header("content-type", "application/activity+json")
      |> post("/users/#{user.nickname}/inbox", data)

@@ -988,7 +988,7 @@ test "it accepts messages with bcc as string instead of array", %{conn: conn, da
     conn =
       conn
       |> assign(:valid_signature, true)
-      |> put_req_header("signature", "keyId=\"#{data["actor"]}/main-key\"")
+      |> put_req_header("signature", "keyId=\"#{data["actor"]}#main-key\"")
       |> put_req_header("content-type", "application/activity+json")
       |> post("/users/#{user.nickname}/inbox", data)
diff --git a/test/support/http_request_mock.ex b/test/support/http_request_mock.ex
index d14434333..4d2ad559d 100644
--- a/test/support/http_request_mock.ex
+++ b/test/support/http_request_mock.ex
@@ -424,7 +424,7 @@ def get("http://mastodon.example.org/users/admin", _, _, _) do
      }}
   end

-  def get("http://mastodon.example.org/users/admin/main-key", _, _, _) do
+  def get("http://mastodon.example.org/users/admin#main-key", _, _, _) do
     {:ok,
      %Tesla.Env{
        status: 200,
@@ -433,6 +433,19 @@
      }}
   end

+  def get("http://remote.example/users/with_key_id_of_admin-mastodon.example.org" = url, _, _, _) do
+    {:ok,
+     %Tesla.Env{
+       status: 200,
+       url: url,
+       body:
+         File.read!(
+           "test/fixtures/tesla_mock/evil@remote.example_with_keyid_from_admin@mastdon.example.org.json"
+         ),
+       headers: activitypub_object_headers()
+     }}
+  end
+
   def get(
         "http://mastodon.example.org/users/admin/statuses/99512778738411822/replies?min_id=99512778738411824&page=true",
         _,

From cc5c1bb10c99fbd90ca396caa01115be973fb27d Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 12/26] signing_key: cleanup code

In particular this avoids an unnecessary roundtrip over user_id
when searching a key via its primary key_id

---
 lib/pleroma/user/signing_key.ex | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex
index 04cdced59..69e13355a 100644
--- a/lib/pleroma/user/signing_key.ex
+++ b/lib/pleroma/user/signing_key.ex
@@ -174,12 +174,12 @@ def private_key(%User{} = user) do
   Will either return the key if it exists locally, or fetch it from the remote instance.
   """
   def get_or_fetch_by_key_id(key_id) do
-    case key_id_to_user_id(key_id) do
+    case Repo.get_by(__MODULE__, key_id: key_id) do
       nil ->
         fetch_remote_key(key_id)

-      user_id ->
-        {:ok, Repo.get_by(__MODULE__, user_id: user_id)}
+      key ->
+        {:ok, key}
     end
   end

@@ -195,12 +195,11 @@ def get_or_fetch_by_key_id(key_id) do
   def fetch_remote_key(key_id) do
     Logger.debug("Fetching remote key: #{key_id}")

-    with {:ok, _body} = resp <-
+    with {:ok, resp_body} <-
            Pleroma.Object.Fetcher.fetch_and_contain_remote_key(key_id),
-         {:ok, ap_id, public_key_pem} <- handle_signature_response(resp) do
+         {:ok, ap_id, public_key_pem} <- handle_signature_response(resp_body),
+         {:ok, user} <- User.get_or_fetch_by_ap_id(ap_id) do
       Logger.debug("Fetched remote key: #{ap_id}")
-      # fetch the user
-      {:ok, user} = User.get_or_fetch_by_ap_id(ap_id)
       # store the key
       key = %__MODULE__{
         user_id: user.id,
@@ -227,7 +226,7 @@ defp extract_key_details(%{"id" => ap_id, "publicKey" => public_key}) do
     end
   end

-  defp handle_signature_response({:ok, body}) do
+  defp handle_signature_response(body) do
     case body do
       %{
         "type" => "CryptographicKey",
@@ -247,9 +246,9 @@ defp handle_signature_response(body) do
       %{"error" => error} ->
         {:error, error}
+
+      other ->
+        {:error, "Could not process key: #{inspect(other)}"}
     end
   end
-
-  defp handle_signature_response({:error, e}), do: {:error, e}
-  defp handle_signature_response(other), do: {:error, "Could not fetch key: #{inspect(other)}"}
 end

From 3460f417769487cf25e2a022666882a9d595ef0d Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 13/26] Fix user updates

User updates broke with the migration to separate signing keys,
since user data carries signing keys but we didn't allow the
association data to be updated.
---
 lib/pleroma/user.ex        |  4 ++-
 test/pleroma/user_test.exs | 50 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/lib/pleroma/user.ex b/lib/pleroma/user.ex
index 697597e1b..5a54ab842 100644
--- a/lib/pleroma/user.ex
+++ b/lib/pleroma/user.ex
@@ -222,7 +222,9 @@ defmodule Pleroma.User do

     # FOR THE FUTURE: We might want to make this a one-to-many relationship
     # it's entirely possible right now, but we don't have a use case for it
-    has_one(:signing_key, SigningKey, foreign_key: :user_id)
+    # XXX: in the future we’d also like to parse and honour key expiration times
+    # instead of blindly accepting any change in signing keys
+    has_one(:signing_key, SigningKey, foreign_key: :user_id, on_replace: :update)

     timestamps()
   end
diff --git a/test/pleroma/user_test.exs b/test/pleroma/user_test.exs
index b280a269c..39f3ee020 100644
--- a/test/pleroma/user_test.exs
+++ b/test/pleroma/user_test.exs
@@ -2232,6 +2232,56 @@ test "removes report notifs when user isn't superuser any more" do
     # is not a superuser any more
     assert [] = Notification.for_user(user)
   end
+
+  test "updates public key pem" do
+    # note: in the future key updates might be limited to announced expirations
+    user_org =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    old_pub = user_org.signing_key.public_key
+    new_pub = "BEGIN RSA public key"
+    refute old_pub == new_pub
+
+    {:ok, %User{} = user} =
+      User.update_and_set_cache(user_org, %{signing_key: %{public_key: new_pub}})
+
+    user = SigningKey.load_key(user)
+
+    assert user.signing_key.public_key == new_pub
+  end
+
+  test "updates public key id if valid" do
+    # note: in the future key updates might be limited to announced expirations
+    user_org =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    old_kid = user_org.signing_key.key_id
+    new_kid = old_kid <> "_v2"
+
+    {:ok, %User{} = user} =
+      User.update_and_set_cache(user_org, %{signing_key: %{key_id: new_kid}})
+
+    user = SigningKey.load_key(user)
+
+    assert user.signing_key.key_id == new_kid
+    refute Repo.get_by(SigningKey, key_id: old_kid)
+  end
+
+  test "refuses to sever existing key-user mappings" do
+    user1 =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    user2 =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    assert_raise Ecto.ConstraintError, fn ->
+      User.update_and_set_cache(user2, %{signing_key: %{key_id: user1.signing_key.key_id}})
+    end
+  end
 end

 describe "following/followers synchronization" do

From 2a4587f201b06a0a0a2dacaacd77ba3b82955902 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 14/26] Fix SigningKey db schema

---
 ...20250112000000_signing_key_nullability.exs | 24 +++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 priv/repo/migrations/20250112000000_signing_key_nullability.exs

diff --git a/priv/repo/migrations/20250112000000_signing_key_nullability.exs b/priv/repo/migrations/20250112000000_signing_key_nullability.exs
new file mode 100644
index 000000000..02862a5c0
--- /dev/null
+++ b/priv/repo/migrations/20250112000000_signing_key_nullability.exs
@@ -0,0 +1,24 @@
+defmodule Pleroma.Repo.Migrations.SigningKeyNullability do
+  use Ecto.Migration
+
+  import Ecto.Query
+
+  def up() do
+    # Delete existing NULL entries; they are useless
+    Pleroma.User.SigningKey
+    |> where([s], is_nil(s.user_id) or is_nil(s.public_key))
+    |> Pleroma.Repo.delete_all()
+
+    alter table(:signing_keys) do
+      modify :user_id, :uuid, null: false
+      modify :public_key, :text, null: false
+    end
+  end
+
+  def down() do
+    alter table(:signing_keys) do
+      modify :user_id, :uuid, null: true
+      modify :public_key, :text, null: true
+    end
+  end
+end

From ea2de1f28a1c783c553289637e6d54b6009a501e Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 15/26] signing_key: ensure only one key per user exists

Fixes: AkkomaGang/akkoma issue 858

---
 lib/pleroma/user/signing_key.ex               | 13 +++++--
 ...50112000001_signing_key_unique_user_id.exs | 36 +++++++++++++++++++
 2 files changed, 47 insertions(+), 2 deletions(-)
 create mode 100644 priv/repo/migrations/20250112000001_signing_key_unique_user_id.exs

diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex
index 69e13355a..705302013 100644
--- a/lib/pleroma/user/signing_key.ex
+++ b/lib/pleroma/user/signing_key.ex
@@ -201,13 +201,22 @@ def fetch_remote_key(key_id) do
          {:ok, user} <- User.get_or_fetch_by_ap_id(ap_id) do
       Logger.debug("Fetched remote key: #{ap_id}")
       # store the key
-      key = %__MODULE__{
+      key = %{
         user_id: user.id,
         public_key: public_key_pem,
         key_id: key_id
       }

-      Repo.insert(key, on_conflict: :replace_all, conflict_target: :key_id)
+      key_cs =
+        cast(%__MODULE__{}, key, [:user_id, :public_key, :key_id])
+        |> unique_constraint(:user_id)
+
+      Repo.insert(key_cs,
+        # while this should never run for local users anyway, make sure we really never lose privkey info!
+        on_conflict: {:replace_all_except, [:inserted_at, :private_key]},
+        # if the key owner overlaps with a distinct existing key entry, this intentionally still errors
+        conflict_target: :key_id
+      )
     else
       e ->
         Logger.debug("Failed to fetch remote key: #{inspect(e)}")
diff --git a/priv/repo/migrations/20250112000001_signing_key_unique_user_id.exs b/priv/repo/migrations/20250112000001_signing_key_unique_user_id.exs
new file mode 100644
index 000000000..00a716db8
--- /dev/null
+++ b/priv/repo/migrations/20250112000001_signing_key_unique_user_id.exs
@@ -0,0 +1,36 @@
+defmodule Pleroma.Repo.Migrations.SigningKeyUniqueUserId do
+  use Ecto.Migration
+
+  import Ecto.Query
+
+  def up() do
+    # If dupes exist for any local user we do NOT want to delete the genuine privkey alongside the fake.
+    # Instead just filter out anything pertaining to local users; if dupes exist, manual intervention
+    # is required anyway and index creation will just fail later (check against legacy field in users table)
+    dupes =
+      Pleroma.User.SigningKey
+      |> join(:inner, [s], u in Pleroma.User, on: s.user_id == u.id)
+      |> group_by([s], s.user_id)
+      |> having([], count() > 1)
+      |> having([_s, u], not fragment("bool_or(?)", u.local))
+      |> select([s], s.user_id)
+
+    # Delete existing remote duplicates
+    # they’ll be reinserted on the next user update
+    # or proactively fetched when receiving a signature from it
+    Pleroma.User.SigningKey
+    |> where([s], s.user_id in subquery(dupes))
+    |> Pleroma.Repo.delete_all()
+
+    drop_if_exists(index(:signing_keys, [:user_id]))
+
+    create_if_not_exists(
+      index(:signing_keys, [:user_id], name: :signing_keys_user_id_index, unique: true)
+    )
+  end
+
+  def down() do
+    drop_if_exists(index(:signing_keys, [:user_id]))
+    create_if_not_exists(index(:signing_keys, [:user_id], name: :signing_keys_user_id_index))
+  end
+end

From 898b98e5ddb7e8acaeac93ecb1ca0948a8c7a923 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 16/26] db: drop legacy key fields in users table

---
 lib/pleroma/user.ex                           |  1 -
 .../20240625220752_move_signing_keys.exs      |  2 +-
 ...112000002_users_drop_legacy_key_fields.exs | 31 +++++++++++++++++++
 test/pleroma/user_test.exs                    |  1 -
 4 files changed, 32 insertions(+), 3 deletions(-)
 create mode 100644 priv/repo/migrations/20250112000002_users_drop_legacy_key_fields.exs

diff --git a/lib/pleroma/user.ex b/lib/pleroma/user.ex
index 5a54ab842..666464cab 100644
--- a/lib/pleroma/user.ex
+++ b/lib/pleroma/user.ex
@@ -100,7 +100,6 @@ defmodule Pleroma.User do
     field(:password_hash, :string)
     field(:password, :string, virtual: true)
     field(:password_confirmation, :string, virtual: true)
-    field(:keys, :string)
     field(:ap_id, :string)
     field(:avatar, :map, default: %{})
     field(:local, :boolean, default: true)
diff --git a/priv/repo/migrations/20240625220752_move_signing_keys.exs b/priv/repo/migrations/20240625220752_move_signing_keys.exs
index f5569ce09..743b2585a 100644
--- a/priv/repo/migrations/20240625220752_move_signing_keys.exs
+++ b/priv/repo/migrations/20240625220752_move_signing_keys.exs
@@ -11,7 +11,7 @@ def up do
     # Also this MUST use select, else the migration will fail in future installs with new user fields!
     from(u in Pleroma.User,
       where: u.local == true,
-      select: {u.id, u.keys, u.ap_id}
+      select: {u.id, fragment("?.keys", u), u.ap_id}
     )
     |> Repo.stream(timeout: :infinity)
     |> Enum.each(fn
diff --git a/priv/repo/migrations/20250112000002_users_drop_legacy_key_fields.exs b/priv/repo/migrations/20250112000002_users_drop_legacy_key_fields.exs
new file mode 100644
index 000000000..c65817369
--- /dev/null
+++ b/priv/repo/migrations/20250112000002_users_drop_legacy_key_fields.exs
@@ -0,0 +1,31 @@
+defmodule Pleroma.Repo.Migrations.UsersDropLegacyKeyFields do
+  use Ecto.Migration
+
+  def up() do
+    alter table(:users) do
+      remove :keys, :text
+      remove :public_key, :text
+    end
+  end
+
+  def down() do
+    # Using raw query since the "keys" field may not exist in the Elixir Ecto schema,
+    # causing issues when migrating data back, and this requires column adds to be raw query too
+    """
+    ALTER TABLE public.users
+      ADD COLUMN keys text,
+      ADD COLUMN public_key text;
+    """
+    |> Pleroma.Repo.query!([], timeout: :infinity)
+
+    """
+    UPDATE public.users AS u
+      SET keys = s.private_key
+      FROM public.signing_keys AS s
+      WHERE s.user_id = u.id AND
+            u.local AND
+            s.private_key IS NOT NULL;
+    """
+    |> Pleroma.Repo.query!([], timeout: :infinity)
+  end
+end
diff --git a/test/pleroma/user_test.exs b/test/pleroma/user_test.exs
index 39f3ee020..d69551985 100644
--- a/test/pleroma/user_test.exs
+++ b/test/pleroma/user_test.exs
@@ -1708,7 +1708,6 @@ test "delete/1 purges a user when they wouldn't be fully deleted" do
       bio: "eyy lmao",
       name: "qqqqqqq",
       password_hash: "pdfk2$1b3n159001",
-      keys: "RSA begin buplic key",
       avatar: %{"a" => "b"},
       tags: ["qqqqq"],
       banner: %{"a" => "b"},

From 8a0d1309768a7b47215b3bb32b12fee4fb7f1699 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 17/26] Add tests for SigningKey module

---
 test/pleroma/user/signing_key_tests.ex | 305 +++++++++++++++++++++++++
 1 file changed, 305 insertions(+)
 create mode 100644 test/pleroma/user/signing_key_tests.ex

diff --git a/test/pleroma/user/signing_key_tests.ex b/test/pleroma/user/signing_key_tests.ex
new file mode 100644
index 000000000..f4ea245d9
--- /dev/null
+++ b/test/pleroma/user/signing_key_tests.ex
@@ -0,0 +1,305 @@
+# Akkoma: Magically expressive social media
+# Copyright © 2024 Akkoma Authors
+# SPDX-License-Identifier: AGPL-3.0-only
+
+defmodule Pleroma.User.SigningKeyTests do
+  alias Pleroma.User
+  alias Pleroma.User.SigningKey
+  alias Pleroma.Repo
+
+  use Pleroma.DataCase
+  use Oban.Testing, repo: Pleroma.Repo
+
+  import Pleroma.Factory
+
+  defp maybe_put(map, _, nil), do: map
+  defp maybe_put(map, key, val), do: Kernel.put_in(map, key, val)
+
+  defp get_body_actor(key_id \\ nil, user_id \\ nil, owner_id \\ nil) do
+    owner_id = owner_id || user_id
+
+    File.read!("test/fixtures/tesla_mock/admin@mastdon.example.org.json")
+    |> Jason.decode!()
+    |> maybe_put(["id"], user_id)
+    |> maybe_put(["publicKey", "id"], key_id)
+    |> maybe_put(["publicKey", "owner"], owner_id)
+    |> Jason.encode!()
+  end
+
+  defp get_body_rawkey(key_id, owner, pem \\ "RSA begin buplic key") do
+    %{
+      "type" => "CryptographicKey",
+      "id" => key_id,
+      "owner" => owner,
+      "publicKeyPem" => pem
+    }
+    |> Jason.encode!()
+  end
+
+  defmacro mock_tesla(
+             url,
+             get_body,
+             status \\ 200,
+             headers \\ []
+           ) do
+    quote do
+      Tesla.Mock.mock(fn
+        %{method: :get, url: unquote(url)} ->
+          %Tesla.Env{
+            status: unquote(status),
+            body: unquote(get_body),
+            url: unquote(url),
+            headers: [
+              {"content-type",
+               "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\""}
+              | unquote(headers)
+            ]
+          }
+      end)
+    end
+  end
+
+  describe "successfully" do
+    test "inserts key and new user on fetch" do
+      ap_id_actor = "https://mastodon.example.org/signing-key-test/actor"
+      ap_id_key = ap_id_actor <> "#main-key"
+      ap_doc = get_body_actor(ap_id_key, ap_id_actor)
+      mock_tesla(ap_id_actor, ap_doc)
+
+      {:ok, %SigningKey{} = key} = SigningKey.fetch_remote_key(ap_id_key)
+      user = User.get_by_id(key.user_id)
+
+      assert match?(%User{}, user)
+      user = SigningKey.load_key(user)
+
+      assert user.ap_id == ap_id_actor
+      assert user.signing_key.key_id == ap_id_key
+      assert user.signing_key.key_id == key.key_id
+      assert user.signing_key.private_key == nil
+    end
+
+    test "updates existing key" do
+      user =
+        insert(:user, local: false, domain: "mastodon.example.org")
+        |> with_signing_key()
+
+      ap_id_actor = user.ap_id
+      ap_doc = get_body_actor(user.signing_key.key_id, ap_id_actor)
+      mock_tesla(ap_id_actor, ap_doc)
+
+      old_pem = user.signing_key.public_key
+      old_priv = user.signing_key.private_key
+
+      # note: the returned value does not fully match the value stored in the database
+      # since inserted_at isn't changed on upserts
+      {:ok, %SigningKey{} = key} = SigningKey.fetch_remote_key(user.signing_key.key_id)
+
+      refreshed_key = Repo.get_by(SigningKey, key_id: key.key_id)
+      assert match?(%SigningKey{}, refreshed_key)
+      refute refreshed_key.public_key == old_pem
+      assert refreshed_key.private_key == old_priv
+      assert refreshed_key.user_id == user.id
+      assert key.public_key == refreshed_key.public_key
+    end
+
+    test "finds known key by key_id" do
+      sk = insert(:signing_key, key_id: "https://remote.example/signing-key-test/some-known-key")
+      {:ok, key} = SigningKey.get_or_fetch_by_key_id(sk.key_id)
+      assert sk == key
+    end
+
+    test "finds key for remote user" do
+      user_with_preload =
+        insert(:user, local: false)
+        |> with_signing_key()
+
+      user = User.get_by_id(user_with_preload.id)
+      assert !match?(%SigningKey{}, user.signing_key)
+
+      user = SigningKey.load_key(user)
+      assert match?(%SigningKey{}, user.signing_key)
+
+      # the initial "with_signing_key" doesn't set timestamps, and meta differs (loaded vs built)
+      # thus clear affected fields before comparison
+      found_sk = %{user.signing_key | inserted_at: nil, updated_at: nil, __meta__: nil}
+      ref_sk = %{user_with_preload.signing_key | __meta__: nil}
+      assert found_sk == ref_sk
+    end
+
+    test "finds remote user id by key id" do
+      user =
+        insert(:user, local: false)
+        |> with_signing_key()
+
+      uid = SigningKey.key_id_to_user_id(user.signing_key.key_id)
+      assert uid == user.id
+    end
+
+    test "finds remote user ap id by key id" do
+      user =
+        insert(:user, local: false)
+        |> with_signing_key()
+
+      uapid = SigningKey.key_id_to_ap_id(user.signing_key.key_id)
+      assert uapid == user.ap_id
+    end
+  end
+
+  test "won't fetch keys for local users" do
+    user =
+      insert(:user, local: true)
+      |> with_signing_key()
+
+    {:error, _} = SigningKey.fetch_remote_key(user.signing_key.key_id)
+  end
+
+  test "fails insert with overlapping key owner" do
+    user =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    second_key_id =
+      user.signing_key.key_id
+      |> URI.parse()
+      |> Map.put(:fragment, nil)
+      |> Map.put(:query, nil)
+      |> URI.to_string()
+      |> then(fn id -> id <> "/second_key" end)
+
+    ap_doc = get_body_rawkey(second_key_id, user.ap_id)
+    mock_tesla(second_key_id, ap_doc)
+
+    res = SigningKey.fetch_remote_key(second_key_id)
+
+    assert match?({:error, %{errors: _}}, res)
+    {:error, cs} = res
+    assert Keyword.has_key?(cs.errors, :user_id)
+  end
+
+  test "Fetched raw SigningKeys cannot take over arbitrary users" do
+    # in theory cross-domain key and actor are fine, IF and ONLY IF
+    # the actor also links back to this key, but this isn’t supported atm anyway
+    user =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    remote_key_id = "https://remote.example/keys/for_local"
+    keydoc = get_body_rawkey(remote_key_id, user.ap_id)
+    mock_tesla(remote_key_id, keydoc)
+
+    {:error, _} = SigningKey.fetch_remote_key(remote_key_id)
+
+    refreshed_org_key = Repo.get_by(SigningKey, key_id: user.signing_key.key_id)
+    refreshed_user_key = Repo.get_by(SigningKey, user_id: user.id)
+    assert match?(%SigningKey{}, refreshed_org_key)
+    assert match?(%SigningKey{}, refreshed_user_key)
+
+    actor_host = URI.parse(user.ap_id).host
+    org_key_host = URI.parse(refreshed_org_key.key_id).host
+    usr_key_host = URI.parse(refreshed_user_key.key_id).host
+    assert actor_host == org_key_host
+    assert actor_host == usr_key_host
+    refute usr_key_host == "remote.example"
+
+    assert refreshed_user_key == refreshed_org_key
+    assert user.signing_key.key_id == refreshed_org_key.key_id
+  end
+
+  test "Fetched non-raw SigningKey cannot take over arbitrary users" do
+    # this actually comes free with our fetch ID checks, but let's verify it here too for good measure
+    user =
+      insert(:user, local: false)
+      |> with_signing_key()
+
+    remote_key_id = "https://remote.example/keys#for_local"
+    keydoc = get_body_actor(remote_key_id, user.ap_id, user.ap_id)
+    mock_tesla(remote_key_id, keydoc)
+
+    {:error, _} = SigningKey.fetch_remote_key(remote_key_id)
+
+    refreshed_org_key = Repo.get_by(SigningKey, key_id: user.signing_key.key_id)
+    refreshed_user_key = Repo.get_by(SigningKey, user_id: user.id)
+    assert match?(%SigningKey{}, refreshed_org_key)
+    assert match?(%SigningKey{}, refreshed_user_key)
+
+    actor_host = URI.parse(user.ap_id).host
+    org_key_host = URI.parse(refreshed_org_key.key_id).host
+    usr_key_host = URI.parse(refreshed_user_key.key_id).host
+    assert actor_host == org_key_host
+    assert actor_host == usr_key_host
+    refute usr_key_host == "remote.example"
+
+    assert refreshed_user_key == refreshed_org_key
+    assert user.signing_key.key_id == refreshed_org_key.key_id
+  end
+
+  test "remote users sharing signing key ID don't break our database" do
+    # in principle a valid setup using this can be constructed,
+    # but so far not observed in practice and our db scheme cannot handle it.
+    # Thus make sure it doesn't break anything in our db but gets rejected
+    key_id = "https://mastodon.example.org/the_one_key"
+
+    user1 =
+      insert(:user, local: false, domain: "mastodon.example.org")
+      |> with_signing_key(%{key_id: key_id})
+
+    key_owner = "https://mastodon.example.org/#"
+
+    user2_ap_id = user1.ap_id <> "22"
+    user2_doc = get_body_actor(user1.signing_key.key_id, user2_ap_id, key_owner)
+
+    user3_ap_id = user1.ap_id <> "333"
+    user3_doc = get_body_actor(user1.signing_key.key_id, user3_ap_id)
+
+    standalone_key_doc =
+      get_body_rawkey(key_id, "https://mastodon.example.org/#", user1.signing_key.public_key)
+
+    ap_headers = [
+      {"content-type", "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\""}
+    ]
+
+    Tesla.Mock.mock(fn
+      %{method: :get, url: ^key_id} ->
+        %Tesla.Env{
+          status: 200,
+          body: standalone_key_doc,
+          url: key_id,
+          headers: ap_headers
+        }
+
+      %{method: :get, url: ^user2_ap_id} ->
+        %Tesla.Env{
+          status: 200,
+          body: user2_doc,
+          url: user2_ap_id,
+          headers: ap_headers
+        }
+
+      %{method: :get, url: ^user3_ap_id} ->
+        %Tesla.Env{
+          status: 200,
+          body: user3_doc,
+          url: user3_ap_id,
+          headers: ap_headers
+        }
+    end)
+
+    {:error, _} = SigningKey.fetch_remote_key(key_id)
+
+    {:ok, user2} = User.get_or_fetch_by_ap_id(user2_ap_id)
+    {:ok, user3} = User.get_or_fetch_by_ap_id(user3_ap_id)
+
+    {:ok, db_key} = SigningKey.get_or_fetch_by_key_id(key_id)
+
+    keys =
+      from(s in SigningKey, where: s.key_id == ^key_id)
+      |> Repo.all()
+
+    assert match?([%SigningKey{}], keys)
+    assert [db_key] == keys
+    assert db_key.user_id == user1.id
+    assert match?({:ok, _}, SigningKey.public_key(user1))
+    assert {:error, "key not found"} == SigningKey.public_key(user2)
+    assert {:error, "key not found"} == SigningKey.public_key(user3)
+  end
+end

From ee61ce61a77421eedc19c1bcde07a7cf001331c5 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 18/26] changelog: summarise preceding changes

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6172134e2..43fff4dda 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 ## Added

 ## Fixed
+- fixed some holes in SigningKey verification potentially allowing the key-user mapping to be poisoned
+- frontend ZIP files can no longer traverse to paths outside their install dir
+- fixed user updates trying but failing to renew signing key information

 ## Changed
 - Dropped obsolete `ap_enabled` indicator from user table and associated buggy logic

From bc79bd0edf27e01675a5b392e6da5352bcd9ce7c Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 11 Jan 2025 21:25:00 +0100
Subject: [PATCH 19/26] cosmetic/test/user: replace deprecated clear_config
 syntax

---
 test/pleroma/user_test.exs | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/test/pleroma/user_test.exs b/test/pleroma/user_test.exs
index d69551985..f68bf29bc 100644
--- a/test/pleroma/user_test.exs
+++ b/test/pleroma/user_test.exs
@@ -461,12 +461,7 @@ test "it sends a welcome message if it is set" do
       }
     )

-    setup do:
-            clear_config(:mrf,
-              policies: [
-                Pleroma.Web.ActivityPub.MRF.SimplePolicy
-              ]
-            )
+    setup do: clear_config([:mrf, :policies], [Pleroma.Web.ActivityPub.MRF.SimplePolicy])

     test "it sends a welcome email message if it is set" do
       welcome_user = insert(:user)

From 51642a90c523379ae4982dbe0e5e99bff1019ff1 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sun, 12 Jan 2025 23:43:03 +0100
Subject: [PATCH 20/26] signature: drop unnecessary round trip over user

We already got the key.

---
 lib/pleroma/signature.ex                     | 27 +++++---------------
 lib/pleroma/user.ex                          | 11 --------
 lib/pleroma/user/signing_key.ex              | 24 +++++++----------
 lib/pleroma/web/plugs/http_signature_plug.ex | 10 +++++---
 test/pleroma/signature_test.exs              | 10 --------
 test/pleroma/user_test.exs                   |  4 ---
 6 files changed, 22 insertions(+), 64 deletions(-)

diff --git a/lib/pleroma/signature.ex b/lib/pleroma/signature.ex
index bc3baf433..86065a603 100644
--- a/lib/pleroma/signature.ex
+++ b/lib/pleroma/signature.ex
@@ -9,24 +9,11 @@ defmodule Pleroma.Signature do
   alias Pleroma.User.SigningKey
   require Logger

-  def key_id_to_actor_id(key_id) do
-    case SigningKey.key_id_to_ap_id(key_id) do
-      nil ->
-        # hm, we SHOULD have gotten this in the pipeline before we hit here!
-        Logger.error("Could not figure out who owns the key #{key_id}")
-        {:error, :key_owner_not_found}
-
-      key ->
-        {:ok, key}
-    end
-  end
-
   def fetch_public_key(conn) do
     with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn),
-         {:ok, %SigningKey{}} <- SigningKey.get_or_fetch_by_key_id(kid),
-         {:ok, actor_id} <- key_id_to_actor_id(kid),
-         {:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do
-      {:ok, public_key}
+         {:ok, %SigningKey{} = sk} <- SigningKey.get_or_fetch_by_key_id(kid),
+         {:ok, decoded_key} <- SigningKey.public_key_decoded(sk) do
+      {:ok, decoded_key}
     else
       e ->
         {:error, e}
@@ -35,10 +22,10 @@ def fetch_public_key(conn) do

   def refetch_public_key(conn) do
     with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn),
-         {:ok, %SigningKey{}} <- SigningKey.get_or_fetch_by_key_id(kid),
-         {:ok, actor_id} <- key_id_to_actor_id(kid),
-         {:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do
-      {:ok, public_key}
+         # TODO: force a refetch of stale keys (perhaps with a backoff time based on updated_at)
+         {:ok, %SigningKey{} = sk} <- SigningKey.get_or_fetch_by_key_id(kid),
+         {:ok, decoded_key} <- SigningKey.public_key_decoded(sk) do
+      {:ok, decoded_key}
     else
       e ->
         {:error, e}
diff --git a/lib/pleroma/user.ex b/lib/pleroma/user.ex
index 666464cab..5f3ddf64a 100644
--- a/lib/pleroma/user.ex
+++ b/lib/pleroma/user.ex
@@ -2068,17 +2068,6 @@ defp create_service_actor(uri, nickname) do

   defdelegate public_key(user), to: SigningKey

-  def get_public_key_for_ap_id(ap_id) do
-    with {:ok, %User{} = user} <- get_or_fetch_by_ap_id(ap_id),
-         {:ok, public_key} <- SigningKey.public_key(user) do
-      {:ok, public_key}
-    else
-      e ->
-        Logger.error("Could not get public key for #{ap_id}.\n#{inspect(e)}")
-        {:error, e}
-    end
-  end
-
   @doc "Gets or fetch a user by uri or nickname."
   @spec get_or_fetch(String.t()) :: {:ok, User.t()} | {:error, String.t()}
   def get_or_fetch("http://" <> _host = uri), do: get_or_fetch_by_ap_id(uri)
diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex
index 705302013..708bbbe19 100644
--- a/lib/pleroma/user/signing_key.ex
+++ b/lib/pleroma/user/signing_key.ex
@@ -115,24 +115,18 @@ def private_pem_to_public_pem(private_pem) do
     {:ok, :public_key.pem_encode([public_key])}
   end

-  @spec public_key(User.t()) :: {:ok, binary()} | {:error, String.t()}
+  @spec public_key_decoded(__MODULE__) :: {:ok, binary()} | {:error, String.t()}
   @doc """
-  Given a user, return the public key for that user in binary format.
+  Return public key data in binary format.
""" - def public_key(%User{} = user) do - case Repo.preload(user, :signing_key) do - %User{signing_key: %__MODULE__{public_key: public_key_pem}} -> - key = - public_key_pem - |> :public_key.pem_decode() - |> hd() - |> :public_key.pem_entry_decode() + def public_key_decoded(%__MODULE__{public_key: public_key_pem}) do + decoded = + public_key_pem + |> :public_key.pem_decode() + |> hd() + |> :public_key.pem_entry_decode() - {:ok, key} - - _ -> - {:error, "key not found"} - end + {:ok, decoded} end def public_key(_), do: {:error, "key not found"} diff --git a/lib/pleroma/web/plugs/http_signature_plug.ex b/lib/pleroma/web/plugs/http_signature_plug.ex index 06527cada..c8df805fe 100644 --- a/lib/pleroma/web/plugs/http_signature_plug.ex +++ b/lib/pleroma/web/plugs/http_signature_plug.ex @@ -8,8 +8,8 @@ defmodule Pleroma.Web.Plugs.HTTPSignaturePlug do use Pleroma.Web, :verified_routes alias Pleroma.Activity - alias Pleroma.Signature alias Pleroma.Instances + alias Pleroma.User.SigningKey require Logger @cachex Pleroma.Config.get([:cachex, :provider], Cachex) @@ -140,15 +140,17 @@ defp maybe_require_signature(conn), do: conn defp signature_host(conn) do with {:key_id, %{"keyId" => kid}} <- {:key_id, HTTPSignatures.signature_for_conn(conn)}, - {:actor_id, {:ok, actor_id}} <- {:actor_id, Signature.key_id_to_actor_id(kid)} do + {:actor_id, actor_id, _} when actor_id != nil <- + {:actor_id, SigningKey.key_id_to_ap_id(kid), kid} do actor_id else {:key_id, e} -> Logger.error("Failed to extract key_id from signature: #{inspect(e)}") nil - {:actor_id, e} -> - Logger.error("Failed to extract actor_id from signature: #{inspect(e)}") + {:actor_id, _, kid} -> + # SigningKeys SHOULD have been fetched before this gets called! + Logger.error("Failed to extract actor_id from signature: signing key #{kid} not known") nil end end diff --git a/test/pleroma/signature_test.exs b/test/pleroma/signature_test.exs index 86e2b138a..0e0105534 100644 --- a/test/pleroma/signature_test.exs +++ b/test/pleroma/signature_test.exs @@ -114,16 +114,6 @@ test "it returns signature headers" do end end - describe "key_id_to_actor_id/1" do - test "it reverses the key id to actor id" do - user = - insert(:user) - |> with_signing_key() - - assert Signature.key_id_to_actor_id(user.signing_key.key_id) == {:ok, user.ap_id} - end - end - describe "signed_date" do test "it returns formatted current date" do with_mock(NaiveDateTime, utc_now: fn -> ~N[2019-08-23 18:11:24.822233] end) do diff --git a/test/pleroma/user_test.exs b/test/pleroma/user_test.exs index f68bf29bc..be8e21dcb 100644 --- a/test/pleroma/user_test.exs +++ b/test/pleroma/user_test.exs @@ -1820,10 +1820,6 @@ test "unsuggests a user" do end end - test "get_public_key_for_ap_id fetches a user that's not in the db" do - assert {:ok, _key} = User.get_public_key_for_ap_id("http://mastodon.example.org/users/admin") - end - describe "per-user rich-text filtering" do test "html_filter_policy returns default policies, when rich-text is enabled" do user = insert(:user) From a7b4e4bfd99f4c6b86f276b5245c21f3a08149d1 Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 13 Jan 2025 00:05:42 +0100 Subject: [PATCH 21/26] signature: distinguish error sources and log fetch issues --- lib/pleroma/signature.ex | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/lib/pleroma/signature.ex b/lib/pleroma/signature.ex index 86065a603..39de8e9f1 100644 --- a/lib/pleroma/signature.ex +++ b/lib/pleroma/signature.ex @@ -10,23 +10,33 @@ defmodule Pleroma.Signature do require 
Logger def fetch_public_key(conn) do - with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn), - {:ok, %SigningKey{} = sk} <- SigningKey.get_or_fetch_by_key_id(kid), - {:ok, decoded_key} <- SigningKey.public_key_decoded(sk) do + with {_, %{"keyId" => kid}} <- {:keyid, HTTPSignatures.signature_for_conn(conn)}, + {_, {:ok, %SigningKey{} = sk}, _} <- + {:fetch, SigningKey.get_or_fetch_by_key_id(kid), kid}, + {_, {:ok, decoded_key}} <- {:decode, SigningKey.public_key_decoded(sk)} do {:ok, decoded_key} else + {:fetch, error, kid} -> + Logger.error("Failed to acquire key from signature: #{kid} #{inspect(error)}") + {:error, {:fetch, error}} + e -> {:error, e} end end def refetch_public_key(conn) do - with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn), + with {_, %{"keyId" => kid}} <- {:keyid, HTTPSignatures.signature_for_conn(conn)}, # TODO: force a refetch of stale keys (perhaps with a backoff time based on updated_at) - {:ok, %SigningKey{} = sk} <- SigningKey.get_or_fetch_by_key_id(kid), - {:ok, decoded_key} <- SigningKey.public_key_decoded(sk) do + {_, {:ok, %SigningKey{} = sk}, _} <- + {:fetch, SigningKey.get_or_fetch_by_key_id(kid), kid}, + {_, {:ok, decoded_key}} <- {:decode, SigningKey.public_key_decoded(sk)} do {:ok, decoded_key} else + {:fetch, error, kid} -> + Logger.error("Failed to refresh stale key from signature: #{kid} #{inspect(error)}") + {:error, {:fetch, error}} + e -> {:error, e} end From 9cc5fe9a5fb98130b627733c01ed4de0ca7e9aa3 Mon Sep 17 00:00:00 2001 From: Oneric Date: Mon, 13 Jan 2025 00:24:01 +0100 Subject: [PATCH 22/26] signature: refetch key upon verification failure This matches behaviour prioir to the SigningKey migration and the expected semantics of the http_signatures lib. Additionally add a min interval paramter, to avoid refetch floods on bugs causing incompatible signatures (like e.g. 
currently with Bridgy) --- config/config.exs | 1 + lib/pleroma/signature.ex | 12 +++++++++--- lib/pleroma/user/signing_key.ex | 17 +++++++++++++++++ test/pleroma/signature_test.exs | 11 +++++++++++ 4 files changed, 38 insertions(+), 3 deletions(-) diff --git a/config/config.exs b/config/config.exs index 48ebe91d2..be6a0d81c 100644 --- a/config/config.exs +++ b/config/config.exs @@ -370,6 +370,7 @@ note_replies_output_limit: 5, sign_object_fetches: true, authorized_fetch_mode: false, + min_key_refetch_interval: 86_400, max_collection_objects: 50 config :pleroma, :streamer, diff --git a/lib/pleroma/signature.ex b/lib/pleroma/signature.ex index 39de8e9f1..f926f1aa6 100644 --- a/lib/pleroma/signature.ex +++ b/lib/pleroma/signature.ex @@ -27,12 +27,18 @@ def fetch_public_key(conn) do def refetch_public_key(conn) do with {_, %{"keyId" => kid}} <- {:keyid, HTTPSignatures.signature_for_conn(conn)}, - # TODO: force a refetch of stale keys (perhaps with a backoff time based on updated_at) - {_, {:ok, %SigningKey{} = sk}, _} <- - {:fetch, SigningKey.get_or_fetch_by_key_id(kid), kid}, + {_, {:ok, %SigningKey{} = sk}, _} <- {:fetch, SigningKey.refresh_by_key_id(kid), kid}, {_, {:ok, decoded_key}} <- {:decode, SigningKey.public_key_decoded(sk)} do {:ok, decoded_key} else + {:fetch, {:error, :too_young}, kid} -> + Logger.debug("Refusing to refetch recently updated key: #{kid}") + {:error, {:fetch, :too_young}} + + {:fetch, {:error, :unknown}, kid} -> + Logger.warning("Attempted to refresh unknown key; this should not happen: #{kid}") + {:error, {:fetch, :unknown}} + {:fetch, error, kid} -> Logger.error("Failed to refresh stale key from signature: #{kid} #{inspect(error)}") {:error, {:fetch, error}} diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex index 708bbbe19..91aa25a4e 100644 --- a/lib/pleroma/user/signing_key.ex +++ b/lib/pleroma/user/signing_key.ex @@ -218,6 +218,23 @@ def fetch_remote_key(key_id) do end end + defp refresh_key(%__MODULE__{} = key) do + min_backoff = Pleroma.Config.get!([:activitypub, :min_key_refetch_interval]) + + if Timex.diff(Timex.now(), key.updated_at, :seconds) >= min_backoff do + fetch_remote_key(key.key_id) + else + {:error, :too_young} + end + end + + def refresh_by_key_id(key_id) do + case Repo.get_by(__MODULE__, key_id: key_id) do + nil -> {:error, :unknown} + key -> refresh_key(key) + end + end + # Take the response from the remote instance and extract the key details # will check if the key ID matches the owner of the key, if not, error defp extract_key_details(%{"id" => ap_id, "publicKey" => public_key}) do diff --git a/test/pleroma/signature_test.exs b/test/pleroma/signature_test.exs index 0e0105534..731605804 100644 --- a/test/pleroma/signature_test.exs +++ b/test/pleroma/signature_test.exs @@ -56,8 +56,19 @@ test "it returns error if public key is nil" do describe "refetch_public_key/1" do test "it returns key" do + clear_config([:activitypub, :min_key_refetch_interval], 0) ap_id = "https://mastodon.social/users/lambadalambda" + %Pleroma.User{signing_key: sk} = + Pleroma.User.get_or_fetch_by_ap_id(ap_id) + |> then(fn {:ok, u} -> u end) + |> Pleroma.User.SigningKey.load_key() + + {:ok, _} = + %{sk | public_key: "-----BEGIN PUBLIC KEY-----\nasdfghjkl"} + |> Ecto.Changeset.change() + |> Pleroma.Repo.update() + assert Signature.refetch_public_key(make_fake_conn(ap_id)) == {:ok, @rsa_public_key} end end From d8e40173bfce556d7bc12d1f7685ff6b46c3df10 Mon Sep 17 00:00:00 2001 From: Oneric Date: Sun, 19 Jan 2025 05:06:06 +0100 Subject: [PATCH 
23/26] http_signatures: tweak order of route aliases We expect most requests to be made for the actual canonical ID, so check this one first (starting without query headers matching the predominant albeit spec-breaking version). Also avoid unnecessary rerewrites of the digest header on each route alias by just setting it once before iterating through aliases. --- lib/pleroma/web/plugs/http_signature_plug.ex | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/pleroma/web/plugs/http_signature_plug.ex b/lib/pleroma/web/plugs/http_signature_plug.ex index c8df805fe..195a9dc1c 100644 --- a/lib/pleroma/web/plugs/http_signature_plug.ex +++ b/lib/pleroma/web/plugs/http_signature_plug.ex @@ -77,10 +77,6 @@ defp assign_valid_signature_on_route_aliases(conn, [path | rest]) do |> put_req_header("(request-target)", request_target) |> maybe_put_created_psudoheader() |> maybe_put_expires_psudoheader() - |> case do - %{assigns: %{digest: digest}} = conn -> put_req_header(conn, "digest", digest) - conn -> conn - end conn |> assign(:valid_signature, HTTPSignatures.validate_conn(conn)) @@ -93,7 +89,13 @@ defp maybe_assign_valid_signature(conn) do # set (request-target) header to the appropriate value # we also replace the digest header with the one we computed possible_paths = - route_aliases(conn) ++ [conn.request_path, conn.request_path <> "?#{conn.query_string}"] + [conn.request_path, conn.request_path <> "?#{conn.query_string}" | route_aliases(conn)] + + conn = + case conn do + %{assigns: %{digest: digest}} = conn -> put_req_header(conn, "digest", digest) + conn -> conn + end assign_valid_signature_on_route_aliases(conn, possible_paths) else From 11ad4711ebf5a8f23a94b5d9e3956b8c073a1b1d Mon Sep 17 00:00:00 2001 From: Oneric Date: Sun, 19 Jan 2025 05:57:33 +0100 Subject: [PATCH 24/26] signing_key: don't retrieve superfluous fields when loading ap_id --- lib/pleroma/user/signing_key.ex | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/lib/pleroma/user/signing_key.ex b/lib/pleroma/user/signing_key.ex index 91aa25a4e..9d052b6bf 100644 --- a/lib/pleroma/user/signing_key.ex +++ b/lib/pleroma/user/signing_key.ex @@ -56,16 +56,10 @@ def key_id_to_user_id(key_id) do def key_id_to_ap_id(key_id) do Logger.debug("Looking up key ID: #{key_id}") - result = - from(sk in __MODULE__, where: sk.key_id == ^key_id) - |> join(:inner, [sk], u in User, on: sk.user_id == u.id) - |> select([sk, u], %{user: u}) - |> Repo.one() - - case result do - %{user: %User{ap_id: ap_id}} -> ap_id - _ -> nil - end + from(sk in __MODULE__, where: sk.key_id == ^key_id) + |> join(:inner, [sk], u in User, on: sk.user_id == u.id) + |> select([sk, u], u.ap_id) + |> Repo.one() end @spec generate_rsa_pem() :: {:ok, binary()} From 8243fc0ef482a28daf2bcae2c64a9510bdb76489 Mon Sep 17 00:00:00 2001 From: Oneric Date: Fri, 31 Jan 2025 19:51:53 +0100 Subject: [PATCH 25/26] federation: strip internal fields from incoming updates and history When note editing support was added, it was omitted to strip internal fields from edited notes and their history. This was uncovered due to Mastodon inlining the like count as a "likes" collection conflicting with our internal "likes" list causing validation failures. In a spot check with likes/like_count it was not possible to inject those internal fields into the local db via Update, but this was not extensively tested for all fields and avenues. 
Similarly, address normalisation did not normalise addressing in the
object history, although this was never at risk of being exploitable.

The revision history of the Pleroma MR adding edit support reveals
recursive stripping was intentionally avoided, since it will end up
removing e.g. emoji from outgoing activities. This appears to still be
true. However, all current internal fields ("pleroma_internal" appears
to be unused) contain data already publicised otherwise anyway. In the
interest of quickly fixing a federation bug (and at worst potential
data injection), outgoing stripping is left non-recursive for now.

Of course the ultimate fix here is to not mix remote and internal data
into the same map in the first place, but unfortunately having a single
map of all truth is a core assumption of *oma's AP doc processing.
Changing this is a massive undertaking and not suitable for providing
a short-term fix.

---
 .../web/activity_pub/transmogrifier.ex        |  54 ++++--
 .../web/activity_pub/transmogrifier_test.exs  | 177 ++++++++++++++++++
 2 files changed, 219 insertions(+), 12 deletions(-)

diff --git a/lib/pleroma/web/activity_pub/transmogrifier.ex b/lib/pleroma/web/activity_pub/transmogrifier.ex
index ecf6d9cd8..9ed54fa6e 100644
--- a/lib/pleroma/web/activity_pub/transmogrifier.ex
+++ b/lib/pleroma/web/activity_pub/transmogrifier.ex
@@ -29,6 +29,7 @@ defmodule Pleroma.Web.ActivityPub.Transmogrifier do

   @doc """
   Modifies an incoming AP object (mastodon format) to our internal format.
+  (This only deals with non-activity AP objects)
   """
   def fix_object(object, options \\ []) do
     object
@@ -44,6 +45,38 @@ def fix_object(object, options \\ []) do
     |> fix_content_map()
     |> fix_addressing()
     |> fix_summary()
+    |> fix_history(&fix_object/1)
+  end
+
+  defp maybe_fix_object(%{"attributedTo" => _} = object), do: fix_object(object)
+  defp maybe_fix_object(object), do: object
+
+  defp fix_history(%{"formerRepresentations" => %{"orderedItems" => list}} = obj, fix_fun)
+       when is_list(list) do
+    update_in(obj["formerRepresentations"]["orderedItems"], fn h -> Enum.map(h, fix_fun) end)
+  end
+
+  defp fix_history(obj, _), do: obj
+
+  defp fix_recursive(obj, fun) do
+    # unlike Erlang, Elixir does not support recursive inline functions
+    # which would allow us to avoid reconstructing this on every recursion
+    rec_fun = fn
+      obj when is_map(obj) -> fix_recursive(obj, fun)
+      # there may be simple AP IDs in history (or object field)
+      obj -> obj
+    end
+
+    obj
+    |> fun.()
+    |> fix_history(rec_fun)
+    |> then(fn
+      %{"object" => object} = doc when is_map(object) ->
+        update_in(doc["object"], rec_fun)
+
+      apdoc ->
+        apdoc
+    end)
+  end

   def fix_summary(%{"summary" => nil} = object) do
@@ -414,17 +447,10 @@ defp get_reported(objects) do
   end

   def handle_incoming(data, options \\ []) do
-    data = normalise_addressing_public(data)
-
-    data =
-      if data["object"] != nil do
-        object = normalise_addressing_public(data["object"])
-        Map.put(data, "object", object)
-      else
-        data
-      end
-
-    handle_incoming_normalised(data, options)
+    data
+    |> fix_recursive(&normalise_addressing_public/1)
+    |> fix_recursive(&strip_internal_fields/1)
+    |> handle_incoming_normalised(options)
   end

   defp handle_incoming_normalised(data, options)
@@ -490,7 +516,6 @@ defp handle_incoming_normalised(

       object =
         data["object"]
-        |> strip_internal_fields()
         |> fix_type(fetch_options)
         |> fix_in_reply_to(fetch_options)
         |> fix_quote_url(fetch_options)
@@ -545,6 +570,9 @@ defp handle_incoming_normalised(
          _options
        )
        when type in ~w{Update Block Follow Accept Reject} do
+    fixed_obj = maybe_fix_object(data["object"])
+    data = if fixed_obj != nil, do: %{data | "object" => fixed_obj}, else: data
+
     with {:ok, %User{}} <- ObjectValidator.fetch_actor(data),
          {:ok, activity, _} <-
            Pipeline.common_pipeline(data, local: false) do
@@ -1043,6 +1071,8 @@ def prepare_attachments(object) do
     Map.put(object, "attachment", attachments)
   end

+  # for outgoing docs, immediately stripping internal fields recursively breaks later emoji transformations
+  # (XXX: it would be better to reorder operations so we can always use recursive stripping)
   def strip_internal_fields(object) do
     Map.drop(object, Pleroma.Constants.object_internal_fields())
   end
diff --git a/test/pleroma/web/activity_pub/transmogrifier_test.exs b/test/pleroma/web/activity_pub/transmogrifier_test.exs
index be07a0fe4..b90692370 100644
--- a/test/pleroma/web/activity_pub/transmogrifier_test.exs
+++ b/test/pleroma/web/activity_pub/transmogrifier_test.exs
@@ -164,6 +164,183 @@ test "it accepts quote posts" do
       # It fetched the quoted post
       assert Object.normalize("https://misskey.io/notes/8vs6wxufd0")
     end
+
+    test "doesn't allow remote edits to fake local likes" do
+      # as a spot check for no internal fields getting injected
+      now = DateTime.utc_now()
+      pub_date = DateTime.to_iso8601(Timex.subtract(now, Timex.Duration.from_minutes(3)))
+      edit_date = DateTime.to_iso8601(now)
+
+      local_user = insert(:user)
+
+      create_data = %{
+        "type" => "Create",
+        "id" => "http://mastodon.example.org/users/admin/statuses/2619539638/activity",
+        "actor" => "http://mastodon.example.org/users/admin",
+        "to" => ["as:Public"],
+        "cc" => [],
+        "object" => %{
+          "type" => "Note",
+          "id" => "http://mastodon.example.org/users/admin/statuses/2619539638",
+          "attributedTo" => "http://mastodon.example.org/users/admin",
+          "to" => ["as:Public"],
+          "cc" => [],
+          "published" => pub_date,
+          "content" => "miaow",
+          "likes" => [local_user.ap_id]
+        }
+      }
+
+      update_data =
+        create_data
+        |> Map.put("type", "Update")
+        |> Map.put("id", create_data["object"]["id"] <> "/update/1")
+        |> put_in(["object", "content"], "miaow :3")
+        |> put_in(["object", "updated"], edit_date)
+        |> put_in(["object", "formerRepresentations"], %{
+          "type" => "OrderedCollection",
+          "totalItems" => 1,
+          "orderedItems" => [create_data["object"]]
+        })
+
+      {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(create_data)
+      %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"])
+      assert object.data["content"] == "miaow"
+      assert object.data["likes"] == []
+      assert object.data["like_count"] == 0
+
+      {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(update_data)
+      %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"]["id"])
+      assert object.data["content"] == "miaow :3"
+      assert object.data["likes"] == []
+      assert object.data["like_count"] == 0
+    end
+
+    test "doesn't trip over remote likes in notes" do
+      now = DateTime.utc_now()
+      pub_date = DateTime.to_iso8601(Timex.subtract(now, Timex.Duration.from_minutes(3)))
+      edit_date = DateTime.to_iso8601(now)
+
+      create_data = %{
+        "type" => "Create",
+        "id" => "http://mastodon.example.org/users/admin/statuses/3409297097/activity",
+        "actor" => "http://mastodon.example.org/users/admin",
+        "to" => ["as:Public"],
+        "cc" => [],
+        "object" => %{
+          "type" => "Note",
+          "id" => "http://mastodon.example.org/users/admin/statuses/3409297097",
+          "attributedTo" => "http://mastodon.example.org/users/admin",
+          "to" => ["as:Public"],
+          "cc" => [],
+          "published" => pub_date,
+          "content" => "miaow",
"likes" => %{ + "id" => "http://mastodon.example.org/users/admin/statuses/3409297097/likes", + "totalItems" => 0, + "type" => "Collection" + } + } + } + + update_data = + create_data + |> Map.put("type", "Update") + |> Map.put("id", create_data["object"]["id"] <> "/update/1") + |> put_in(["object", "content"], "miaow :3") + |> put_in(["object", "updated"], edit_date) + |> put_in(["object", "likes", "totalItems"], 666) + |> put_in(["object", "formerRepresentations"], %{ + "type" => "OrderedCollection", + "totalItems" => 1, + "orderedItems" => [create_data["object"]] + }) + + {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(create_data) + %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"]) + assert object.data["content"] == "miaow" + assert object.data["likes"] == [] + assert object.data["like_count"] == 0 + + {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(update_data) + %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"]["id"]) + assert object.data["content"] == "miaow :3" + assert object.data["likes"] == [] + # in the future this should retain remote likes, but for now: + assert object.data["like_count"] == 0 + end + + test "doesn't trip over remote likes in polls" do + now = DateTime.utc_now() + pub_date = DateTime.to_iso8601(Timex.subtract(now, Timex.Duration.from_minutes(3))) + edit_date = DateTime.to_iso8601(now) + + create_data = %{ + "type" => "Create", + "id" => "http://mastodon.example.org/users/admin/statuses/2471790073/activity", + "actor" => "http://mastodon.example.org/users/admin", + "to" => ["as:Public"], + "cc" => [], + "object" => %{ + "type" => "Question", + "id" => "http://mastodon.example.org/users/admin/statuses/2471790073", + "attributedTo" => "http://mastodon.example.org/users/admin", + "to" => ["as:Public"], + "cc" => [], + "published" => pub_date, + "content" => "vote!", + "anyOf" => [ + %{ + "type" => "Note", + "name" => "a", + "replies" => %{ + "type" => "Collection", + "totalItems" => 3 + } + }, + %{ + "type" => "Note", + "name" => "b", + "replies" => %{ + "type" => "Collection", + "totalItems" => 1 + } + } + ], + "likes" => %{ + "id" => "http://mastodon.example.org/users/admin/statuses/2471790073/likes", + "totalItems" => 0, + "type" => "Collection" + } + } + } + + update_data = + create_data + |> Map.put("type", "Update") + |> Map.put("id", create_data["object"]["id"] <> "/update/1") + |> put_in(["object", "content"], "vote now!") + |> put_in(["object", "updated"], edit_date) + |> put_in(["object", "likes", "totalItems"], 666) + |> put_in(["object", "formerRepresentations"], %{ + "type" => "OrderedCollection", + "totalItems" => 1, + "orderedItems" => [create_data["object"]] + }) + + {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(create_data) + %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"]) + assert object.data["content"] == "vote!" + assert object.data["likes"] == [] + assert object.data["like_count"] == 0 + + {:ok, %Pleroma.Activity{} = activity} = Transmogrifier.handle_incoming(update_data) + %Pleroma.Object{} = object = Object.get_by_ap_id(activity.data["object"]["id"]) + assert object.data["content"] == "vote now!" 
+      assert object.data["likes"] == []
+      # in the future this should retain remote likes, but for now:
+      assert object.data["like_count"] == 0
+    end
   end

   describe "prepare outgoing" do

From 7c23793e55a9f66f8d8491f3642265c47d4cfa95 Mon Sep 17 00:00:00 2001
From: Oneric
Date: Sat, 15 Feb 2025 14:18:04 +0100
Subject: [PATCH 26/26] changelog: add entries for preceding commits

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 43fff4dda..949fc32f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,12 +12,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 - fixed some holes in SigningKey verification potentially allowing the key-user mapping to be poisoned
 - frontend ZIP files can no longer traverse to paths outside their install dir
 - fixed user updates trying but failing to renew signing key information
+- fixed signing key refresh on key rotation

 ## Changed
 - Dropped obsolete `ap_enabled` indicator from user table and associated buggy logic
 - The remote user count in prometheus metrics is now an estimate instead of an exact number
   since the latter proved unreasonably costly to obtain for a merely nice-to-have statistic
 - Various other tweaks improving stat query performance and avoiding unnecessary work on received AP documents
+- HTTP signatures now test the most likely request-target alias first, cutting down on overhead

 ## 2025.01.01