Reported Account: #{account.nickname}
#{comment_html} #{statuses_html}
+ View Reports in AdminFE
"""

new()
diff --git a/lib/pleroma/emoji/loader.ex b/lib/pleroma/emoji/loader.ex
index 3de2dc762..03a6bca0b 100644
--- a/lib/pleroma/emoji/loader.ex
+++ b/lib/pleroma/emoji/loader.ex
@@ -108,7 +108,7 @@ defp load_pack(pack_dir, emoji_groups) do
if File.exists?(emoji_txt) do
load_from_file(emoji_txt, emoji_groups)
else
- extensions = Pleroma.Config.get([:emoji, :pack_extensions])
+ extensions = Config.get([:emoji, :pack_extensions])
Logger.info(
"No emoji.txt found for pack \"#{pack_name}\", assuming all #{
diff --git a/lib/pleroma/filter.ex b/lib/pleroma/filter.ex
index 4d61b3650..5d6df9530 100644
--- a/lib/pleroma/filter.ex
+++ b/lib/pleroma/filter.ex
@@ -34,10 +34,18 @@ def get(id, %{id: user_id} = _user) do
Repo.one(query)
end
- def get_filters(%User{id: user_id} = _user) do
+ def get_active(query) do
+ from(f in query, where: is_nil(f.expires_at) or f.expires_at > ^NaiveDateTime.utc_now())
+ end
+
+ def get_irreversible(query) do
+ from(f in query, where: f.hide)
+ end
+
+ def get_filters(query \\ __MODULE__, %User{id: user_id}) do
query =
from(
- f in Pleroma.Filter,
+ f in query,
where: f.user_id == ^user_id,
order_by: [desc: :id]
)
@@ -95,4 +103,34 @@ def update(%Pleroma.Filter{} = filter, params) do
|> validate_required([:phrase, :context])
|> Repo.update()
end
+
+ def compose_regex(user_or_filters, format \\ :postgres)
+
+ def compose_regex(%User{} = user, format) do
+ __MODULE__
+ |> get_active()
+ |> get_irreversible()
+ |> get_filters(user)
+ |> compose_regex(format)
+ end
+
+ def compose_regex([_ | _] = filters, format) do
+ phrases =
+ filters
+ |> Enum.map(& &1.phrase)
+ |> Enum.join("|")
+
+ case format do
+ :postgres ->
+ "\\y(#{phrases})\\y"
+
+ :re ->
+ ~r/\b#{phrases}\b/i
+
+ _ ->
+ nil
+ end
+ end
+
+ def compose_regex(_, _), do: nil
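+
+ # Illustrative results (hypothetical filters, not part of this diff):
+ #   compose_regex([%__MODULE__{phrase: "cat"}, %__MODULE__{phrase: "dog"}], :postgres)
+ #   #=> "\y(cat|dog)\y"
+ #   compose_regex([%__MODULE__{phrase: "cat"}], :re)
+ #   #=> ~r/\bcat\b/i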
end
diff --git a/lib/pleroma/gun/api.ex b/lib/pleroma/gun/api.ex
index f51cd7db8..09be74392 100644
--- a/lib/pleroma/gun/api.ex
+++ b/lib/pleroma/gun/api.ex
@@ -19,7 +19,8 @@ defmodule Pleroma.Gun.API do
:tls_opts,
:tcp_opts,
:socks_opts,
- :ws_opts
+ :ws_opts,
+ :supervise
]
@impl Gun
diff --git a/lib/pleroma/gun/conn.ex b/lib/pleroma/gun/conn.ex
index cd25a2e74..a3f75a4bb 100644
--- a/lib/pleroma/gun/conn.ex
+++ b/lib/pleroma/gun/conn.ex
@@ -3,85 +3,33 @@
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Gun.Conn do
- @moduledoc """
- Struct for gun connection data
- """
alias Pleroma.Gun
- alias Pleroma.Pool.Connections
require Logger
- @type gun_state :: :up | :down
- @type conn_state :: :active | :idle
-
- @type t :: %__MODULE__{
- conn: pid(),
- gun_state: gun_state(),
- conn_state: conn_state(),
- used_by: [pid()],
- last_reference: pos_integer(),
- crf: float(),
- retries: pos_integer()
- }
-
- defstruct conn: nil,
- gun_state: :open,
- conn_state: :init,
- used_by: [],
- last_reference: 0,
- crf: 1,
- retries: 0
-
- @spec open(String.t() | URI.t(), atom(), keyword()) :: :ok | nil
- def open(url, name, opts \\ [])
- def open(url, name, opts) when is_binary(url), do: open(URI.parse(url), name, opts)
-
- def open(%URI{} = uri, name, opts) do
+ def open(%URI{} = uri, opts) do
pool_opts = Pleroma.Config.get([:connections_pool], [])
opts =
opts
|> Enum.into(%{})
- |> Map.put_new(:retry, pool_opts[:retry] || 1)
- |> Map.put_new(:retry_timeout, pool_opts[:retry_timeout] || 1000)
|> Map.put_new(:await_up_timeout, pool_opts[:await_up_timeout] || 5_000)
+ |> Map.put_new(:supervise, false)
|> maybe_add_tls_opts(uri)
- key = "#{uri.scheme}:#{uri.host}:#{uri.port}"
-
- max_connections = pool_opts[:max_connections] || 250
-
- conn_pid =
- if Connections.count(name) < max_connections do
- do_open(uri, opts)
- else
- close_least_used_and_do_open(name, uri, opts)
- end
-
- if is_pid(conn_pid) do
- conn = %Pleroma.Gun.Conn{
- conn: conn_pid,
- gun_state: :up,
- conn_state: :active,
- last_reference: :os.system_time(:second)
- }
-
- :ok = Gun.set_owner(conn_pid, Process.whereis(name))
- Connections.add_conn(name, key, conn)
- end
+ do_open(uri, opts)
end
defp maybe_add_tls_opts(opts, %URI{scheme: "http"}), do: opts
- defp maybe_add_tls_opts(opts, %URI{scheme: "https", host: host}) do
+ defp maybe_add_tls_opts(opts, %URI{scheme: "https"}) do
tls_opts = [
verify: :verify_peer,
cacertfile: CAStore.file_path(),
depth: 20,
reuse_sessions: false,
- verify_fun:
- {&:ssl_verify_hostname.verify_fun/3,
- [check_hostname: Pleroma.HTTP.Connection.format_host(host)]}
+ log_level: :warning,
+ customize_hostname_check: [match_fun: :public_key.pkix_verify_hostname_match_fun(:https)]
]
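+ # :public_key.pkix_verify_hostname_match_fun(:https) is OTP's built-in,
+ # wildcard-aware hostname matcher, replacing the :ssl_verify_hostname
+ # verify_fun removed above.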
tls_opts =
@@ -105,7 +53,7 @@ defp do_open(uri, %{proxy: {proxy_host, proxy_port}} = opts) do
{:ok, _} <- Gun.await_up(conn, opts[:await_up_timeout]),
stream <- Gun.connect(conn, connect_opts),
{:response, :fin, 200, _} <- Gun.await(conn, stream) do
- conn
+ {:ok, conn}
else
error ->
Logger.warn(
@@ -141,7 +89,7 @@ defp do_open(uri, %{proxy: {proxy_type, proxy_host, proxy_port}} = opts) do
with {:ok, conn} <- Gun.open(proxy_host, proxy_port, opts),
{:ok, _} <- Gun.await_up(conn, opts[:await_up_timeout]) do
- conn
+ {:ok, conn}
else
error ->
Logger.warn(
@@ -155,11 +103,11 @@ defp do_open(uri, %{proxy: {proxy_type, proxy_host, proxy_port}} = opts) do
end
defp do_open(%URI{host: host, port: port} = uri, opts) do
- host = Pleroma.HTTP.Connection.parse_host(host)
+ host = Pleroma.HTTP.AdapterHelper.parse_host(host)
with {:ok, conn} <- Gun.open(host, port, opts),
{:ok, _} <- Gun.await_up(conn, opts[:await_up_timeout]) do
- conn
+ {:ok, conn}
else
error ->
Logger.warn(
@@ -171,7 +119,7 @@ defp do_open(%URI{host: host, port: port} = uri, opts) do
end
defp destination_opts(%URI{host: host, port: port}) do
- host = Pleroma.HTTP.Connection.parse_host(host)
+ host = Pleroma.HTTP.AdapterHelper.parse_host(host)
%{host: host, port: port}
end
@@ -181,17 +129,6 @@ defp add_http2_opts(opts, "https", tls_opts) do
defp add_http2_opts(opts, _, _), do: opts
- defp close_least_used_and_do_open(name, uri, opts) do
- with [{key, conn} | _conns] <- Connections.get_unused_conns(name),
- :ok <- Gun.close(conn.conn) do
- Connections.remove_conn(name, key)
-
- do_open(uri, opts)
- else
- [] -> {:error, :pool_overflowed}
- end
- end
-
def compose_uri_log(%URI{scheme: scheme, host: host, path: path}) do
"#{scheme}://#{host}#{path}"
end
diff --git a/lib/pleroma/gun/connection_pool.ex b/lib/pleroma/gun/connection_pool.ex
new file mode 100644
index 000000000..8b41a668c
--- /dev/null
+++ b/lib/pleroma/gun/connection_pool.ex
@@ -0,0 +1,79 @@
+defmodule Pleroma.Gun.ConnectionPool do
+ @registry __MODULE__
+
+ alias Pleroma.Gun.ConnectionPool.WorkerSupervisor
+
+ def children do
+ [
+ {Registry, keys: :unique, name: @registry},
+ Pleroma.Gun.ConnectionPool.WorkerSupervisor
+ ]
+ end
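+
+ # children/0 is meant to be spliced into the application's supervision tree:
+ # a unique-keyed Registry for connection lookup plus the DynamicSupervisor
+ # that owns the per-connection workers.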
+
+ def get_conn(uri, opts) do
+ key = "#{uri.scheme}:#{uri.host}:#{uri.port}"
+
+ case Registry.lookup(@registry, key) do
+ # The key has already been registered, but the connection is not up yet
+ [{worker_pid, nil}] ->
+ get_gun_pid_from_worker(worker_pid, true)
+
+ [{worker_pid, {gun_pid, _used_by, _crf, _last_reference}}] ->
+ GenServer.cast(worker_pid, {:add_client, self(), false})
+ {:ok, gun_pid}
+
+ [] ->
+ # :gun.set_owner fails in :connected state for whatever reason,
+ # so we open the connection in the process directly and send its pid back
+ # We trust gun to handle timeouts by itself
+ case WorkerSupervisor.start_worker([key, uri, opts, self()]) do
+ {:ok, worker_pid} ->
+ get_gun_pid_from_worker(worker_pid, false)
+
+ {:error, {:already_started, worker_pid}} ->
+ get_gun_pid_from_worker(worker_pid, true)
+
+ err ->
+ err
+ end
+ end
+ end
+
+ defp get_gun_pid_from_worker(worker_pid, register) do
+ # GenServer.call would block the calling process for the full timeout if
+ # the server crashes on startup (which happens when gun fails to connect),
+ # so we use cast + monitor instead
+
+ ref = Process.monitor(worker_pid)
+ if register, do: GenServer.cast(worker_pid, {:add_client, self(), true})
+
+ receive do
+ {:conn_pid, pid} ->
+ Process.demonitor(ref)
+ {:ok, pid}
+
+ {:DOWN, ^ref, :process, ^worker_pid, reason} ->
+ case reason do
+ {:shutdown, error} -> error
+ _ -> {:error, reason}
+ end
+ end
+ end
+
+ def release_conn(conn_pid) do
+ # :ets.fun2ms(fn {_, {worker_pid, {gun_pid, _, _, _}}} when gun_pid == conn_pid ->
+ # worker_pid end)
+ query_result =
+ Registry.select(@registry, [
+ {{:_, :"$1", {:"$2", :_, :_, :_}}, [{:==, :"$2", conn_pid}], [:"$1"]}
+ ])
+
+ case query_result do
+ [worker_pid] ->
+ GenServer.cast(worker_pid, {:remove_client, self()})
+
+ [] ->
+ :ok
+ end
+ end
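+
+ # Sketch of the intended call pattern (the request step is a placeholder,
+ # not an API from this diff):
+ #
+ #   {:ok, conn} = ConnectionPool.get_conn(URI.parse("https://example.com"), [])
+ #   # ... issue the request over `conn` with gun ...
+ #   :ok = ConnectionPool.release_conn(conn)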
+end
diff --git a/lib/pleroma/gun/connection_pool/reclaimer.ex b/lib/pleroma/gun/connection_pool/reclaimer.ex
new file mode 100644
index 000000000..cea800882
--- /dev/null
+++ b/lib/pleroma/gun/connection_pool/reclaimer.ex
@@ -0,0 +1,85 @@
+defmodule Pleroma.Gun.ConnectionPool.Reclaimer do
+ use GenServer, restart: :temporary
+
+ @registry Pleroma.Gun.ConnectionPool
+
+ def start_monitor do
+ pid =
+ case :gen_server.start(__MODULE__, [], name: {:via, Registry, {@registry, "reclaimer"}}) do
+ {:ok, pid} ->
+ pid
+
+ {:error, {:already_registered, pid}} ->
+ pid
+ end
+
+ {pid, Process.monitor(pid)}
+ end
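+
+ # The reclaimer is started unlinked (:gen_server.start/3) and only
+ # monitored, so a crash while reclaiming cannot take the caller down.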
+
+ @impl true
+ def init(_) do
+ {:ok, nil, {:continue, :reclaim}}
+ end
+
+ @impl true
+ def handle_continue(:reclaim, _) do
+ max_connections = Pleroma.Config.get([:connections_pool, :max_connections])
+
+ reclaim_max =
+ [:connections_pool, :reclaim_multiplier]
+ |> Pleroma.Config.get()
+ |> Kernel.*(max_connections)
+ |> round
+ |> max(1)
+
+ :telemetry.execute([:pleroma, :connection_pool, :reclaim, :start], %{}, %{
+ max_connections: max_connections,
+ reclaim_max: reclaim_max
+ })
+
+ # :ets.fun2ms(
+ # fn {_, {worker_pid, {_, used_by, crf, last_reference}}} when used_by == [] ->
+ # {worker_pid, crf, last_reference} end)
+ unused_conns =
+ Registry.select(
+ @registry,
+ [
+ {{:_, :"$1", {:_, :"$2", :"$3", :"$4"}}, [{:==, :"$2", []}], [{{:"$1", :"$3", :"$4"}}]}
+ ]
+ )
+
+ case unused_conns do
+ [] ->
+ :telemetry.execute(
+ [:pleroma, :connection_pool, :reclaim, :stop],
+ %{reclaimed_count: 0},
+ %{
+ max_connections: max_connections
+ }
+ )
+
+ {:stop, :no_unused_conns, nil}
+
+ unused_conns ->
+ reclaimed =
+ unused_conns
+ |> Enum.sort(fn {_pid1, crf1, last_reference1}, {_pid2, crf2, last_reference2} ->
+ crf1 <= crf2 and last_reference1 <= last_reference2
+ end)
+ |> Enum.take(reclaim_max)
+
+ reclaimed
+ |> Enum.each(fn {pid, _, _} ->
+ DynamicSupervisor.terminate_child(Pleroma.Gun.ConnectionPool.WorkerSupervisor, pid)
+ end)
+
+ :telemetry.execute(
+ [:pleroma, :connection_pool, :reclaim, :stop],
+ %{reclaimed_count: Enum.count(reclaimed)},
+ %{max_connections: max_connections}
+ )
+
+ {:stop, :normal, nil}
+ end
+ end
+end
diff --git a/lib/pleroma/gun/connection_pool/worker.ex b/lib/pleroma/gun/connection_pool/worker.ex
new file mode 100644
index 000000000..f33447cb6
--- /dev/null
+++ b/lib/pleroma/gun/connection_pool/worker.ex
@@ -0,0 +1,127 @@
+defmodule Pleroma.Gun.ConnectionPool.Worker do
+ alias Pleroma.Gun
+ use GenServer, restart: :temporary
+
+ @registry Pleroma.Gun.ConnectionPool
+
+ def start_link([key | _] = opts) do
+ GenServer.start_link(__MODULE__, opts, name: {:via, Registry, {@registry, key}})
+ end
+
+ @impl true
+ def init([_key, _uri, _opts, _client_pid] = opts) do
+ {:ok, nil, {:continue, {:connect, opts}}}
+ end
+
+ @impl true
+ def handle_continue({:connect, [key, uri, opts, client_pid]}, _) do
+ with {:ok, conn_pid} <- Gun.Conn.open(uri, opts),
+ Process.link(conn_pid) do
+ time = :erlang.monotonic_time(:millisecond)
+
+ {_, _} =
+ Registry.update_value(@registry, key, fn _ ->
+ {conn_pid, [client_pid], 1, time}
+ end)
+
+ send(client_pid, {:conn_pid, conn_pid})
+
+ {:noreply,
+ %{key: key, timer: nil, client_monitors: %{client_pid => Process.monitor(client_pid)}},
+ :hibernate}
+ else
+ err ->
+ {:stop, {:shutdown, err}, nil}
+ end
+ end
+
+ @impl true
+ def handle_cast({:add_client, client_pid, send_pid_back}, %{key: key} = state) do
+ time = :erlang.monotonic_time(:millisecond)
+
+ {{conn_pid, _, _, _}, _} =
+ Registry.update_value(@registry, key, fn {conn_pid, used_by, crf, last_reference} ->
+ {conn_pid, [client_pid | used_by], crf(time - last_reference, crf), time}
+ end)
+
+ if send_pid_back, do: send(client_pid, {:conn_pid, conn_pid})
+
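+ # A client re-acquiring the connection cancels any pending :idle_close timer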
+ state =
+ if state.timer != nil do
+ Process.cancel_timer(state[:timer])
+ %{state | timer: nil}
+ else
+ state
+ end
+
+ ref = Process.monitor(client_pid)
+
+ state = put_in(state.client_monitors[client_pid], ref)
+ {:noreply, state, :hibernate}
+ end
+
+ @impl true
+ def handle_cast({:remove_client, client_pid}, %{key: key} = state) do
+ {{_conn_pid, used_by, _crf, _last_reference}, _} =
+ Registry.update_value(@registry, key, fn {conn_pid, used_by, crf, last_reference} ->
+ {conn_pid, List.delete(used_by, client_pid), crf, last_reference}
+ end)
+
+ {ref, state} = pop_in(state.client_monitors[client_pid])
+ Process.demonitor(ref)
+
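+ # When the last client is gone, arm an idle timer so the unused connection
+ # is closed after max_idle_time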
+ timer =
+ if used_by == [] do
+ max_idle = Pleroma.Config.get([:connections_pool, :max_idle_time], 30_000)
+ Process.send_after(self(), :idle_close, max_idle)
+ else
+ nil
+ end
+
+ {:noreply, %{state | timer: timer}, :hibernate}
+ end
+
+ @impl true
+ def handle_info(:idle_close, state) do
+ # Gun monitors the owner process, and will close the connection automatically
+ # when it's terminated
+ {:stop, :normal, state}
+ end
+
+ # Gracefully shutdown if the connection got closed without any streams left
+ @impl true
+ def handle_info({:gun_down, _pid, _protocol, _reason, []}, state) do
+ {:stop, :normal, state}
+ end
+
+ # Otherwise, shutdown with an error
+ @impl true
+ def handle_info({:gun_down, _pid, _protocol, _reason, _killed_streams} = down_message, state) do
+ {:stop, {:error, down_message}, state}
+ end
+
+ @impl true
+ def handle_info({:DOWN, _ref, :process, pid, reason}, state) do
+ # Sometimes the client is dead before we demonitor it in :remove_client, so the message
+ # arrives anyway
+
+ case state.client_monitors[pid] do
+ nil ->
+ {:noreply, state, :hibernate}
+
+ _ref ->
+ :telemetry.execute(
+ [:pleroma, :connection_pool, :client_death],
+ %{client_pid: pid, reason: reason},
+ %{key: state.key}
+ )
+
+ handle_cast({:remove_client, pid}, state)
+ end
+ end
+
+ # LRFU policy: https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.55.1478
+ defp crf(time_delta, prev_crf) do
+ 1 + :math.pow(0.5, 0.0001 * time_delta) * prev_crf
+ end
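+
+ # Worked example of the decay (numbers for intuition only):
+ # :math.pow(0.5, 0.0001 * 10_000) == 0.5, so a connection referenced again
+ # after 10 seconds keeps half of its previous CRF: crf(10_000, 1.0) == 1.5.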
+end
diff --git a/lib/pleroma/gun/connection_pool/worker_supervisor.ex b/lib/pleroma/gun/connection_pool/worker_supervisor.ex
new file mode 100644
index 000000000..39615c956
--- /dev/null
+++ b/lib/pleroma/gun/connection_pool/worker_supervisor.ex
@@ -0,0 +1,45 @@
+defmodule Pleroma.Gun.ConnectionPool.WorkerSupervisor do
+ @moduledoc "Supervisor for pool workers. Does not do anything except enforce max connection limit"
+
+ use DynamicSupervisor
+
+ def start_link(opts) do
+ DynamicSupervisor.start_link(__MODULE__, opts, name: __MODULE__)
+ end
+
+ def init(_opts) do
+ DynamicSupervisor.init(
+ strategy: :one_for_one,
+ max_children: Pleroma.Config.get([:connections_pool, :max_connections])
+ )
+ end
+
+ def start_worker(opts, retry \\ false) do
+ case DynamicSupervisor.start_child(__MODULE__, {Pleroma.Gun.ConnectionPool.Worker, opts}) do
+ {:error, :max_children} ->
+ if retry or free_pool() == :error do
+ :telemetry.execute([:pleroma, :connection_pool, :provision_failure], %{opts: opts})
+ {:error, :pool_full}
+ else
+ start_worker(opts, true)
+ end
+
+ res ->
+ res
+ end
+ end
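+
+ # On :max_children the pool is full: run the Reclaimer once to evict idle
+ # connections, then retry a single time; a second failure surfaces
+ # {:error, :pool_full} to the caller.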
+
+ defp free_pool do
+ wait_for_reclaimer_finish(Pleroma.Gun.ConnectionPool.Reclaimer.start_monitor())
+ end
+
+ defp wait_for_reclaimer_finish({pid, mon}) do
+ receive do
+ {:DOWN, ^mon, :process, ^pid, :no_unused_conns} ->
+ :error
+
+ {:DOWN, ^mon, :process, ^pid, :normal} ->
+ :ok
+ end
+ end
+end
diff --git a/lib/pleroma/html.ex b/lib/pleroma/html.ex
index d78c5f202..dc1b9b840 100644
--- a/lib/pleroma/html.ex
+++ b/lib/pleroma/html.ex
@@ -109,7 +109,7 @@ def extract_first_external_url(object, content) do
result =
content
|> Floki.parse_fragment!()
- |> Floki.filter_out("a.mention,a.hashtag,a[rel~=\"tag\"]")
+ |> Floki.filter_out("a.mention,a.hashtag,a.attachment,a[rel~=\"tag\"]")
|> Floki.attribute("a", "href")
|> Enum.at(0)
diff --git a/lib/pleroma/http/adapter_helper.ex b/lib/pleroma/http/adapter_helper.ex
index 510722ff9..9ec3836b0 100644
--- a/lib/pleroma/http/adapter_helper.ex
+++ b/lib/pleroma/http/adapter_helper.ex
@@ -3,32 +3,30 @@
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.HTTP.AdapterHelper do
- alias Pleroma.HTTP.Connection
+ @moduledoc """
+ Configure Tesla.Client with default and customized adapter options.
+ """
+ @defaults [pool: :federation]
+
+ @type proxy_type() :: :socks4 | :socks5
+ @type host() :: charlist() | :inet.ip_address()
+
+ alias Pleroma.Config
+ alias Pleroma.HTTP.AdapterHelper
+ require Logger
@type proxy ::
- {Connection.host(), pos_integer()}
- | {Connection.proxy_type(), Connection.host(), pos_integer()}
+ {host(), pos_integer()}
+ | {proxy_type(), host(), pos_integer()}
@callback options(keyword(), URI.t()) :: keyword()
- @callback after_request(keyword()) :: :ok
-
- @spec options(keyword(), URI.t()) :: keyword()
- def options(opts, _uri) do
- proxy = Pleroma.Config.get([:http, :proxy_url], nil)
- maybe_add_proxy(opts, format_proxy(proxy))
- end
-
- @spec maybe_get_conn(URI.t(), keyword()) :: keyword()
- def maybe_get_conn(_uri, opts), do: opts
-
- @spec after_request(keyword()) :: :ok
- def after_request(_opts), do: :ok
+ @callback get_conn(URI.t(), keyword()) :: {:ok, term()} | {:error, term()}
@spec format_proxy(String.t() | tuple() | nil) :: proxy() | nil
def format_proxy(nil), do: nil
def format_proxy(proxy_url) do
- case Connection.parse_proxy(proxy_url) do
+ case parse_proxy(proxy_url) do
{:ok, host, port} -> {host, port}
{:ok, type, host, port} -> {type, host, port}
_ -> nil
@@ -38,4 +36,105 @@ def format_proxy(proxy_url) do
@spec maybe_add_proxy(keyword(), proxy() | nil) :: keyword()
def maybe_add_proxy(opts, nil), do: opts
def maybe_add_proxy(opts, proxy), do: Keyword.put_new(opts, :proxy, proxy)
+
+ @doc """
+ Merge default connection & adapter options with received ones.
+ """
+
+ @spec options(URI.t(), keyword()) :: keyword()
+ def options(%URI{} = uri, opts \\ []) do
+ @defaults
+ |> put_timeout()
+ |> Keyword.merge(opts)
+ |> adapter_helper().options(uri)
+ end
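+
+ # Precedence, lowest to highest: @defaults, the pool timeout from config,
+ # caller-supplied opts, then the adapter-specific options/2 callback.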
+
+ # For Hackney, this is the time a connection can stay idle in the pool.
+ # For Gun, this is the timeout to receive a message from Gun.
+ defp put_timeout(opts) do
+ {config_key, default} =
+ if adapter() == Tesla.Adapter.Gun do
+ {:pools, Config.get([:pools, :default, :timeout], 5_000)}
+ else
+ {:hackney_pools, 10_000}
+ end
+
+ timeout = Config.get([config_key, opts[:pool], :timeout], default)
+
+ Keyword.merge(opts, timeout: timeout)
+ end
+
+ def get_conn(uri, opts), do: adapter_helper().get_conn(uri, opts)
+ defp adapter, do: Application.get_env(:tesla, :adapter)
+
+ defp adapter_helper do
+ case adapter() do
+ Tesla.Adapter.Gun -> AdapterHelper.Gun
+ Tesla.Adapter.Hackney -> AdapterHelper.Hackney
+ _ -> AdapterHelper.Default
+ end
+ end
+
+ @spec parse_proxy(String.t() | tuple() | nil) ::
+ {:ok, host(), pos_integer()}
+ | {:ok, proxy_type(), host(), pos_integer()}
+ | {:error, atom()}
+ | nil
+
+ def parse_proxy(nil), do: nil
+
+ def parse_proxy(proxy) when is_binary(proxy) do
+ with [host, port] <- String.split(proxy, ":"),
+ {port, ""} <- Integer.parse(port) do
+ {:ok, parse_host(host), port}
+ else
+ {_, _} ->
+ Logger.warn("Parsing port failed #{inspect(proxy)}")
+ {:error, :invalid_proxy_port}
+
+ :error ->
+ Logger.warn("Parsing port failed #{inspect(proxy)}")
+ {:error, :invalid_proxy_port}
+
+ _ ->
+ Logger.warn("Parsing proxy failed #{inspect(proxy)}")
+ {:error, :invalid_proxy}
+ end
+ end
+
+ def parse_proxy(proxy) when is_tuple(proxy) do
+ with {type, host, port} <- proxy do
+ {:ok, type, parse_host(host), port}
+ else
+ _ ->
+ Logger.warn("Parsing proxy failed #{inspect(proxy)}")
+ {:error, :invalid_proxy}
+ end
+ end
+
+ @spec parse_host(String.t() | atom() | charlist()) :: charlist() | :inet.ip_address()
+ def parse_host(host) when is_list(host), do: host
+ def parse_host(host) when is_atom(host), do: to_charlist(host)
+
+ def parse_host(host) when is_binary(host) do
+ host = to_charlist(host)
+
+ case :inet.parse_address(host) do
+ {:error, :einval} -> host
+ {:ok, ip} -> ip
+ end
+ end
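+
+ # Illustrative results:
+ #   parse_proxy("127.0.0.1:8123")            #=> {:ok, {127, 0, 0, 1}, 8123}
+ #   parse_proxy({:socks5, :localhost, 9050}) #=> {:ok, :socks5, 'localhost', 9050}
+ #   parse_proxy("localhost:port")            #=> {:error, :invalid_proxy_port}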
+
+ @spec format_host(String.t()) :: charlist()
+ def format_host(host) do
+ host_charlist = to_charlist(host)
+
+ case :inet.parse_address(host_charlist) do
+ {:error, :einval} ->
+ :idna.encode(host_charlist)
+
+ {:ok, _ip} ->
+ host_charlist
+ end
+ end
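+
+ # IP literals parse and pass through unchanged; other hostnames go through
+ # :idna.encode/1, which punycode-encodes any non-ASCII labels.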
end
diff --git a/lib/pleroma/http/adapter_helper/default.ex b/lib/pleroma/http/adapter_helper/default.ex
new file mode 100644
index 000000000..e13441316
--- /dev/null
+++ b/lib/pleroma/http/adapter_helper/default.ex
@@ -0,0 +1,14 @@
+defmodule Pleroma.HTTP.AdapterHelper.Default do
+ alias Pleroma.HTTP.AdapterHelper
+
+ @behaviour Pleroma.HTTP.AdapterHelper
+
+ @spec options(keyword(), URI.t()) :: keyword()
+ def options(opts, _uri) do
+ proxy = Pleroma.Config.get([:http, :proxy_url], nil)
+ AdapterHelper.maybe_add_proxy(opts, AdapterHelper.format_proxy(proxy))
+ end
+
+ @spec get_conn(URI.t(), keyword()) :: {:ok, keyword()}
+ def get_conn(_uri, opts), do: {:ok, opts}
+end
diff --git a/lib/pleroma/http/adapter_helper/gun.ex b/lib/pleroma/http/adapter_helper/gun.ex
index ead7cdc6b..b4ff8306c 100644
--- a/lib/pleroma/http/adapter_helper/gun.ex
+++ b/lib/pleroma/http/adapter_helper/gun.ex
@@ -5,8 +5,8 @@
defmodule Pleroma.HTTP.AdapterHelper.Gun do
@behaviour Pleroma.HTTP.AdapterHelper
+ alias Pleroma.Gun.ConnectionPool
alias Pleroma.HTTP.AdapterHelper
- alias Pleroma.Pool.Connections
require Logger
@@ -14,7 +14,7 @@ defmodule Pleroma.HTTP.AdapterHelper.Gun do
connect_timeout: 5_000,
domain_lookup_timeout: 5_000,
tls_handshake_timeout: 5_000,
- retry: 1,
+ retry: 0,
retry_timeout: 1000,
await_up_timeout: 5_000
]
@@ -31,16 +31,7 @@ def options(incoming_opts \\ [], %URI{} = uri) do
|> Keyword.merge(config_opts)
|> add_scheme_opts(uri)
|> AdapterHelper.maybe_add_proxy(proxy)
- |> maybe_get_conn(uri, incoming_opts)
- end
-
- @spec after_request(keyword()) :: :ok
- def after_request(opts) do
- if opts[:conn] && opts[:body_as] != :chunks do
- Connections.checkout(opts[:conn], self(), :gun_connections)
- end
-
- :ok
+ |> Keyword.merge(incoming_opts)
end
defp add_scheme_opts(opts, %{scheme: "http"}), do: opts
@@ -48,30 +39,40 @@ defp add_scheme_opts(opts, %{scheme: "http"}), do: opts
defp add_scheme_opts(opts, %{scheme: "https"}) do
opts
|> Keyword.put(:certificates_verification, true)
- |> Keyword.put(:tls_opts, log_level: :warning)
end
- defp maybe_get_conn(adapter_opts, uri, incoming_opts) do
- {receive_conn?, opts} =
- adapter_opts
- |> Keyword.merge(incoming_opts)
- |> Keyword.pop(:receive_conn, true)
-
- if Connections.alive?(:gun_connections) and receive_conn? do
- checkin_conn(uri, opts)
- else
- opts
+ @spec get_conn(URI.t(), keyword()) :: {:ok, keyword()} | {:error, atom()}
+ def get_conn(uri, opts) do
+ case ConnectionPool.get_conn(uri, opts) do
+ {:ok, conn_pid} -> {:ok, Keyword.merge(opts, conn: conn_pid, close_conn: false)}
+ err -> err
end
end
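+ # close_conn: false tells the Gun adapter not to shut the connection down
+ # after the request completes; pooled connections are returned via
+ # ConnectionPool.release_conn/1 instead.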
- defp checkin_conn(uri, opts) do
- case Connections.checkin(uri, :gun_connections) do
- nil ->
- Task.start(Pleroma.Gun.Conn, :open, [uri, :gun_connections, opts])
- opts
+ @prefix Pleroma.Gun.ConnectionPool
+ def limiter_setup do
+ wait = Pleroma.Config.get([:connections_pool, :connection_acquisition_wait])
+ retries = Pleroma.Config.get([:connections_pool, :connection_acquisition_retries])
- conn when is_pid(conn) ->
- Keyword.merge(opts, conn: conn, close_conn: false)
- end
+ :pools
+ |> Pleroma.Config.get([])
+ |> Enum.each(fn {name, opts} ->
+ max_running = Keyword.get(opts, :size, 50)
+ max_waiting = Keyword.get(opts, :max_waiting, 10)
+
+ result =
+ ConcurrentLimiter.new(:"#{@prefix}.#{name}", max_running, max_waiting,
+ wait: wait,
+ max_retries: retries
+ )
+
+ case result do
+ :ok -> :ok
+ {:error, :existing} -> :ok
+ e -> raise e
+ end
+ end)
+
+ :ok
end
end
diff --git a/lib/pleroma/http/adapter_helper/hackney.ex b/lib/pleroma/http/adapter_helper/hackney.ex
index 3972a03a9..cd569422b 100644
--- a/lib/pleroma/http/adapter_helper/hackney.ex
+++ b/lib/pleroma/http/adapter_helper/hackney.ex
@@ -24,5 +24,6 @@ def options(connection_opts \\ [], %URI{} = uri) do
defp add_scheme_opts(opts, _), do: opts
- def after_request(_), do: :ok
+ @spec get_conn(URI.t(), keyword()) :: {:ok, keyword()}
+ def get_conn(_uri, opts), do: {:ok, opts}
end
diff --git a/lib/pleroma/http/connection.ex b/lib/pleroma/http/connection.ex
deleted file mode 100644
index ebacf7902..000000000
--- a/lib/pleroma/http/connection.ex
+++ /dev/null
@@ -1,124 +0,0 @@
-# Pleroma: A lightweight social networking server
-# Copyright © 2017-2020 Pleroma Authors <https://pleroma.social/>
-# SPDX-License-Identifier: AGPL-3.0-only
';\n };\n\n br() {\n return this.options.xhtml ? '
' : '
';\n };\n\n del(text) {\n return '' + text + '';\n };\n\n link(href, title, text) {\n href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);\n if (href === null) {\n return text;\n }\n let out = '' + text + '';\n return out;\n };\n\n image(href, title, text) {\n href = cleanUrl(this.options.sanitize, this.options.baseUrl, href);\n if (href === null) {\n return text;\n }\n\n let out = '' : '>';\n return out;\n };\n\n text(text) {\n return text;\n };\n};\n","const { defaults } = require('./defaults.js');\nconst { block } = require('./rules.js');\nconst {\n rtrim,\n splitCells,\n escape\n} = require('./helpers.js');\n\n/**\n * Block Lexer\n */\nmodule.exports = class Lexer {\n constructor(options) {\n this.tokens = [];\n this.tokens.links = Object.create(null);\n this.options = options || defaults;\n this.rules = block.normal;\n\n if (this.options.pedantic) {\n this.rules = block.pedantic;\n } else if (this.options.gfm) {\n this.rules = block.gfm;\n }\n }\n\n /**\n * Expose Block Rules\n */\n static get rules() {\n return block;\n }\n\n /**\n * Static Lex Method\n */\n static lex(src, options) {\n const lexer = new Lexer(options);\n return lexer.lex(src);\n };\n\n /**\n * Preprocessing\n */\n lex(src) {\n src = src\n .replace(/\\r\\n|\\r/g, '\\n')\n .replace(/\\t/g, ' ');\n\n return this.token(src, true);\n };\n\n /**\n * Lexing\n */\n token(src, top) {\n src = src.replace(/^ +$/gm, '');\n let next,\n loose,\n cap,\n bull,\n b,\n item,\n listStart,\n listItems,\n t,\n space,\n i,\n tag,\n l,\n isordered,\n istask,\n ischecked;\n\n while (src) {\n // newline\n if (cap = this.rules.newline.exec(src)) {\n src = src.substring(cap[0].length);\n if (cap[0].length > 1) {\n this.tokens.push({\n type: 'space'\n });\n }\n }\n\n // code\n if (cap = this.rules.code.exec(src)) {\n const lastToken = this.tokens[this.tokens.length - 1];\n src = src.substring(cap[0].length);\n // An indented code block cannot interrupt a paragraph.\n if (lastToken && lastToken.type === 'paragraph') {\n lastToken.text += '\\n' + cap[0].trimRight();\n } else {\n cap = cap[0].replace(/^ {4}/gm, '');\n this.tokens.push({\n type: 'code',\n codeBlockStyle: 'indented',\n text: !this.options.pedantic\n ? rtrim(cap, '\\n')\n : cap\n });\n }\n continue;\n }\n\n // fences\n if (cap = this.rules.fences.exec(src)) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'code',\n lang: cap[2] ? cap[2].trim() : cap[2],\n text: cap[3] || ''\n });\n continue;\n }\n\n // heading\n if (cap = this.rules.heading.exec(src)) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'heading',\n depth: cap[1].length,\n text: cap[2]\n });\n continue;\n }\n\n // table no leading pipe (gfm)\n if (cap = this.rules.nptable.exec(src)) {\n item = {\n type: 'table',\n header: splitCells(cap[1].replace(/^ *| *\\| *$/g, '')),\n align: cap[2].replace(/^ *|\\| *$/g, '').split(/ *\\| */),\n cells: cap[3] ? 
cap[3].replace(/\\n$/, '').split('\\n') : []\n };\n\n if (item.header.length === item.align.length) {\n src = src.substring(cap[0].length);\n\n for (i = 0; i < item.align.length; i++) {\n if (/^ *-+: *$/.test(item.align[i])) {\n item.align[i] = 'right';\n } else if (/^ *:-+: *$/.test(item.align[i])) {\n item.align[i] = 'center';\n } else if (/^ *:-+ *$/.test(item.align[i])) {\n item.align[i] = 'left';\n } else {\n item.align[i] = null;\n }\n }\n\n for (i = 0; i < item.cells.length; i++) {\n item.cells[i] = splitCells(item.cells[i], item.header.length);\n }\n\n this.tokens.push(item);\n\n continue;\n }\n }\n\n // hr\n if (cap = this.rules.hr.exec(src)) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'hr'\n });\n continue;\n }\n\n // blockquote\n if (cap = this.rules.blockquote.exec(src)) {\n src = src.substring(cap[0].length);\n\n this.tokens.push({\n type: 'blockquote_start'\n });\n\n cap = cap[0].replace(/^ *> ?/gm, '');\n\n // Pass `top` to keep the current\n // \"toplevel\" state. This is exactly\n // how markdown.pl works.\n this.token(cap, top);\n\n this.tokens.push({\n type: 'blockquote_end'\n });\n\n continue;\n }\n\n // list\n if (cap = this.rules.list.exec(src)) {\n src = src.substring(cap[0].length);\n bull = cap[2];\n isordered = bull.length > 1;\n\n listStart = {\n type: 'list_start',\n ordered: isordered,\n start: isordered ? +bull : '',\n loose: false\n };\n\n this.tokens.push(listStart);\n\n // Get each top-level item.\n cap = cap[0].match(this.rules.item);\n\n listItems = [];\n next = false;\n l = cap.length;\n i = 0;\n\n for (; i < l; i++) {\n item = cap[i];\n\n // Remove the list item's bullet\n // so it is seen as the next token.\n space = item.length;\n item = item.replace(/^ *([*+-]|\\d+\\.) */, '');\n\n // Outdent whatever the\n // list item contains. Hacky.\n if (~item.indexOf('\\n ')) {\n space -= item.length;\n item = !this.options.pedantic\n ? item.replace(new RegExp('^ {1,' + space + '}', 'gm'), '')\n : item.replace(/^ {1,4}/gm, '');\n }\n\n // Determine whether the next list item belongs here.\n // Backpedal if it does not belong in this list.\n if (i !== l - 1) {\n b = block.bullet.exec(cap[i + 1])[0];\n if (bull.length > 1 ? b.length === 1\n : (b.length > 1 || (this.options.smartLists && b !== bull))) {\n src = cap.slice(i + 1).join('\\n') + src;\n i = l - 1;\n }\n }\n\n // Determine whether item is loose or not.\n // Use: /(^|\\n)(?! )[^\\n]+\\n\\n(?!\\s*$)/\n // for discount behavior.\n loose = next || /\\n\\n(?!\\s*$)/.test(item);\n if (i !== l - 1) {\n next = item.charAt(item.length - 1) === '\\n';\n if (!loose) loose = next;\n }\n\n if (loose) {\n listStart.loose = true;\n }\n\n // Check for task list items\n istask = /^\\[[ xX]\\] /.test(item);\n ischecked = undefined;\n if (istask) {\n ischecked = item[1] !== ' ';\n item = item.replace(/^\\[[ xX]\\] +/, '');\n }\n\n t = {\n type: 'list_item_start',\n task: istask,\n checked: ischecked,\n loose: loose\n };\n\n listItems.push(t);\n this.tokens.push(t);\n\n // Recurse.\n this.token(item, false);\n\n this.tokens.push({\n type: 'list_item_end'\n });\n }\n\n if (listStart.loose) {\n l = listItems.length;\n i = 0;\n for (; i < l; i++) {\n listItems[i].loose = true;\n }\n }\n\n this.tokens.push({\n type: 'list_end'\n });\n\n continue;\n }\n\n // html\n if (cap = this.rules.html.exec(src)) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: this.options.sanitize\n ? 
'paragraph'\n : 'html',\n pre: !this.options.sanitizer\n && (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),\n text: this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0])) : cap[0]\n });\n continue;\n }\n\n // def\n if (top && (cap = this.rules.def.exec(src))) {\n src = src.substring(cap[0].length);\n if (cap[3]) cap[3] = cap[3].substring(1, cap[3].length - 1);\n tag = cap[1].toLowerCase().replace(/\\s+/g, ' ');\n if (!this.tokens.links[tag]) {\n this.tokens.links[tag] = {\n href: cap[2],\n title: cap[3]\n };\n }\n continue;\n }\n\n // table (gfm)\n if (cap = this.rules.table.exec(src)) {\n item = {\n type: 'table',\n header: splitCells(cap[1].replace(/^ *| *\\| *$/g, '')),\n align: cap[2].replace(/^ *|\\| *$/g, '').split(/ *\\| */),\n cells: cap[3] ? cap[3].replace(/\\n$/, '').split('\\n') : []\n };\n\n if (item.header.length === item.align.length) {\n src = src.substring(cap[0].length);\n\n for (i = 0; i < item.align.length; i++) {\n if (/^ *-+: *$/.test(item.align[i])) {\n item.align[i] = 'right';\n } else if (/^ *:-+: *$/.test(item.align[i])) {\n item.align[i] = 'center';\n } else if (/^ *:-+ *$/.test(item.align[i])) {\n item.align[i] = 'left';\n } else {\n item.align[i] = null;\n }\n }\n\n for (i = 0; i < item.cells.length; i++) {\n item.cells[i] = splitCells(\n item.cells[i].replace(/^ *\\| *| *\\| *$/g, ''),\n item.header.length);\n }\n\n this.tokens.push(item);\n\n continue;\n }\n }\n\n // lheading\n if (cap = this.rules.lheading.exec(src)) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'heading',\n depth: cap[2].charAt(0) === '=' ? 1 : 2,\n text: cap[1]\n });\n continue;\n }\n\n // top-level paragraph\n if (top && (cap = this.rules.paragraph.exec(src))) {\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'paragraph',\n text: cap[1].charAt(cap[1].length - 1) === '\\n'\n ? cap[1].slice(0, -1)\n : cap[1]\n });\n continue;\n }\n\n // text\n if (cap = this.rules.text.exec(src)) {\n // Top-level should never reach here.\n src = src.substring(cap[0].length);\n this.tokens.push({\n type: 'text',\n text: cap[0]\n });\n continue;\n }\n\n if (src) {\n throw new Error('Infinite loop on byte: ' + src.charCodeAt(0));\n }\n }\n\n return this.tokens;\n };\n};\n","const {\n noopTest,\n edit,\n merge\n} = require('./helpers.js');\n\n/**\n * Block-Level Grammar\n */\nconst block = {\n newline: /^\\n+/,\n code: /^( {4}[^\\n]+\\n*)+/,\n fences: /^ {0,3}(`{3,}|~{3,})([^`~\\n]*)\\n(?:|([\\s\\S]*?)\\n)(?: {0,3}\\1[~`]* *(?:\\n+|$)|$)/,\n hr: /^ {0,3}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)/,\n heading: /^ {0,3}(#{1,6}) +([^\\n]*?)(?: +#+)? *(?:\\n+|$)/,\n blockquote: /^( {0,3}> ?(paragraph|[^\\n]*)(?:\\n|$))+/,\n list: /^( {0,3})(bull) [\\s\\S]+?(?:hr|def|\\n{2,}(?! )(?!\\1bull )\\n*|\\s*$)/,\n html: '^ {0,3}(?:' // optional indentation\n + '<(script|pre|style)[\\\\s>][\\\\s\\\\S]*?(?:\\\\1>[^\\\\n]*\\\\n+|$)' // (1)\n + '|comment[^\\\\n]*(\\\\n+|$)' // (2)\n + '|<\\\\?[\\\\s\\\\S]*?\\\\?>\\\\n*' // (3)\n + '|\\\\n*' // (4)\n + '|\\\\n*' // (5)\n + '|?(tag)(?: +|\\\\n|/?>)[\\\\s\\\\S]*?(?:\\\\n{2,}|$)' // (6)\n + '|<(?!script|pre|style)([a-z][\\\\w-]*)(?:attribute)*? */?>(?=[ \\\\t]*(?:\\\\n|$))[\\\\s\\\\S]*?(?:\\\\n{2,}|$)' // (7) open tag\n + '|(?!script|pre|style)[a-z][\\\\w-]*\\\\s*>(?=[ \\\\t]*(?:\\\\n|$))[\\\\s\\\\S]*?(?:\\\\n{2,}|$)' // (7) closing tag\n + ')',\n def: /^ {0,3}\\[(label)\\]: *\\n? *([^\\s>]+)>?(?:(?: +\\n? *| *\\n *)(title))? 
*(?:\\n+|$)/,\n nptable: noopTest,\n table: noopTest,\n lheading: /^([^\\n]+)\\n {0,3}(=+|-+) *(?:\\n+|$)/,\n // regex template, placeholders will be replaced according to different paragraph\n // interruption rules of commonmark and the original markdown spec:\n _paragraph: /^([^\\n]+(?:\\n(?!hr|heading|lheading|blockquote|fences|list|html)[^\\n]+)*)/,\n text: /^[^\\n]+/\n};\n\nblock._label = /(?!\\s*\\])(?:\\\\[\\[\\]]|[^\\[\\]])+/;\nblock._title = /(?:\"(?:\\\\\"?|[^\"\\\\])*\"|'[^'\\n]*(?:\\n[^'\\n]+)*\\n?'|\\([^()]*\\))/;\nblock.def = edit(block.def)\n .replace('label', block._label)\n .replace('title', block._title)\n .getRegex();\n\nblock.bullet = /(?:[*+-]|\\d{1,9}\\.)/;\nblock.item = /^( *)(bull) ?[^\\n]*(?:\\n(?!\\1bull ?)[^\\n]*)*/;\nblock.item = edit(block.item, 'gm')\n .replace(/bull/g, block.bullet)\n .getRegex();\n\nblock.list = edit(block.list)\n .replace(/bull/g, block.bullet)\n .replace('hr', '\\\\n+(?=\\\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\\\* *){3,})(?:\\\\n+|$))')\n .replace('def', '\\\\n+(?=' + block.def.source + ')')\n .getRegex();\n\nblock._tag = 'address|article|aside|base|basefont|blockquote|body|caption'\n + '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'\n + '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'\n + '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'\n + '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'\n + '|track|ul';\nblock._comment = //;\nblock.html = edit(block.html, 'i')\n .replace('comment', block._comment)\n .replace('tag', block._tag)\n .replace('attribute', / +[a-zA-Z:_][\\w.:-]*(?: *= *\"[^\"\\n]*\"| *= *'[^'\\n]*'| *= *[^\\s\"'=<>`]+)?/)\n .getRegex();\n\nblock.paragraph = edit(block._paragraph)\n .replace('hr', block.hr)\n .replace('heading', ' {0,3}#{1,6} +')\n .replace('|lheading', '') // setex headings don't interrupt commonmark paragraphs\n .replace('blockquote', ' {0,3}>')\n .replace('fences', ' {0,3}(?:`{3,}|~{3,})[^`\\\\n]*\\\\n')\n .replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt\n .replace('html', '?(?:tag)(?: +|\\\\n|/?>)|<(?:script|pre|style|!--)')\n .replace('tag', block._tag) // pars can be interrupted by type (6) html blocks\n .getRegex();\n\nblock.blockquote = edit(block.blockquote)\n .replace('paragraph', block.paragraph)\n .getRegex();\n\n/**\n * Normal Block Grammar\n */\n\nblock.normal = merge({}, block);\n\n/**\n * GFM Block Grammar\n */\n\nblock.gfm = merge({}, block.normal, {\n nptable: /^ *([^|\\n ].*\\|.*)\\n *([-:]+ *\\|[-| :]*)(?:\\n((?:.*[^>\\n ].*(?:\\n|$))*)\\n*|$)/,\n table: /^ *\\|(.+)\\n *\\|?( *[-:]+[-| :]*)(?:\\n((?: *[^>\\n ].*(?:\\n|$))*)\\n*|$)/\n});\n\n/**\n * Pedantic grammar (original John Gruber's loose markdown specification)\n */\n\nblock.pedantic = merge({}, block.normal, {\n html: edit(\n '^ *(?:comment *(?:\\\\n|\\\\s*$)'\n + '|<(tag)[\\\\s\\\\S]+?\\\\1> *(?:\\\\n{2,}|\\\\s*$)' // closed tag\n + '|
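For orientation only, here is a minimal usage sketch of the public API the omitted bundle exposes. The callable marked() export and marked.setOptions are both visible in the original sources; the option values below are illustrative assumptions, not AdminFE's actual configuration.

    // Sketch only: exercises the marked() entry point and setOptions()
    // seen in the omitted bundle. Option values are assumptions.
    const marked = require('marked');

    marked.setOptions({
      gfm: true,       // GFM block grammar, handled by the bundled Lexer
      headerIds: true  // heading ids are generated by the bundled Slugger
    });

    // Prints roughly:
    // '<h1 id="hello">Hello</h1>\n<p>Some <em>markdown</em> text.</p>\n'
    console.log(marked('# Hello\n\nSome *markdown* text.'));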
[Compiled AdminFE Vue template strings omitted: heavily duplicated sourcemap residue covering the user profile, recent statuses, moderation log, invite tokens, settings, reports (including report notes and "status deleted" states), statuses, and emoji-pack views.]