Our Praefect database is about 500 MB in size, but WAL generation per week is really huge - about 1 TB.
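For reference, a quick way to cross-check that rate is to diff WAL positions over a known interval (just a sketch, assuming PostgreSQL 10+ and a psql session, since it relies on \gset):

SELECT pg_current_wal_lsn() AS lsn_before \gset
SELECT pg_sleep(600);   -- wait ten minutes of normal traffic
SELECT pg_size_pretty(pg_wal_lsn_diff(pg_current_wal_lsn(), :'lsn_before'))
       AS wal_written_in_10_min;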
The most frequent query is executed ~8 times/minute:
WITH lock AS (
    -- row-lock un-acquired replication locks whose id matches the given pattern,
    -- skipping rows already locked by another worker
    SELECT id
    FROM replication_queue_lock
    WHERE id LIKE $1 || $4 || $2 || $5
      AND NOT acquired
    FOR UPDATE SKIP LOCKED
), candidate AS (
    -- per lock (and per job attribute $6), take the oldest queued job in the
    -- two given states that has no entry in replication_queue_job_lock yet
    SELECT id
    FROM replication_queue
    WHERE id IN (
        SELECT DISTINCT first_value(queue.id) OVER (
                   PARTITION BY lock_id, job ->> $6
                   ORDER BY queue.created_at
               )
        FROM replication_queue queue
        JOIN lock ON queue.lock_id = lock.id
        WHERE queue.state IN ($7, $8)
          AND NOT EXISTS (
              SELECT $9
              FROM replication_queue_job_lock
              WHERE lock_id = queue.lock_id
          )
    )
    ORDER BY created_at
    LIMIT $3
    FOR UPDATE
), job AS (
    -- move the candidates into the new state and adjust the attempt counter
    UPDATE replication_queue queue
    SET attempt = CASE
                      WHEN job ->> $10 = $11 THEN queue.attempt
                      ELSE queue.attempt - $12
                  END
      , state = $13
      , updated_at = timezone($14, now())
    FROM candidate
    WHERE queue.id = candidate.id
    RETURNING queue.id, queue.state, queue.created_at, queue.updated_at,
              queue.lock_id, queue.attempt, queue.job, queue.meta
), track_job_lock AS (
    -- record which job now holds which lock
    INSERT INTO replication_queue_job_lock (job_id, lock_id, triggered_at)
    SELECT job.id, job.lock_id, timezone($15, now())
    FROM job
    RETURNING lock_id
), acquire_lock AS (
    -- flag those locks as acquired
    UPDATE replication_queue_lock lock
    SET acquired = $16
    FROM track_job_lock tracked
    WHERE lock.id = tracked.lock_id
)
SELECT id, state, created_at, updated_at, lock_id, attempt, job, meta
FROM job
ORDER BY id;
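To see how much of the WAL this statement alone accounts for, per-statement WAL counters could be checked (again just a sketch, assuming pg_stat_statements is enabled and PostgreSQL 13+, which added the wal_* columns):

SELECT calls,
       wal_records,
       wal_fpi,                          -- full-page images often dominate WAL volume
       pg_size_pretty(wal_bytes) AS wal_total,
       left(query, 60)           AS query_preview
FROM   pg_stat_statements
ORDER  BY wal_bytes DESC
LIMIT  10;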
Is there any way to tune the application to slow this thing down?
Asked by Daniil Aksenov
(1 rep)
Jan 30, 2023, 11:17 AM
Last activity: Jan 30, 2023, 06:22 PM