Connection to server on socket "/tmp/.s.PGSQL.5432" failed: FATAL: sorry, too many clients already

I'm using celery with django to run some task that runs multiple threads querying the database. The example below is a simplified version of the original one in a larger project, but the logic and results are the same. The error occurs either way.

models.py

from django.db import models


class Example(models.Model):
    name = models.CharField(max_length=255)

urls.py

from django.urls import path

from core.views import ExampleView

urlpatterns = [path('example/', ExampleView.as_view())]

views.py

from core.tasks import run_task
from rest_framework.response import Response
from rest_framework.views import APIView


class ExampleView(APIView):
    def get(self, request, *args, **kwargs):
        run_task.delay()
        return Response(status=200)

tasks.py

from concurrent.futures import ThreadPoolExecutor, as_completed

from celery import shared_task

from core.models import Example


@shared_task
def run_task():
    with ThreadPoolExecutor(max_workers=100) as executor:
        futures = [
            executor.submit(Example.objects.get_or_create, name='example')
            for _ in range(200)
        ]
        for future in as_completed(futures):
            future.result()

To reproduce, just run celery:

celery -A example worker -l INFO

and django then run:

curl 'http://localhost:8000/api/example/'

You should be seeing the error below:

[2026-04-12 15:08:49,151: ERROR/ForkPoolWorker-15] Task core.tasks.run_task[1b1682e1-3b9f-43e2-b02f-ba878449d2a5] raised unexpected: OperationalError('connection to server on socket "/tmp/.s.PGSQL.5432" failed: FATAL:  sorry, too many clients already\n')
Traceback (most recent call last):
  File "/Users/users/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 279, in ensure_connection
    self.connect()
    ~~~~~~~~~~~~^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 256, in connect
    self.connection = self.get_new_connection(conn_params)
                      ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/postgresql/base.py", line 333, in get_new_connection
    connection = self.Database.connect(**conn_params)
  File "/Users/user/.local/lib/python3.14t/site-packages/psycopg2/__init__.py", line 122, in connect
    conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
psycopg2.OperationalError: connection to server on socket "/tmp/.s.PGSQL.5432" failed: FATAL:  sorry, too many clients already


The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/Users/user/.local/lib/python3.14t/site-packages/celery/app/trace.py", line 585, in trace_task
    R = retval = fun(*args, **kwargs)
                 ~~~^^^^^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/celery/app/trace.py", line 858, in __protected_call__
    return self.run(*args, **kwargs)
           ~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/Users/user/Desktop/postgres-issue/backend/core/tasks.py", line 15, in run_task
    future.result()
    ~~~~~~~~~~~~~^^
  File "/usr/local/lib/python3.14t/concurrent/futures/_base.py", line 443, in result
    return self.__get_result()
           ~~~~~~~~~~~~~~~~~^^
  File "/usr/local/lib/python3.14t/concurrent/futures/_base.py", line 395, in __get_result
    raise self._exception
  File "/usr/local/lib/python3.14t/concurrent/futures/thread.py", line 86, in run
    result = ctx.run(self.task)
  File "/usr/local/lib/python3.14t/concurrent/futures/thread.py", line 73, in run
    return fn(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/manager.py", line 87, in manager_method
    return getattr(self.get_queryset(), name)(*args, **kwargs)
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/query.py", line 987, in get_or_create
    return self.get(**kwargs), False
           ~~~~~~~~^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/query.py", line 635, in get
    num = len(clone)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/query.py", line 372, in __len__
    self._fetch_all()
    ~~~~~~~~~~~~~~~^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/query.py", line 2000, in _fetch_all
    self._result_cache = list(self._iterable_class(self))
                         ~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/query.py", line 95, in __iter__
    results = compiler.execute_sql(
        chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
    )
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/models/sql/compiler.py", line 1622, in execute_sql
    cursor = self.connection.cursor()
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 320, in cursor
    return self._cursor()
           ~~~~~~~~~~~~^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 296, in _cursor
    self.ensure_connection()
    ~~~~~~~~~~~~~~~~~~~~~~^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 278, in ensure_connection
    with self.wrap_database_errors:
         ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/utils.py", line 94, in __exit__
    raise dj_exc_value.with_traceback(traceback) from exc_value
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 279, in ensure_connection
    self.connect()
    ~~~~~~~~~~~~^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/base/base.py", line 256, in connect
    self.connection = self.get_new_connection(conn_params)
                      ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^
  File "/Users/user/.local/lib/python3.14t/site-packages/django/utils/asyncio.py", line 26, in inner
    return func(*args, **kwargs)
  File "/Users/user/.local/lib/python3.14t/site-packages/django/db/backends/postgresql/base.py", line 333, in get_new_connection
    connection = self.Database.connect(**conn_params)
  File "/Users/user/.local/lib/python3.14t/site-packages/psycopg2/__init__.py", line 122, in connect
    conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
django.db.utils.OperationalError: connection to server on socket "/tmp/.s.PGSQL.5432" failed: FATAL:  sorry, too many clients already

I tried with different python versions: 3.14t, 3.14, postgres versions: 17, 18, django 5+, 6+, ... and the results are the same. Besides, lowering the amount of concurrency would prevent the error for the first run or few runs, then on the nth run, the error is hit again, so even if the below works, that doesn't mean the problem is solved.


@shared_task
def run_task():
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = [
            executor.submit(Example.objects.get_or_create, name='example')
            for _ in range(200)
        ]
        for future in as_completed(futures):
            future.result()

I'm not sure why these connections aren't being closed / cleaned up automatically, which is probably the main issue here. I even tried manually cleaning connections to no avail.

from django.db import close_old_connections, connections


@shared_task
def run_task():
    with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
        futures = [
            executor.submit(Example.objects.get_or_create, name='example')
            for _ in range(200)
        ]
        for future in as_completed(futures):
            future.result()
    close_old_connections()
    for connection in connections.all():
        connection.close()

I'm running the above on my m4 max mbp Tahoe 26.4.1 + postgres 18 + python3.14t and the pip versions below:

celery                        5.6.3
channels                      4.3.2
channels_redis                4.3.0
daphne                        4.2.1
Django                        6.0.4
django-cors-headers           4.9.0
djangorestframework           3.17.1
djangorestframework_simplejwt 5.5.1
pip                           26.0.1
psycopg2                      2.9.11
redis                         7.4.0
Вернуться на верх