From 51111996c68ae9961351a1651d2fa264daf14c99 Mon Sep 17 00:00:00 2001
From: Jake Howard
Date: Sat, 20 Apr 2024 20:26:30 +0100
Subject: [PATCH] [general] Update default size when using `SpooledTemporaryFile` (#1359)

Leaving `0` as the default means the file never gets rolled over to
disk, resulting in higher memory usage than expected (or needed).

S3 and GCS both used `0` for their default value. Now, they use
`FILE_UPLOAD_MAX_MEMORY_SIZE` (2.5MB by default). This is significantly
better than `0`, but still allows configuration as needed.

Azure already set a better default of 2MB; it is now brought in line
with S3 and GCS.
---
 docs/backends/amazon-S3.rst        | 2 +-
 docs/backends/azure.rst            | 4 ++--
 docs/backends/dropbox.rst          | 5 +++++
 docs/backends/gcloud.rst           | 4 ++--
 storages/backends/azure_storage.py | 6 ++++--
 storages/backends/dropbox.py       | 9 ++++++++-
 storages/backends/gcloud.py        | 7 ++++---
 storages/backends/s3.py            | 4 +++-
 8 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/docs/backends/amazon-S3.rst b/docs/backends/amazon-S3.rst
index 660c9144..eac7cbb7 100644
--- a/docs/backends/amazon-S3.rst
+++ b/docs/backends/amazon-S3.rst
@@ -91,7 +91,7 @@ Settings

 ``max_memory_size`` or ``AWS_S3_MAX_MEMORY_SIZE``

-    Default: ``0`` i.e do not roll over
+    Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``

     The maximum amount of memory (in bytes) a file can take up before being
     rolled over into a temporary file on disk.
diff --git a/docs/backends/azure.rst b/docs/backends/azure.rst
index 20c94399..c53983bf 100644
--- a/docs/backends/azure.rst
+++ b/docs/backends/azure.rst
@@ -123,9 +123,9 @@ Settings

     Global connection timeout in seconds.

-``max_memory`` size ``AZURE_BLOB_MAX_MEMORY_SIZE``
+``max_memory_size`` or ``AZURE_BLOB_MAX_MEMORY_SIZE``

-    Default: ``2*1024*1024`` i.e ``2MB``
+    Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``

     Maximum memory used by a downloaded file before dumping it to disk in bytes.
diff --git a/docs/backends/dropbox.rst b/docs/backends/dropbox.rst
index e2b2d6df..00591784 100644
--- a/docs/backends/dropbox.rst
+++ b/docs/backends/dropbox.rst
@@ -120,6 +120,11 @@ Settings

     Sets the Dropbox WriteMode strategy. Read more in the `official docs`_.

+``max_memory_size`` or ``DROPBOX_MAX_MEMORY_SIZE``
+
+    Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``
+
+    Maximum memory used by a downloaded file before dumping it to disk in bytes.

 .. _`tutorial`: https://www.dropbox.com/developers/documentation/python#tutorial
 .. _`Dropbox SDK for Python`: https://www.dropbox.com/developers/documentation/python#tutorial
diff --git a/docs/backends/gcloud.rst b/docs/backends/gcloud.rst
index 962298eb..687f945e 100644
--- a/docs/backends/gcloud.rst
+++ b/docs/backends/gcloud.rst
@@ -147,10 +147,10 @@ Settings

 ``max_memory_size`` or ``GS_MAX_MEMORY_SIZE``

-    default: ``0`` i.e do not rollover
+    default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``

     The maximum amount of memory a returned file can take up (in bytes) before being
-    rolled over into a temporary file on disk. Default is 0: Do not roll over.
+    rolled over into a temporary file on disk.
 ``blob_chunk_size`` or ``GS_BLOB_CHUNK_SIZE``
diff --git a/storages/backends/azure_storage.py b/storages/backends/azure_storage.py
index 0ae14802..7605ca16 100644
--- a/storages/backends/azure_storage.py
+++ b/storages/backends/azure_storage.py
@@ -40,7 +40,7 @@ def _get_file(self):
         file = SpooledTemporaryFile(
             max_size=self._storage.max_memory_size,
             suffix=".AzureStorageFile",
-            dir=setting("FILE_UPLOAD_TEMP_DIR", None),
+            dir=setting("FILE_UPLOAD_TEMP_DIR"),
         )

         if "r" in self._mode or "a" in self._mode:
@@ -141,7 +141,9 @@ def get_default_settings(self):
             "azure_ssl": setting("AZURE_SSL", True),
             "upload_max_conn": setting("AZURE_UPLOAD_MAX_CONN", 2),
             "timeout": setting("AZURE_CONNECTION_TIMEOUT_SECS", 20),
-            "max_memory_size": setting("AZURE_BLOB_MAX_MEMORY_SIZE", 2 * 1024 * 1024),
+            "max_memory_size": setting(
+                "AZURE_BLOB_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
+            ),
             "expiration_secs": setting("AZURE_URL_EXPIRATION_SECS"),
             "overwrite_files": setting("AZURE_OVERWRITE_FILES", False),
             "location": setting("AZURE_LOCATION", ""),
diff --git a/storages/backends/dropbox.py b/storages/backends/dropbox.py
index 015d3158..cc26b55c 100644
--- a/storages/backends/dropbox.py
+++ b/storages/backends/dropbox.py
@@ -47,7 +47,11 @@ def __init__(self, name, storage):

     def _get_file(self):
         if self._file is None:
-            self._file = SpooledTemporaryFile()
+            self._file = SpooledTemporaryFile(
+                max_size=self._storage.max_memory_size,
+                suffix=".DropboxFile",
+                dir=setting("FILE_UPLOAD_TEMP_DIR"),
+            )
             # As dropbox==9.3.0, the client returns a tuple
             # (dropbox.files.FileMetadata, requests.models.Response)
             file_metadata, response = self._storage.client.files_download(self.name)
@@ -119,6 +123,9 @@ def get_default_settings(self):
             "oauth2_refresh_token": setting("DROPBOX_OAUTH2_REFRESH_TOKEN"),
             "timeout": setting("DROPBOX_TIMEOUT", _DEFAULT_TIMEOUT),
             "write_mode": setting("DROPBOX_WRITE_MODE", _DEFAULT_MODE),
+            "max_memory_size": setting(
+                "DROPBOX_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
+            ),
         }

     def _full_path(self, name):
diff --git a/storages/backends/gcloud.py b/storages/backends/gcloud.py
index 4c582b69..b9d32935 100644
--- a/storages/backends/gcloud.py
+++ b/storages/backends/gcloud.py
@@ -138,9 +138,10 @@ def get_default_settings(self):
             "file_overwrite": setting("GS_FILE_OVERWRITE", True),
             "object_parameters": setting("GS_OBJECT_PARAMETERS", {}),
             # The max amount of memory a returned file can take up before being
-            # rolled over into a temporary file on disk. Default is 0: Do not
-            # roll over.
-            "max_memory_size": setting("GS_MAX_MEMORY_SIZE", 0),
+            # rolled over into a temporary file on disk.
+            "max_memory_size": setting(
+                "GS_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
+            ),
             "blob_chunk_size": setting("GS_BLOB_CHUNK_SIZE"),
         }
diff --git a/storages/backends/s3.py b/storages/backends/s3.py
index ef915164..0ca31beb 100644
--- a/storages/backends/s3.py
+++ b/storages/backends/s3.py
@@ -403,7 +403,9 @@ def get_default_settings(self):
             "region_name": setting("AWS_S3_REGION_NAME"),
             "use_ssl": setting("AWS_S3_USE_SSL", True),
             "verify": setting("AWS_S3_VERIFY", None),
-            "max_memory_size": setting("AWS_S3_MAX_MEMORY_SIZE", 0),
+            "max_memory_size": setting(
+                "AWS_S3_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
+            ),
             "default_acl": setting("AWS_DEFAULT_ACL", None),
             "use_threads": setting("AWS_S3_USE_THREADS", True),
             "transfer_config": setting("AWS_S3_TRANSFER_CONFIG", None),
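
Note: a minimal standalone sketch of the rollover behaviour the commit message
describes, not part of the patch itself. `_rolled` is a CPython implementation
detail of `SpooledTemporaryFile`, used here only to observe the spill; the 10MB
payload is an arbitrary size chosen for illustration.

    from tempfile import SpooledTemporaryFile

    DJANGO_DEFAULT = 2621440  # Django's FILE_UPLOAD_MAX_MEMORY_SIZE default: 2.5MB
    PAYLOAD = b"x" * (10 * 1024 * 1024)  # pretend this is a 10MB download

    # max_size=0 disables rollover entirely: the whole payload is held in memory.
    with SpooledTemporaryFile(max_size=0) as f:
        f.write(PAYLOAD)
        print(f._rolled)  # False: never spilled to disk

    # With a non-zero max_size, the buffer rolls over to a real temporary file
    # on disk as soon as a write passes the threshold.
    with SpooledTemporaryFile(max_size=DJANGO_DEFAULT) as f:
        f.write(PAYLOAD)
        print(f._rolled)  # True: now backed by a temp file on disk

Under the old `0` defaults, a large S3 or GCS download was therefore buffered
entirely in RAM; with the new default, at most 2.5MB is kept in memory before
the data spills to a temporary file (in `FILE_UPLOAD_TEMP_DIR`, as the backends
pass it via `dir=`).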