From 51877a40e1d610bf96d7255c170eae4d01fe38ae Mon Sep 17 00:00:00 2001
From: Josh Schneier
Date: Sat, 20 Apr 2024 22:05:25 -0400
Subject: [PATCH] Revert "[general] Update default size when using
 `SpooledTemporaryFile` (#1359)" (#1377)

This reverts commit 51111996c68ae9961351a1651d2fa264daf14c99.
---
 docs/backends/amazon-S3.rst        | 2 +-
 docs/backends/azure.rst            | 4 ++--
 docs/backends/dropbox.rst          | 5 -----
 docs/backends/gcloud.rst           | 4 ++--
 storages/backends/azure_storage.py | 6 ++----
 storages/backends/dropbox.py       | 9 +--------
 storages/backends/gcloud.py        | 7 +++----
 storages/backends/s3.py            | 4 +---
 8 files changed, 12 insertions(+), 29 deletions(-)

diff --git a/docs/backends/amazon-S3.rst b/docs/backends/amazon-S3.rst
index eac7cbb7..660c9144 100644
--- a/docs/backends/amazon-S3.rst
+++ b/docs/backends/amazon-S3.rst
@@ -91,7 +91,7 @@ Settings
 ``max_memory_size`` or ``AWS_S3_MAX_MEMORY_SIZE``

-  Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``
+  Default: ``0`` i.e do not roll over

   The maximum amount of memory (in bytes) a file can take up before
   being rolled over into a temporary file on disk.

diff --git a/docs/backends/azure.rst b/docs/backends/azure.rst
index c53983bf..20c94399 100644
--- a/docs/backends/azure.rst
+++ b/docs/backends/azure.rst
@@ -123,9 +123,9 @@ Settings

   Global connection timeout in seconds.

-``max_memory_size`` size ``AZURE_BLOB_MAX_MEMORY_SIZE``
+``max_memory`` size ``AZURE_BLOB_MAX_MEMORY_SIZE``

-  Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``
+  Default: ``2*1024*1024`` i.e ``2MB``

   Maximum memory used by a downloaded file before dumping it to disk in bytes.

diff --git a/docs/backends/dropbox.rst b/docs/backends/dropbox.rst
index 00591784..e2b2d6df 100644
--- a/docs/backends/dropbox.rst
+++ b/docs/backends/dropbox.rst
@@ -120,11 +120,6 @@ Settings

   Sets the Dropbox WriteMode strategy. Read more in the `official docs`_.

-``max_memory_size`` size ``DROPBOX_MAX_MEMORY_SIZE``
-
-  Default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``
-
-  Maximum memory used by a downloaded file before dumping it to disk in bytes.

 .. _`tutorial`: https://www.dropbox.com/developers/documentation/python#tutorial
 .. _`Dropbox SDK for Python`: https://www.dropbox.com/developers/documentation/python#tutorial

diff --git a/docs/backends/gcloud.rst b/docs/backends/gcloud.rst
index 687f945e..962298eb 100644
--- a/docs/backends/gcloud.rst
+++ b/docs/backends/gcloud.rst
@@ -147,10 +147,10 @@ Settings

 ``max_memory_size`` or ``GS_MAX_MEMORY_SIZE``

-  default: ``FILE_UPLOAD_MAX_MEMORY_SIZE``
+  default: ``0`` i.e do not rollover

   The maximum amount of memory a returned file can take up (in bytes) before being
-  rolled over into a temporary file on disk.
+  rolled over into a temporary file on disk. Default is 0: Do not roll over.

 ``blob_chunk_size`` or ``GS_BLOB_CHUNK_SIZE``

diff --git a/storages/backends/azure_storage.py b/storages/backends/azure_storage.py
index 7605ca16..0ae14802 100644
--- a/storages/backends/azure_storage.py
+++ b/storages/backends/azure_storage.py
@@ -40,7 +40,7 @@ def _get_file(self):
         file = SpooledTemporaryFile(
             max_size=self._storage.max_memory_size,
             suffix=".AzureStorageFile",
-            dir=setting("FILE_UPLOAD_TEMP_DIR"),
+            dir=setting("FILE_UPLOAD_TEMP_DIR", None),
         )

         if "r" in self._mode or "a" in self._mode:
@@ -141,9 +141,7 @@ def get_default_settings(self):
             "azure_ssl": setting("AZURE_SSL", True),
             "upload_max_conn": setting("AZURE_UPLOAD_MAX_CONN", 2),
             "timeout": setting("AZURE_CONNECTION_TIMEOUT_SECS", 20),
-            "max_memory_size": setting(
-                "AZURE_BLOB_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
-            ),
+            "max_memory_size": setting("AZURE_BLOB_MAX_MEMORY_SIZE", 2 * 1024 * 1024),
             "expiration_secs": setting("AZURE_URL_EXPIRATION_SECS"),
             "overwrite_files": setting("AZURE_OVERWRITE_FILES", False),
             "location": setting("AZURE_LOCATION", ""),

diff --git a/storages/backends/dropbox.py b/storages/backends/dropbox.py
index cc26b55c..015d3158 100644
--- a/storages/backends/dropbox.py
+++ b/storages/backends/dropbox.py
@@ -47,11 +47,7 @@ def __init__(self, name, storage):

     def _get_file(self):
         if self._file is None:
-            self._file = SpooledTemporaryFile(
-                max_size=self._storage.max_memory_size,
-                suffix=".DropboxFile",
-                dir=setting("FILE_UPLOAD_TEMP_DIR"),
-            )
+            self._file = SpooledTemporaryFile()
             # As dropbox==9.3.0, the client returns a tuple
             # (dropbox.files.FileMetadata, requests.models.Response)
             file_metadata, response = self._storage.client.files_download(self.name)
@@ -123,9 +119,6 @@ def get_default_settings(self):
             "oauth2_refresh_token": setting("DROPBOX_OAUTH2_REFRESH_TOKEN"),
             "timeout": setting("DROPBOX_TIMEOUT", _DEFAULT_TIMEOUT),
             "write_mode": setting("DROPBOX_WRITE_MODE", _DEFAULT_MODE),
-            "max_memory_size": setting(
-                "DROPBOX_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
-            ),
         }

     def _full_path(self, name):

diff --git a/storages/backends/gcloud.py b/storages/backends/gcloud.py
index b9d32935..4c582b69 100644
--- a/storages/backends/gcloud.py
+++ b/storages/backends/gcloud.py
@@ -138,10 +138,9 @@ def get_default_settings(self):
             "file_overwrite": setting("GS_FILE_OVERWRITE", True),
             "object_parameters": setting("GS_OBJECT_PARAMETERS", {}),
             # The max amount of memory a returned file can take up before being
-            # rolled over into a temporary file on disk.
-            "max_memory_size": setting(
-                "GS_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
-            ),
+            # rolled over into a temporary file on disk. Default is 0: Do not
+            # roll over.
+            "max_memory_size": setting("GS_MAX_MEMORY_SIZE", 0),
             "blob_chunk_size": setting("GS_BLOB_CHUNK_SIZE"),
         }

diff --git a/storages/backends/s3.py b/storages/backends/s3.py
index 0ca31beb..ef915164 100644
--- a/storages/backends/s3.py
+++ b/storages/backends/s3.py
@@ -403,9 +403,7 @@ def get_default_settings(self):
             "region_name": setting("AWS_S3_REGION_NAME"),
             "use_ssl": setting("AWS_S3_USE_SSL", True),
             "verify": setting("AWS_S3_VERIFY", None),
-            "max_memory_size": setting(
-                "AWS_S3_MAX_MEMORY_SIZE", setting("FILE_UPLOAD_MAX_MEMORY_SIZE")
-            ),
+            "max_memory_size": setting("AWS_S3_MAX_MEMORY_SIZE", 0),
             "default_acl": setting("AWS_DEFAULT_ACL", None),
             "use_threads": setting("AWS_S3_USE_THREADS", True),
             "transfer_config": setting("AWS_S3_TRANSFER_CONFIG", None),
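Reviewer context, not part of the patch: the defaults restored above are handed to Python's ``tempfile.SpooledTemporaryFile``, whose ``max_size`` argument decides when a buffered download spills from memory to a temporary file on disk. A minimal standard-library sketch of that rollover behavior (the sizes are arbitrary, and it peeks at the private ``_rolled`` flag purely for illustration):

```python
from tempfile import SpooledTemporaryFile

# max_size=0 (the S3/GCS default restored here) disables size-based
# rollover: everything stays buffered in memory however much is written.
in_memory = SpooledTemporaryFile(max_size=0)
in_memory.write(b"x" * (10 * 1024 * 1024))  # 10 MB, still in memory
print(in_memory._rolled)  # False

# A positive max_size (e.g. the 2 MB Azure default restored here) rolls
# the buffer over to a real temporary file once the write exceeds it.
spooled = SpooledTemporaryFile(max_size=2 * 1024 * 1024)
spooled.write(b"x" * (3 * 1024 * 1024))  # exceeds 2 MB threshold
print(spooled._rolled)  # True: now backed by an on-disk temp file
```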