Prompt Details
Model
(grok-3)
Token size
110
Example input
[programming language]: Python
[perform action]: create a multi-threaded file downloader
[optimized for performance, scalable, and maintainable]: efficient under concurrent execution
[algorithms and logic]: queue and stack operations
Example output
import os
import queue
import threading
import requests
import logging
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional
import time
# Configure root logging once at import time: timestamped records including
# the worker thread's name (so concurrent downloads are distinguishable),
# emitted to stderr via a StreamHandler.
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - [%(threadName)s] - %(message)s',
handlers=[
logging.StreamHandler()
]
)
class FileDownloader:
    """
    A multi-threaded file downloader that fetches files from URLs concurrently.

    A thread-safe queue distributes download tasks to a configurable pool of
    worker threads. Per-URL outcomes are recorded under a lock so that
    download_files() can accurately report None for failed downloads, as its
    contract promises.
    """

    def __init__(self, num_workers: int = 4, download_dir: str = "downloads"):
        """
        Initialize the FileDownloader.

        Args:
            num_workers (int): Number of concurrent worker threads (default: 4).
            download_dir (str): Directory to save downloaded files (default: 'downloads').

        Raises:
            OSError: If the download directory cannot be created.
        """
        self.num_workers = max(1, num_workers)  # Ensure at least one worker
        self.download_dir = download_dir
        self.task_queue = queue.Queue()
        self.lock = threading.Lock()  # Guards self._results across workers
        self._results = {}  # url -> saved file path, or None on failure
        self._setup_download_dir()

    def _setup_download_dir(self) -> None:
        """Create the download directory if it doesn't exist."""
        try:
            os.makedirs(self.download_dir, exist_ok=True)
            logging.info(f"Download directory set to: {self.download_dir}")
        except OSError as e:
            logging.error(f"Failed to create download directory: {e}")
            raise

    def _get_filename_from_url(self, url: str) -> str:
        """
        Extract a filename from a URL.

        NOTE(review): URLs whose paths share a basename (or that have no path,
        yielding the fallback name) will overwrite each other on disk — TODO
        confirm whether de-duplication is needed for the intended workloads.

        Args:
            url (str): The URL of the file.

        Returns:
            str: The extracted filename, or "downloaded_file" if the URL path
                has no basename.
        """
        parsed_url = urlparse(url)
        filename = os.path.basename(parsed_url.path)
        return filename if filename else "downloaded_file"

    def _download_file(self, url: str) -> Optional[str]:
        """
        Download a single file from a URL and save it to the download directory.

        Args:
            url (str): The URL of the file to download.

        Returns:
            Optional[str]: Path to the saved file, or None if download failed.
        """
        filename = self._get_filename_from_url(url)
        file_path = os.path.join(self.download_dir, filename)
        try:
            logging.info(f"Starting download: {url}")
            start_time = time.time()
            # Stream the file to handle large files efficiently
            with requests.get(url, stream=True, timeout=10) as response:
                response.raise_for_status()  # Raise exception for bad status codes
                with open(file_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:  # Filter out keep-alive chunks
                            f.write(chunk)
            elapsed_time = time.time() - start_time
            logging.info(f"Completed download: {url} in {elapsed_time:.2f}s")
            return file_path
        except requests.RequestException as e:
            logging.error(f"Failed to download {url}: {e}")
            return None
        except OSError as e:
            logging.error(f"Failed to save file {file_path}: {e}")
            return None

    def worker(self) -> None:
        """
        Worker thread function that processes download tasks from the queue.

        Records each URL's outcome in self._results (under self.lock) and runs
        until the queue drains. task_done() is called exactly once per get()
        (in a finally) so task_queue.join() cannot deadlock on a worker error.
        """
        while True:
            try:
                # Get a task from the queue with a timeout so idle workers exit
                url = self.task_queue.get(timeout=1)
            except queue.Empty:
                logging.debug("Queue is empty, worker exiting")
                break
            try:
                result = self._download_file(url)
                with self.lock:
                    self._results[url] = result
            except Exception as e:
                # Best-effort: log and record the failure, keep the worker alive
                logging.error(f"Unexpected error in worker: {e}")
                with self.lock:
                    self._results[url] = None
            finally:
                self.task_queue.task_done()

    def download_files(self, urls: List[str]) -> List[Optional[str]]:
        """
        Download multiple files concurrently from a list of URLs.

        Args:
            urls (List[str]): List of URLs to download.

        Returns:
            List[Optional[str]]: One entry per input URL, in order: the saved
                file path on success, or None on failure.
        """
        if not urls:
            logging.warning("No URLs provided for download")
            return []
        # Reset per-call results so repeated calls don't leak stale entries
        with self.lock:
            self._results = {}
        # Add URLs to the task queue
        for url in urls:
            self.task_queue.put(url)
        # Start worker threads
        threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self.worker, name=f"Worker-{i+1}")
            t.start()
            threads.append(t)
        # Wait for all tasks to complete, then for all threads to finish
        self.task_queue.join()
        for t in threads:
            t.join()
        logging.info("All downloads completed")
        # Bug fix: the previous version fabricated a path for every URL,
        # including failed downloads; report the recorded outcome instead.
        return [self._results.get(url) for url in urls]
# Unit Tests
import unittest
import shutil
import tempfile
class TestFileDownloader(unittest.TestCase):
    """Unit tests for FileDownloader.

    NOTE(review): these tests hit live endpoints (httpbin.org / example.com)
    and therefore require network access; consider mocking `requests` for
    hermetic CI runs.
    """

    def setUp(self):
        """Set up a temporary download directory and downloader instance."""
        self.download_dir = tempfile.mkdtemp()
        self.downloader = FileDownloader(num_workers=2, download_dir=self.download_dir)
        # Sample URLs for testing (using small, publicly available files)
        self.test_urls = [
            "https://www.example.com",  # Will fail (not a file)
            "https://httpbin.org/image/png",  # Valid small image
            "https://httpbin.org/image/jpeg"  # Valid small image
        ]

    def tearDown(self):
        """Clean up the temporary download directory."""
        shutil.rmtree(self.download_dir, ignore_errors=True)

    def test_download_single_file(self):
        """Test downloading a single valid file."""
        url = "https://httpbin.org/image/png"
        result = self.downloader.download_files([url])
        self.assertEqual(len(result), 1)
        self.assertIsNotNone(result[0])
        self.assertTrue(os.path.exists(result[0]))

    def test_download_multiple_files(self):
        """Test downloading multiple files concurrently."""
        results = self.downloader.download_files(self.test_urls)
        self.assertEqual(len(results), 3)
        # Check that valid files were downloaded
        for result in results[1:]:  # Skip the first (invalid) URL
            self.assertIsNotNone(result)
            self.assertTrue(os.path.exists(result), f"File {result} was not downloaded")

    def test_empty_url_list(self):
        """Test handling of an empty URL list."""
        results = self.downloader.download_files([])
        self.assertEqual(results, [])

    def test_invalid_url(self):
        """Test handling of an invalid URL."""
        results = self.downloader.download_files(["https://nonexistent.example.com/file"])
        self.assertEqual(len(results), 1)
        # Bug fix: the old assertion assumed a str path even for failures.
        # Per the documented contract a failure yields None; tolerate both
        # None and a path to a file that was never written.
        self.assertTrue(results[0] is None or not os.path.exists(results[0]))

    def test_download_dir_creation(self):
        """Test automatic creation of download directory."""
        new_dir = os.path.join(tempfile.gettempdir(), "new_downloads")
        downloader = FileDownloader(download_dir=new_dir)
        self.assertTrue(os.path.exists(new_dir))
        shutil.rmtree(new_dir, ignore_errors=True)
if __name__ == "__main__":
    # Example usage: download a few small test images concurrently.
    downloader = FileDownloader(num_workers=3, download_dir="downloads")
    urls = [
        "https://httpbin.org/image/png",
        "https://httpbin.org/image/jpeg",
        "https://httpbin.org/image/webp"
    ]
    downloaded_files = downloader.download_files(urls)
    for url, file_path in zip(urls, downloaded_files):
        # Bug fix: download_files documents None for failures; guard before
        # os.path.exists, which raises TypeError on None.
        status = "Success" if file_path and os.path.exists(file_path) else "Failed"
        print(f"URL: {url} -> {status} ({file_path})")
    # Run unit tests (exit=False keeps the example output visible above)
    unittest.main(argv=[''], exit=False)
By purchasing this prompt, you agree to our terms of service
GROK-3
Unlock the power of elite-level programming with this dynamic prompt! Whether you're building a complex algorithm or a scalable application, this tool guides AI to write clean, efficient, and production-ready code in any language. Featuring best practices, thorough documentation, and robust testing — making it the ultimate tool for professional-grade software development.
...more
Added over 1 month ago
