1. Source Code Analysis
Code path: scrapy/core/downloader/handlers/__init__.py
For the detailed walkthrough, see the inline comments in the code below.
"""Download handlers for different schemes"""
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Union, cast
from twisted.internet import defer
from twisted.internet.defer import Deferred
from scrapy import Request, Spider, signals
from scrapy.exceptions import NotConfigured, NotSupported
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import create_instance, load_object
from scrapy.utils.python import without_none_values
if TYPE_CHECKING:
from scrapy.crawler import Crawler
logger = logging.getLogger(__name__)
class DownloadHandlers:
    def __init__(self, crawler: "Crawler"):
        self._crawler: "Crawler" = crawler
        # Maps scheme -> path of the handler class for that scheme. Note that only
        # the path is stored here; the handler itself has not been instantiated yet.
        self._schemes: Dict[str, Union[str, Callable]] = {}
        # stores instanced handlers for schemes, i.e. the downloader instance for each scheme
        self._handlers: Dict[str, Any] = {}
        # remembers failed handlers, i.e. schemes that are not configured (or failed to load)
        self._notconfigured: Dict[str, str] = {}
        # read from the settings which handler class to use for which scheme
        handlers: Dict[str, Union[str, Callable]] = without_none_values(
            crawler.settings.getwithbase("DOWNLOAD_HANDLERS")
        )
        for scheme, clspath in handlers.items():
            # store scheme -> handler class path in self._schemes
            self._schemes[scheme] = clspath
            self._load_handler(scheme, skip_lazy=True)
            # Most scheme handlers are already loaded inside this loop; only the s3
            # handler is not. _load_handler is called here with skip_lazy=True, and
            # inside _load_handler that flag is combined with each handler class's
            # `lazy` attribute to decide whether the handler gets loaded now or later.

        crawler.signals.connect(self._close, signals.engine_stopped)

    def _get_handler(self, scheme: str) -> Any:
        """Lazy-load the downloadhandler for a scheme
        only on the first request for that scheme.
        """
        # Return the handler instance that matches the given scheme name.
        if scheme in self._handlers:
            # a handler for this scheme has already been instantiated: return it directly
            return self._handlers[scheme]
        if scheme in self._notconfigured:
            # the scheme is already known to be unconfigured: return None directly
            return None
        if scheme not in self._schemes:
            # the first time a scheme with no configured handler shows up, record it
            self._notconfigured[scheme] = "no handler available for that scheme"
            return None
        return self._load_handler(scheme)

    def _load_handler(self, scheme: str, skip_lazy: bool = False) -> Any:
        # Instantiate the download handler for the given scheme.
        path = self._schemes[scheme]
        try:
            dhcls = load_object(path)  # load the handler class
            if skip_lazy and getattr(dhcls, "lazy", True):
                # decide whether this handler should be lazy-loaded later instead
                return None
            dh = create_instance(  # instantiate the scheme handler
                objcls=dhcls,
                settings=self._crawler.settings,
                crawler=self._crawler,
            )
        except NotConfigured as ex:
            self._notconfigured[scheme] = str(ex)
            return None
        except Exception as ex:
            logger.error(
                'Loading "%(clspath)s" for scheme "%(scheme)s"',
                {"clspath": path, "scheme": scheme},
                exc_info=True,
                extra={"crawler": self._crawler},
            )
            self._notconfigured[scheme] = str(ex)
            return None
        else:
            self._handlers[scheme] = dh
            return dh

    def download_request(self, request: Request, spider: Spider) -> Deferred:
        # Extract the scheme from the request URL, look up its handler, then download.
        scheme = urlparse_cached(request).scheme
        handler = self._get_handler(scheme)
        if not handler:
            raise NotSupported(
                f"Unsupported URL scheme '{scheme}': {self._notconfigured[scheme]}"
            )
        return cast(Deferred, handler.download_request(request, spider))

    @defer.inlineCallbacks
    def _close(self, *_a: Any, **_kw: Any) -> Generator[Deferred, Any, None]:
        # On the engine_stopped signal, close each scheme handler in turn.
        for dh in self._handlers.values():
            if hasattr(dh, "close"):
                yield dh.close()
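To make the loading mechanism concrete, here is a minimal sketch of how a custom handler could be plugged into DownloadHandlers through the DOWNLOAD_HANDLERS setting. The myproject.handlers module, the MySchemeDownloadHandler class and the "myscheme" scheme are hypothetical names used only for illustration; the interface it follows (a download_request method returning a Deferred, an optional close method, the lazy class attribute, and from_crawler as used by create_instance) comes from the code above.

# settings.py -- map a scheme to a handler class path; this path is what ends up
# in DownloadHandlers._schemes before any instantiation happens.
DOWNLOAD_HANDLERS = {
    "myscheme": "myproject.handlers.MySchemeDownloadHandler",  # hypothetical path
}

# myproject/handlers.py (hypothetical module)
from twisted.internet import defer

from scrapy.http import HtmlResponse


class MySchemeDownloadHandler:
    # lazy defaults to True via getattr(dhcls, "lazy", True); with lazy = False the
    # handler is instantiated eagerly in DownloadHandlers.__init__ instead of on
    # the first request for its scheme.
    lazy = False

    def __init__(self, settings):
        self.settings = settings

    @classmethod
    def from_crawler(cls, crawler):
        # create_instance() prefers from_crawler() when the class defines it.
        return cls(crawler.settings)

    def download_request(self, request, spider):
        # Must return a Deferred that eventually fires with a Response;
        # a trivial canned response is used here as a stand-in.
        return defer.succeed(HtmlResponse(url=request.url, body=b"<html></html>"))

    def close(self):
        # Optional; DownloadHandlers._close() yields it on engine_stopped.
        return defer.succeed(None)

Note that raising NotConfigured from the constructor (or from_crawler) is how a handler opts out cleanly: as the code above shows, DownloadHandlers catches it and records the scheme in _notconfigured instead of treating it as an error.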
2. Module Analysis
DownloadHandlers can be understood as the manager of the per-scheme download handlers: its job is to load the handler that corresponds to each URL scheme. As the figure below shows, Scrapy provides download handlers for the data, file, ftp, http and s3 schemes by default.
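For reference, those per-scheme defaults come from the DOWNLOAD_HANDLERS_BASE setting in scrapy/settings/default_settings.py, which getwithbase("DOWNLOAD_HANDLERS") merges with any user-supplied DOWNLOAD_HANDLERS. The mapping looks roughly like this (the exact class paths may differ slightly between Scrapy versions):

DOWNLOAD_HANDLERS_BASE = {
    "data": "scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler",
    "file": "scrapy.core.downloader.handlers.file.FileDownloadHandler",
    "ftp": "scrapy.core.downloader.handlers.ftp.FTPDownloadHandler",
    "http": "scrapy.core.downloader.handlers.http.HTTPDownloadHandler",
    "https": "scrapy.core.downloader.handlers.http.HTTPDownloadHandler",
    "s3": "scrapy.core.downloader.handlers.s3.S3DownloadHandler",
}

Setting a scheme to None in DOWNLOAD_HANDLERS disables it, which is why the merged dict is passed through without_none_values() in __init__.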