spider.py
import scrapy
from scrapy import signals
from selenium import webdriver


class BaiduSpider(scrapy.Spider):
    name = 'baidu'
    allowed_domains = ['baidu.com']
    start_urls = ['http://www.baidu.com/']

    # Hook the spider_closed signal
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        # Create the spider instance
        spider = super(BaiduSpider, cls).from_crawler(crawler, *args, **kwargs)
        # One shared Chrome instance for the whole spider
        spider.chrome = webdriver.Chrome()
        # Connect the signal so the browser is cleaned up when the spider closes
        crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
        return spider

    def spider_closed(self, spider):
        spider.logger.info('Spider closed: %s', spider.name)
        # quit() shuts down the browser and the chromedriver process
        # (close() would only close the current window)
        spider.chrome.quit()

    def parse(self, response):
        print(response.text)
middlewares.py
from scrapy.http.response.html import HtmlResponse


class Selenium:
    # Earlier attempt: one Chrome per middleware instance
    # def __init__(self):
    #     self.chrome = webdriver.Chrome()

    def process_request(self, request, spider):
        # Earlier attempt: a new Chrome for every request
        # chrome = webdriver.Chrome()
        # chrome.get(request.url)
        # html = chrome.page_source

        # self.chrome.get(request.url)
        # html = self.chrome.page_source

        # Reuse the browser created in spider.py's from_crawler
        spider.chrome.get(request.url)
        html = spider.chrome.page_source
        # Return the rendered page as the response for spider.py;
        # Scrapy treats the request as downloaded and skips its own downloader
        return HtmlResponse(url=request.url, body=html, encoding='utf-8')
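For process_request to ever be called, the middleware also has to be enabled as a downloader middleware. A minimal sketch of the registration, assuming the project package is called myproject and the class above lives in myproject/middlewares.py (both names are assumptions, adjust them to the actual project):

settings.py
# Route every request through the Selenium middleware (module path assumed)
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.Selenium': 543,
}

With this in place, scrapy crawl baidu sends each request through the shared Chrome instance instead of Scrapy's default downloader.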
For signals, see the official documentation:
Signals — Scrapy 0.24.6 documentation
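The linked page lists all built-in signals; the connect pattern used in spider.py works the same way for any of them. As a minimal, hypothetical sketch (not part of the original post), an extension that counts scraped items via the item_scraped signal:

from scrapy import signals


class ItemCounter:
    # Hypothetical extension: counts items using the item_scraped signal
    def __init__(self):
        self.count = 0

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls()
        crawler.signals.connect(ext.item_scraped, signal=signals.item_scraped)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def item_scraped(self, item, response, spider):
        self.count += 1

    def spider_closed(self, spider):
        spider.logger.info('%s scraped %d items', spider.name, self.count)

An extension like this would be switched on through the EXTENSIONS setting, just as the Selenium middleware is enabled through DOWNLOADER_MIDDLEWARES.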