Use this approach when the data you need to scrape and parse is not all on the same page (deep crawling).
We use a classical Chinese poetry site (shicimingju.com) as the demo target.
import scrapy
from bossPro.items import BossproItem


class BossSpider(scrapy.Spider):
    name = 'boss'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://www.shicimingju.com/category/all']
    # URL template for the paginated list pages
    url = 'https://www.shicimingju.com/chaxun/zuozhe/1_%d.html'
    page_num = 2

    # Callback that receives the item handed over via meta
    def parse_detail(self, response):
        item = response.meta['item']
        detail = response.xpath('//*[@id="main_right"]/div[1]/div[2]/div[1]/div/text()').extract()
        detail = ''.join(detail)
        item['detail'] = detail
        yield item

    # Parse the list page
    def parse(self, response):
        list_data = response.xpath('//*[@id="main_left"]/div')
        for li in list_data:
            name = li.xpath('./div[@class="zuozhe_list_item"]/h3/a/text()').extract_first()
            detail_url = li.xpath('./div[@class="zuozhe_list_item"]/h3/a/@href').extract_first()
            # The href is relative; str() also guards against None values
            detail_url = 'https://www.shicimingju.com' + str(detail_url)

            item = BossproItem()
            item['name'] = name

            # Send a manual request for the detail page to get its HTML source.
            # Request passing: meta={...} hands this dict to the request's callback.
            yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})

        # Pagination: keep requesting list pages up to page 3
        if self.page_num <= 3:
            new_url = self.url % self.page_num
            self.page_num += 1
            yield scrapy.Request(new_url, callback=self.parse)
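The spider populates two item fields, so bossPro/items.py has to declare them. A minimal sketch (the field names come from the spider above; the rest is standard Scrapy item boilerplate):

import scrapy


class BossproItem(scrapy.Item):
    name = scrapy.Field()    # author name scraped from the list page
    detail = scrapy.Field()  # description text scraped from the detail page

With the item defined, running scrapy crawl boss -o authors.json will dump the collected items to a file.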
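As a side note not taken from the original tutorial: recent Scrapy versions (1.7+) can also pass data to a callback with cb_kwargs instead of meta, and response.follow resolves relative URLs for you. A hedged sketch of the same hand-off written that way (the spider name and structure here are illustrative, not the original code):

import scrapy
from bossPro.items import BossproItem


class AuthorSpider(scrapy.Spider):
    name = 'author_cbkwargs'  # hypothetical name, not from the tutorial
    start_urls = ['https://www.shicimingju.com/category/all']

    def parse(self, response):
        for li in response.xpath('//*[@id="main_left"]/div'):
            item = BossproItem()
            item['name'] = li.xpath('./div[@class="zuozhe_list_item"]/h3/a/text()').extract_first()
            detail_url = li.xpath('./div[@class="zuozhe_list_item"]/h3/a/@href').extract_first()
            if detail_url:
                # cb_kwargs delivers the item to the callback as a named argument
                yield response.follow(detail_url, callback=self.parse_detail, cb_kwargs={'item': item})

    def parse_detail(self, response, item):
        item['detail'] = ''.join(
            response.xpath('//*[@id="main_right"]/div[1]/div[2]/div[1]/div/text()').extract()
        )
        yield item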