scrapy startproject wangyiPro        # create the project wangyiPro
cd wangyiPro                         # enter the project directory
scrapy genspider wangyi news.163.com # create the spider file wangyi
# settings.py
ROBOTSTXT_OBEY = False
LOG_LEVEL = 'ERROR'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
First, determine the starting URL: https://news.163.com/. We scrape the 国际 (International) and 国内 (Domestic) sections of the NetEase News homepage. The homepage itself is not dynamically loaded, so the URLs of these two sections can be extracted from the normal response.
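To sanity-check that the section links really are present in the static HTML (rather than injected by JavaScript), a quick probe can be run outside Scrapy. This is a minimal sketch assuming the requests and lxml packages are installed; the XPath mirrors the one used in the spider below.

import requests
from lxml import etree

# fetch the NetEase News homepage with a plain HTTP request (no JS execution)
resp = requests.get('https://news.163.com/',
                    headers={'User-Agent': 'Mozilla/5.0'})
tree = etree.HTML(resp.text)

# same XPath as the spider's parse(); if the hrefs print here,
# the section URLs are available without dynamic loading
li_list = tree.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
for li in li_list[1:3]:  # indexes 1 and 2: Domestic and International
    print(li.xpath('./a/@href'))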
class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']
    model_urls = []  # the URL of each section we want to crawl

    options = webdriver.ChromeOptions()
    options.add_experimental_option("excludeSwitches", ["enable-logging"])
    # instantiate one global browser object shared by the whole spider
    bro = webdriver.Chrome(executable_path=r'E:/chromedriver/chromedriver.exe', options=options)

    # parse the homepage and extract the URL of each target section
    def parse(self, response):
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        indexs = [1, 2]
        for index in indexs:
            model_li = li_list[index]
            model_url = model_li.xpath('./a/@href').extract_first()
            self.model_urls.append(model_url)
        # print(self.model_urls)

        # send a request to each section URL
        for url in self.model_urls:
            yield scrapy.Request(url=url, callback=self.parse_model)
After obtaining the section URLs, we issue a request for each one with yield scrapy.Request(url=url, callback=self.parse_model) and hand the response to the next parse function, parse_model. The news titles and detail-page URLs inside each section are dynamically loaded, so we use the downloader middleware's process_response to intercept the unusable responses, re-request the pages with selenium, and replace the original response bodies with the page source rendered by selenium.
from time import sleep
# the response class that scrapy already provides
from scrapy.http import HtmlResponse


class WangyiproDownloaderMiddleware:

    def process_request(self, request, spider):
        return None

    # intercept every response object
    # the whole project issues 1 + 2 + n requests, so there are also 3 + n responses
    # only the 2 section responses fail to meet our needs (their bodies lack the dynamically loaded data)
    # we simply tamper with the bodies of those 2 specific responses
    def process_response(self, request, response, spider):
        # pick out the 2 section responses from all intercepted responses
        if request.url in spider.model_urls:
            bro = spider.bro
            # response here is one of the 2 unusable responses;
            # fetch a usable body with selenium and swap it in
            bro.get(request.url)  # request the section URL with selenium
            sleep(2)
            bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
            sleep(2)
            # grab everything rendered on the section page, including the dynamically loaded data
            page_text = bro.page_source
            # note: we cannot simply assign response.text = page_text (response bodies are immutable),
            # so we return a brand-new response object that replaces the old, unusable one
            return HtmlResponse(url=request.url, body=page_text, encoding='utf-8', request=request)
        else:
            return response

    def process_exception(self, request, exception, spider):
        pass
Enable the downloader middleware in settings.py:
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}
Once selenium has produced a usable response object, we can carry on with data parsing.
# parse each section page: news title and detail-page URL (dynamically loaded)
def parse_model(self, response):
    div_list = response.xpath('/html/body/div/div[3]/div[3]/div[1]/div[1]/div/ul/li/div/div')
    for div in div_list:
        # news title
        title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
        # detail-page URL
        href = div.xpath('./div/div[1]/h3/a/@href').extract_first()
        # print(title + '----' + href)
        if href:
            # instantiate an item and store the title
            item = WangyiproItem()
            item['title'] = title
            # request the news detail page
            yield scrapy.Request(url=href, callback=self.parse_detail, meta={'item': item})
The detail-page URL we just parsed is handed to the next parse function via callback=self.parse_detail, and the half-filled item travels with the request through meta={'item': item}.
def parse_detail(self, response):
    # parse the news body text
    content = response.xpath('//*[@id="content"]/div[2]/p/text()').extract()
    all_content = ''.join(content)
    # retrieve the item passed along in meta and complete it
    item = response.meta['item']
    item['all_content'] = all_content
    # hand the item over to the pipeline
    yield item
# items.py
import scrapy


class WangyiproItem(scrapy.Item):
    title = scrapy.Field()
    all_content = scrapy.Field()
# pipelines.py
import pymysql


class MysqlPipeline(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                                    password='xxxxxxx', db='naruto', charset='utf8')

    def process_item(self, item, spider):
        self.cursor = self.conn.cursor()
        # parameterized query: pymysql handles quoting, so titles containing quotes do not break the SQL
        sql = 'insert into wangyi values (%s, %s)'
        # transaction handling
        try:
            self.cursor.execute(sql, (item['title'], item['all_content']))
            self.conn.commit()
        except Exception as e:
            print(e)
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
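The pipeline assumes the naruto database already contains a wangyi table with two string columns. The original project does not show its schema, so the following one-off setup sketch uses an assumed layout (title, all_content) matching the INSERT above; adjust it to your own needs.

import pymysql

# one-off setup: create the table the pipeline writes into
# (column names and types are an assumption, not taken from the original project)
conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                       password='xxxxxxx', db='naruto', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute(
        'CREATE TABLE IF NOT EXISTS wangyi ('
        'title VARCHAR(255), '
        'all_content TEXT)'
    )
conn.commit()
conn.close()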
Enable the pipeline in settings.py:
ITEM_PIPELINES = {
    # 'wangyiPro.pipelines.WangyiproPipeline': 300,
    'wangyiPro.pipelines.MysqlPipeline': 300,
}
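For reference, here is the complete spider file (wangyi.py):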
import scrapy
from selenium import webdriver
from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://news.163.com/']
    model_urls = []  # the URL of each section we want to crawl

    options = webdriver.ChromeOptions()
    options.add_experimental_option("excludeSwitches", ["enable-logging"])
    # instantiate one global browser object shared by the whole spider
    bro = webdriver.Chrome(executable_path=r'E:/chromedriver/chromedriver.exe', options=options)

    # parse the homepage and extract the URL of each target section
    def parse(self, response):
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        indexs = [1, 2]
        for index in indexs:
            model_li = li_list[index]
            model_url = model_li.xpath('./a/@href').extract_first()
            self.model_urls.append(model_url)
        # print(self.model_urls)

        # send a request to each section URL
        for url in self.model_urls:
            yield scrapy.Request(url=url, callback=self.parse_model)

    # parse each section page: news title and detail-page URL (dynamically loaded)
    def parse_model(self, response):
        # parsing the title straight from the original response fails, because the data is dynamically loaded;
        # the downloader middleware swaps in a selenium-rendered body so that this response does contain it
        div_list = response.xpath('/html/body/div/div[3]/div[3]/div[1]/div[1]/div/ul/li/div/div')
        for div in div_list:
            title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
            href = div.xpath('./div/div[1]/h3/a/@href').extract_first()
            # print(title + '----' + href)
            if href:
                # instantiate an item
                item = WangyiproItem()
                item['title'] = title
                # request the news detail page, passing the item along via meta
                yield scrapy.Request(url=href, callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        # parse the news body text
        content = response.xpath('//*[@id="content"]/div[2]/p/text()').extract()
        all_content = ''.join(content)
        item = response.meta['item']
        item['all_content'] = all_content
        yield item

    # inherited from the spider base class; runs once, right before the spider shuts down
    def closed(self, spider):
        self.bro.quit()
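With the settings, middleware, pipeline, and spider in place, the project is run from the project root with the spider name created by genspider above:

scrapy crawl wangyi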