Disclaimer: the techniques and implementation recorded in this article are intended solely for learning about web scraping. The author accepts no responsibility for anything anyone does, in whole or in part, on the basis of all or part of this article, nor for the consequences of any such act or omission.
Because Taobao's anti-scraping measures have steadily improved in recent years, crawling it has become harder: you must log in before you can view product information, and the product data is rendered by dynamic loading. This article therefore uses selenium to drive a real browser and scrape product listing details. You need to install selenium and the chromedriver browser driver in advance.
Third-party libraries used:
async-generator==1.10
attrs==21.4.0
beautifulsoup4==4.10.0
bs4==0.0.1
certifi==2021.10.8
cffi==1.15.0
charset-normalizer==2.0.12
cryptography==36.0.1
cssselect==1.1.0
fake-useragent==0.1.11
h11==0.13.0
HTMLParser==0.0.2
idna==3.3
logger==1.4
lxml==4.8.0
outcome==1.1.0
pinyin==0.4.0
pycparser==2.21
pyOpenSSL==22.0.0
pyquery==1.4.3
PySocks==1.7.1
requests==2.27.1
selenium==4.1.2
sniffio==1.2.0
sortedcontainers==2.4.0
soupsieve==2.3.1
trio==0.20.0
trio-websocket==0.9.2
urllib3==1.26.8
wsproto==1.1.0
xpinyin==0.7.6
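With the dependencies installed, it is worth confirming that selenium can actually drive Chrome before going further. A minimal sketch, assuming chromedriver is already on your PATH and matches your installed Chrome version:

from selenium.webdriver import Chrome

# Minimal sanity check: if chromedriver is on PATH and compatible with the
# installed Chrome, this opens a window, prints the page title, and exits.
driver = Chrome()
driver.get("https://www.taobao.com")
print(driver.title)
driver.quit()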
By analyzing Taobao's pages, we can see that the data is loaded dynamically via JS. Inspecting the page source further shows that each page's product data is stored in a variable named g_page_config.
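As a quick illustration, here is a sketch of pulling g_page_config out of a saved copy of a search results page; the file name page.html is hypothetical:

import re, json

# A minimal sketch, assuming "page.html" holds a saved copy of a Taobao
# search-results page. The g_page_config assignment sits on a single line,
# so a non-greedy match up to the closing "};" recovers the JSON object.
with open("page.html", encoding="utf-8") as f:
    html = f.read()

match = re.search(r"g_page_config = ({.*?});", html)
if match:
    config = json.loads(match.group(1))  # the JS object literal parses as JSON
    auctions = config["mods"]["itemlist"]["data"]["auctions"]  # one dict per item
    print(f"{len(auctions)} items on this page")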
The first file is named Login.py:
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from fake_useragent import UserAgent
from time import sleep


# Obtain a logged-in browser session
class Login:
    def __init__(self):
        # All attributes are private to keep the class encapsulated
        self.driver = None
        self.__init_browser()  # initialize the browser
        self.__wait = WebDriverWait(self.driver, 180)  # explicit wait
        self.__main()  # this method could also be called from outside

    def __init_browser(self):
        """Initialize the browser."""
        __options = ChromeOptions()
        # Launch in developer mode; in this mode the webdriver property keeps its normal value
        __options.add_experimental_option('excludeSwitches', ['enable-automation'])
        __options.add_argument(f'user-agent={UserAgent().random}')  # set the request header
        # __options.add_argument(
        #     r'--user-data-dir=C:\Users\35005\AppData\Local\Google\Chrome\User Data\Default')  # load your own profile
        self.driver = Chrome(options=__options)  # apply the options to the browser
        # Hide navigator.webdriver to avoid Taobao's slider captcha
        self.driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined
                })
            """
        })

    def __login(self):
        """Log in."""
        self.driver.maximize_window()  # maximize the window
        self.driver.get("https://login.taobao.com/member/login.jhtml")  # open the login page
        self.driver.find_element(By.XPATH, "//div[@id='login']/div[1]/i").click()  # switch to QR-code login
        # Wait for the QR-code login to complete, i.e. until the search box appears
        self.__wait.until(expected_conditions.presence_of_element_located((By.ID, "q")))
        # cookie = self.driver.execute_script("return document.cookie")  # run JS to read the cookie
        # self.driver.close()  # close the browser
        # return cookie

    def __main(self):
        self.__login()  # perform the login
        sleep(5)  # pause for 5 seconds
        # cookie = self.__login()
        # with open("../Config/cookie.pickle", "wb") as f:
        #     pickle.dump(cookie, f)  # store the cookie in a binary file


if __name__ == '__main__':
    driver = Login()  # the constructor runs the whole login flow
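To confirm that the masking actually works, the two tricks from __init_browser can be exercised in isolation. A sketch under that assumption; the target URL is just an arbitrary page:

from selenium.webdriver import Chrome, ChromeOptions

# Same masking as in Login.__init_browser: developer mode plus a CDP-injected
# script. After it runs, navigator.webdriver should read as undefined.
options = ChromeOptions()
options.add_experimental_option('excludeSwitches', ['enable-automation'])
driver = Chrome(options=options)
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    "source": "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"
})
driver.get("https://www.baidu.com")
print(driver.execute_script("return navigator.webdriver"))  # expected: None
driver.quit()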
The crawler itself, which imports the Login class above:

import re, json
from ConfiCode.Login import Login  # logs in to Taobao
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from threading import Thread
from time import sleep, ctime


class SpiderGoods:
    def __init__(self, u):
        """Initialize the object."""
        self.__driver = None  # holds the browser object
        self.__url = u  # holds the url
        self.__all_lis = []  # holds every page's dictionary
        self.__file = open("./Config/log.txt", "a", encoding="utf-8")

    def __verify(self):
        """Handle the slider captcha."""
        while True:
            try:
                # Look for the slider; find_element raises if it is absent
                slider = self.__driver.find_element(By.XPATH, "//*[@id='nc_1__scale_text']/span")
                self.__file.write(f"Slider captcha detected, handling it! {ctime()}\n")
                self.__file.flush()
                sleep(2)
                print("Slider captcha detected, handling it!")
                # Drag the slider 260 px to the right
                ActionChains(self.__driver).click_and_hold(slider).move_by_offset(260, 0).release().perform()
            except Exception:
                sleep(3)  # no slider found

    def __get_driver(self):
        """Obtain the browser object."""
        l = Login()  # get a logged-in browser
        self.__driver = l.driver

    def __get_page_source(self):
        """Fetch the data."""
        self.__get_driver()
        self.__file.write(f"Logged in successfully! {ctime()}\n")
        self.__file.flush()
        sleep(2)
        print("Logged in successfully!")
        for i in range(100):  # Taobao shows at most 100 result pages
            every_page = []  # holds this page's data
            url = f"{self.__url}&s={44 * i}"  # build the url; each page holds 44 items
            self.__file.write(f"Scraping page {i + 1}! {ctime()}\n")
            self.__file.flush()
            sleep(2)
            print(f"Scraping page {i + 1}")
            self.__driver.get(url)  # send the request
            sleep(10)  # pause for 10 seconds
            try:
                # Grab the variable that holds the page data
                cot = re.search("g_page_config = {(?P<p_page_source>.*?)};",
                                self.__driver.page_source).group("p_page_source")
            except Exception:
                continue
            cot = "{" + cot + "}"  # rebuild the JSON string
            cot_dic = json.loads(cot)  # convert it to a dictionary
            data_lis = cot_dic["mods"]["itemlist"]["data"]["auctions"]  # the list of items
            for j in data_lis:
                # title
                try:
                    title = j["raw_title"]
                except Exception:
                    title = "N/A"
                # url of the detail page
                try:
                    detail_url = f"https://item.taobao.com/item.htm?&id={j['nid']}"
                except Exception:
                    detail_url = "N/A"
                # picture url
                try:
                    pic_url = f"https:{j['pic_url']}"
                except Exception:
                    pic_url = "N/A"
                # price
                try:
                    price = j["view_price"]
                except Exception:
                    price = "-1"
                # shipping location
                try:
                    location = j["item_loc"]
                except Exception:
                    location = "N/A"
                # sales volume
                try:
                    sales = j["view_sales"]
                except Exception:
                    sales = "N/A"
                # shop name
                try:
                    nick = j["nick"]
                except Exception:
                    nick = "N/A"
                # number of comments
                try:
                    comment = j["comment_count"]
                except Exception:
                    comment = "-1"
                v_dic = {"title": title, "detail_url": detail_url, "pic_url": pic_url,
                         "min_price": price, "location": location, "sales": sales,
                         "shop": nick, "comment_count": comment}  # collect the fields
                # print(v_dic)
                every_page.append(v_dic)
            p_dic = {f"page {i + 1}": every_page}  # wrap this page's items
            self.__file.write(f"Finished page {i + 1}! {ctime()}\n")
            self.__file.flush()
            sleep(2)
            print(f"Finished page {i + 1}")
            self.__all_lis.append(p_dic)  # append the page data to the overall list
            # print(self.__all_lis)
            sleep(5)  # pause for 5 seconds

    def __save_to_json(self):
        """Save the data."""
        self.__get_page_source()
        name = self.__url.split("=")[-1]  # the search keyword
        with open(f"./Source/{name}.json", "w", encoding="utf-8") as f:
            json.dump(self.__all_lis, f, indent=2, ensure_ascii=False)

    def main(self):
        """Program entry point."""
        # Daemon thread that endlessly checks for the slider captcha;
        # as a daemon it will not keep the program alive once scraping is done
        t1 = Thread(target=self.__verify, daemon=True)
        t1.start()
        t2 = Thread(target=self.__save_to_json)
        t2.start()
        t2.join()
        self.__driver.close()  # close the driver
        self.__file.write(f"Scraping finished! {ctime()}\n")
        self.__file.flush()
        sleep(2)
        print("Scraping finished!")
        sleep(5)
        self.__file.close()


if __name__ == '__main__':
    name = input("Enter a keyword:\n")
    s = SpiderGoods(f"https://s.taobao.com/search?q={name}")
    s.main()
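Once a run finishes, the results can be read back with the standard json module. A minimal sketch, assuming the keyword entered at the prompt was 手机, so the output landed in ./Source/手机.json:

import json

# A minimal sketch for inspecting the scraped output; the file name mirrors
# the keyword entered at the prompt (手机 is just an example).
with open("./Source/手机.json", encoding="utf-8") as f:
    pages = json.load(f)

for page in pages:  # one dict per results page
    for page_name, items in page.items():
        print(page_name, len(items), "items")
        for item in items[:3]:  # show the first few titles and prices
            print("   ", item["title"], item["min_price"])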
The full source code can be downloaded at https://download.csdn.net/download/qq_62789540/83519171 (no CSDN points required!).