Simulated login is the first step toward a Weibo web crawler: the current Weibo web version sits behind the Sina Visitor System, and more content only becomes available after logging in. This article uses Selenium to log in once, save the cookies locally, and then reuse those local cookies on later runs, so there is no need to scan a QR code or type a password every time.
First, here are two simple helpers for saving and loading cookies:
```python
import os
import json


class CookieLogin:
    def __init__(self, f_path):
        """
        Initialize the object
        :param f_path: path of the file where cookies are saved
        """
        self.f_path = f_path

    def save_cookies(self, data, encoding="utf-8"):
        """
        Save cookies to disk
        :param data: the data to save
        :param encoding: file encoding, utf-8 by default
        """
        with open(self.f_path, "w", encoding=encoding) as f_w:
            json.dump(data, f_w)
        print("save done!")

    def load_cookies(self, encoding="utf-8"):
        """
        Load cookies from disk
        :param encoding: file encoding, utf-8 by default
        """
        if os.path.isfile(self.f_path):
            with open(self.f_path, "r", encoding=encoding) as f_r:
                user_cookies = json.load(f_r)
            return user_cookies
```
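As a quick sanity check, here is a roundtrip example; it assumes the class above lives in cookies_usage.py (consistent with the import used later), and the dummy cookie data is purely for illustration:

```python
# Roundtrip test for CookieLogin; assumes the class above is saved as cookies_usage.py.
from cookies_usage import CookieLogin

login = CookieLogin("cookie.json")
login.save_cookies([{"name": "demo", "value": "123"}])  # any JSON-serializable data works
print(login.load_cookies())  # -> [{'name': 'demo', 'value': '123'}]
```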
Next, simulate the login with Selenium: after manually scanning the QR code and landing on the logged-in page, call wd.get_cookies() and save the result:
```python
from time import sleep

from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

from untitled.py爬虫项目.cookies_usage import CookieLogin

prefs = {
    'profile.default_content_setting_values': {
        'notifications': 2  # suppress Chrome notifications
    },
    'credentials_enable_service': False,       # disable Chrome's built-in save-password prompt
    'profile.password_manager_enabled': False  # disable Chrome's password manager
}

# Create a ChromeOptions configuration object
options = webdriver.ChromeOptions()
options.add_experimental_option('prefs', prefs)
# Developer mode: hide the "Chrome is being controlled by automated software" banner
options.add_experimental_option('excludeSwitches', ['enable-automation'])
options.add_argument('--disable-gpu')  # Chrome docs recommend this flag to work around a bug

wd = webdriver.Chrome(service=Service(r'D:\Study\chromedriver.exe'), options=options)

# Maximize the window
wd.maximize_window()
wd.implicitly_wait(5)

url = "https://weibo.com/"
wd.get(url=url)

# Log in from the home page; scanning the QR code is enough
wd.find_element(By.XPATH, '//*[@id="__sidebar"]/div/div[1]/div[1]/div/button').click()
sleep(10)

# Save the cookies locally
cookies = wd.get_cookies()
cookie_fname = 'cookie.json'
login = CookieLogin(cookie_fname)
login.save_cookies(cookies)

wd.close()
wd.quit()
```
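The fixed sleep(10) only works if the QR scan finishes within ten seconds. A more robust variant, sketched below, polls the browser for a session cookie before saving; the cookie name 'SUB' is an assumption about Weibo's login cookie, and the timeout is arbitrary:

```python
from time import sleep

def wait_for_login(wd, cookie_name='SUB', timeout=120):
    """Poll the browser once per second until `cookie_name` appears or `timeout` expires."""
    for _ in range(timeout):
        # 'SUB' is assumed to be the cookie Weibo sets on successful login
        if any(c['name'] == cookie_name for c in wd.get_cookies()):
            return True
        sleep(1)
    return False

# Replace the fixed sleep(10) after the click with:
if wait_for_login(wd):
    login = CookieLogin('cookie.json')
    login.save_cookies(wd.get_cookies())
```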
In this test, be sure to allow enough sleep time after opening the page, at least 4 seconds, so that the logged-out cookies are fully deleted before the locally saved logged-in cookies are written back:
url = "https://weibo.com/" wd.get(url=url) # cooikes = wd.get_cookies() # for cooike in cooikes: # print(cooike) sleep(4) wd.delete_all_cookies() # 持久化登录,之后登录就不需要上面的扫二维码 login = CookieLogin("cookie.json") cookies = login.load_cookies() try: for cookie in cookies: cookie_dict = { 'domain': '.weibo.com', 'name': cookie.get('name'), 'value': cookie.get('value'), "expires": '', 'path': '/', 'httpOnly': False, 'HostOnly': False, 'Secure': False } print(cookie_dict) wd.add_cookie(cookie_dict) except Exception as e: print(e) sleep(5) wd.refresh() # cooikes2 = wd.get_cookies() # for cooike in cooikes2: # print(cooike) sleep(5) # wd.get(url) url = "https://s.weibo.com/weibo?q=%E6%96%B0%E5%86%A0%E7%96%AB%E6%83%85" wd.get(url) sleep(2) # cooikes3 = wd.get_cookies() # for cooike in cooikes2: # print(cooike) wd.close() wd.quit()
Reference blog:
Python Selenium.WebDriver 对Cookies的处理及应用『模拟登录』