获取山东省济南市每天的天气情况。
需要获取四个数据:天气、温度、风向、风级。
url地址:http://www.weather.com.cn/weather/101120101.shtml
该页面通过 GET 请求返回 HTML 数据,其中包含七天的天气图示数据,故可用 bs4 对页面进行解析。
import os
import random
import re
import time

import requests
from bs4 import BeautifulSoup


def getHtml(url):
    """Fetch *url* and return it parsed as a BeautifulSoup object.

    Retries up to three times on timeouts / connection errors; returns
    None when every attempt fails (the original returned [], which made
    get_content crash later with a confusing AttributeError).
    """
    # The server rejects a fixed header, so a User-Agent is picked at
    # random per request to avoid being identified as a crawler.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    ]
    header = {'User-Agent': random.choice(user_agent_list)}
    # Retry loop: up to three attempts before giving up.
    for _ in range(3):
        try:
            with requests.get(url=url, headers=header, stream=True, timeout=20) as rep:
                # The page is served as utf-8; set it explicitly to
                # avoid mojibake in the Chinese text.
                rep.encoding = 'utf-8'
                return BeautifulSoup(rep.text, 'html.parser')
        except (requests.exceptions.RequestException, ValueError):
            continue
    return None


def get_content(soup, i=0):
    """Extract one day's forecast from the parsed 7-day weather page.

    The page carries seven days of data; *i* selects the day (0 = today,
    the default, which keeps the original call signature working).

    Returns a (weather, temperature, wind_direction, wind_scale) tuple
    of strings.
    """
    # BUG FIX: the original did `weather.replace('\\n', '')`, which both
    # discarded the result (str is immutable) and matched a literal
    # backslash-n rather than a newline character.
    weather = soup.findAll(name="p", attrs={"class": "wea"})[i].text.replace('\n', '')
    # Today's high/low temperature; strip() removes surrounding whitespace.
    temperature = soup.findAll(name="p", attrs={"class": "tem"})[i].text.strip()
    # Wind direction lives in the title attribute of the first <span>.
    wind_direction = soup.findAll(name="p", attrs={"class": "win"})[i].find(name="span")['title']
    # Wind scale is the text of the <i> element.
    wind_scale = soup.findAll(name="p", attrs={"class": "win"})[i].find(name="i").text
    return weather, temperature, wind_direction, wind_scale


if __name__ == '__main__':
    url = 'http://www.weather.com.cn/weather/101120101.shtml'
    soup = getHtml(url)
    if soup is None:
        # Fail loudly instead of crashing inside get_content.
        raise SystemExit("failed to fetch " + url + " after 3 attempts")
    weather, temperature, wind_direction, wind_scale = get_content(soup)
    with open(r'今日天气.txt', mode='w', encoding='utf-8') as f:
        f.write("天气:" + weather + "\n")
        f.write("气温:" + temperature + "\n")
        f.write("风向:" + wind_direction + "\n")
        f.write("风级:" + wind_scale + "\n")
在功能一的基础上,利用datetime.timedelta(days=i)
得到七日日期
import datetime
import os
import random

import requests
from bs4 import BeautifulSoup


def getHtml(url):
    """Fetch *url* and return it parsed as a BeautifulSoup object.

    Retries up to three times on timeouts / connection errors; returns
    None when every attempt fails (the original returned [], which made
    get_content crash later with a confusing AttributeError).
    """
    # The server rejects a fixed header, so a User-Agent is picked at
    # random per request to avoid being identified as a crawler.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
        "Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
    ]
    header = {'User-Agent': random.choice(user_agent_list)}
    # Retry loop: up to three attempts before giving up.
    for _ in range(3):
        try:
            with requests.get(url=url, headers=header, stream=True, timeout=20) as rep:
                # The page is served as utf-8; set it explicitly to
                # avoid mojibake in the Chinese text.
                rep.encoding = 'utf-8'
                return BeautifulSoup(rep.text, 'html.parser')
        except (requests.exceptions.RequestException, ValueError):
            continue
    return None


def get_content(soup, i):
    """Extract day *i*'s forecast from the parsed 7-day weather page.

    The page carries seven days of data; i=0 is today, 1..6 the days
    after.

    Returns a (weather, temperature, wind_direction, wind_scale) tuple
    of strings.
    """
    # BUG FIX: the original did `weather.replace('\\n', '')`, which both
    # discarded the result (str is immutable) and matched a literal
    # backslash-n rather than a newline character.
    weather = soup.findAll(name="p", attrs={"class": "wea"})[i].text.replace('\n', '')
    # Day i's high/low temperature; strip() removes surrounding whitespace.
    temperature = soup.findAll(name="p", attrs={"class": "tem"})[i].text.strip()
    # Wind direction lives in the title attribute of the first <span>.
    wind_direction = soup.findAll(name="p", attrs={"class": "win"})[i].find(name="span")['title']
    # Wind scale is the text of the <i> element.
    wind_scale = soup.findAll(name="p", attrs={"class": "win"})[i].find(name="i").text
    return weather, temperature, wind_direction, wind_scale


if __name__ == '__main__':
    file = r"./天气.txt"
    url = 'http://www.weather.com.cn/weather/101120101.shtml'
    soup = getHtml(url)
    if soup is None:
        # Fail loudly instead of crashing inside get_content.
        raise SystemExit("failed to fetch " + url + " after 3 attempts")
    # Open once in 'w' mode: equivalent to the original's delete-then-append
    # per iteration, but with a single file handle. (Also drops the unused
    # `path = os.getcwd()` local.)
    with open(file, mode='w', encoding='utf-8') as f:
        for i in range(7):
            # Renamed from `time` to avoid shadowing the stdlib module name.
            forecast_date = datetime.date.today() + datetime.timedelta(days=i)
            weather, temperature, wind_direction, wind_scale = get_content(soup, i)
            f.write("日期: " + str(forecast_date) + "\n")
            f.write("天气:" + weather + "\n")
            f.write("气温:" + temperature + "\n")
            f.write("风向:" + wind_direction + "\n")
            f.write("风级:" + wind_scale + "\n")
            f.write("\n")
总体不难,比较实用,mark一下。