# Pages to crawl (Maoyan TOP100 board, offsets 0-90 in steps of 10):
# https://maoyan.com/board/4?offset=0
# https://maoyan.com/board/4?offset=10
# ...
# https://maoyan.com/board/4?offset=90
import json
import re
import time

import requests
from requests.exceptions import RequestException

# from fake_useragent import UserAgent
def get_one_page(url):
    """Download one board page and return its body as text.

    :param url: page URL to fetch
    :return: response text on HTTP 200, otherwise None (including any
             network/timeout failure raised by requests)
    """
    # NOTE(review): the UA string looks truncated (no closing ')') — confirm intended.
    headers = {
        'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0'
    }
    try:
        response = requests.get(url, timeout=30, headers=headers)
    except RequestException:
        # Any transport-level failure is reported as "no page".
        return None
    return response.text if response.status_code == 200 else None
def parse_one_page(html):
    """Extract structured movie records from a TOP100 board page.

    One large DOTALL regex is matched against the raw HTML; every
    ``<dd>`` entry yields a dict with index, poster image URL, title,
    actors, release time and score.

    :param html: page HTML (coerced with str(), so None is tolerated)
    :return: generator of movie dicts
    """
    movie_re = re.compile(
        '<dd>.*?board-index.*?>(.*?)'
        '</i>.*?data-src="(.*?)".*?name.*?a.*?>(.*?)'
        '</a>.*?star.*?>(.*?)'
        '</p>.*?releasetime.*?>(.*?)'
        '</p>.*?integer.*?>(.*?)'
        '</i>.*?fraction.*?>(.*?)'
        '</i>.*?</dd>',
        re.S,
    )
    for index, image, title, star, release, integer, fraction in movie_re.findall(str(html)):
        yield {
            'index': index,
            'image': image,
            'title': title.strip(),
            # Drop the 3-char "主演:" prefix when the field is non-trivial.
            'actor': star.strip()[3:] if len(star) > 3 else '',
            # Drop the 5-char "上映时间:" prefix when the field is non-trivial.
            'time': release.strip()[5:] if len(release) > 5 else '',
            # Score arrives split into integer and fractional halves.
            'score': integer.strip() + fraction.strip(),
        }
def write_to_file(content):
    """Append one movie record to ``result.txt`` as a UTF-8 JSON line.

    The dict is serialized with ``json.dumps`` (ensure_ascii=False so
    Chinese text stays readable) and terminated with a comma + newline.

    :param content: dict describing one movie
    :return: None
    """
    line = json.dumps(content, ensure_ascii=False) + ',\n'
    with open('result.txt', 'a', encoding='utf-8') as out:
        out.write(line)
def main(offset):
    """Crawl one page of the TOP100 board and persist every movie found.

    :param offset: paging offset (0, 10, ..., 90 — ten movies per page)
    :return: None
    """
    # Fix: use https — the board's real address is https://maoyan.com
    # (see the sample URLs at the top of the file); plain http costs an
    # extra redirect or may be rejected outright.
    url = 'https://maoyan.com/board/4?offset=' + str(offset)
    html = get_one_page(url)
    if html is None:
        # Request failed or non-200 response: nothing to parse.
        return
    for item in parse_one_page(html):
        print(item)
        write_to_file(item)
if __name__ == '__main__':
    # Fix: crawl all ten pages (offset 0, 10, ..., 90). The original
    # range(1) fetched only the first page, contradicting the ten-page
    # TOP100 crawl the functions' docstrings describe.
    for page in range(10):
        main(offset=page * 10)
        # Pause between requests to avoid hammering the site / rate limits.
        time.sleep(5)
# NOTE(review): this line was a verbatim single-line duplicate of the entire
# script above (imports, get_one_page, parse_one_page, write_to_file, main and
# the __main__ guard). Removed as redundant — keeping it would redefine every
# function and run the crawl a second time; the definitions above are
# authoritative.