Python教程

爬斗鱼直播信息

本文主要是介绍爬斗鱼直播信息,对大家解决编程问题具有一定的参考价值,需要的程序猿们随着小编来一起学习吧!

总结思路:

1.循环遍历得到所有的URL

2.使用线程池发送所有的请求,获取响应。

3.在2的响应字符串中使用json.loads()方法转换为字典后提取需要的数据信息。

4.将信息数据保存到MongoDB中

 

 

 

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/16 22:12
# @Author : Lhtester
# @Site : 
# @File : douyu.py
# @Software: PyCharm
import requests
import random
import time
import pymongo
import json
from concurrent.futures import ThreadPoolExecutor,wait,ALL_COMPLETED

class DouyuSpider:
    """Spider that scrapes Douyu live-stream listing pages.

    Fetches the mixList JSON endpoint page by page with a thread pool and
    stores each room's title, streamer nickname, category name and online
    viewer count into the MongoDB collection mydb.douyu.
    """
    def __init__(self):
        """Set up HTTP headers, base URL, MongoDB collection and thread pool."""
        # User-Agent header so the requests look like a real browser
        self.headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0;WOW64;Trident/7.0;rv:11.0) like Gecko'}
        # Base URL; the page number is appended in startWork
        self.baseURL='https://www.douyu.com/gapi/rkc/directory/mixList/0_0/'
        # NOTE: despite the name, this is the MongoDB *collection* mydb.douyu,
        # not the client object; name kept for interface compatibility
        self.client =pymongo.MongoClient('mongodb://localhost:27017/')['mydb']['douyu']
        # Thread pool used to fetch listing pages concurrently
        self.executor =ThreadPoolExecutor(max_workers=10)

    def parse_page(self, url):
        """Fetch one listing page, parse its JSON and store every room.

        :param url: full page URL (baseURL + page number)
        """
        print('{}爬取中'.format(url))

        try:
            # Random 0-2 second sleep to throttle requests and avoid a ban
            time.sleep(random.random() * 2 )
            # FIX: added a timeout so a stalled request cannot hang a pool
            # worker forever (the original call could block indefinitely)
            content = requests.get(url, headers=self.headers, timeout=10).text
            # Parse the JSON response into a dict
            ret =json.loads(content)
            # 'data' -> 'rl' is the room list in the API response
            datas= ret['data']['rl']
            for data in datas:
                item={}
                # Room title
                item['title'] = data['rn']
                # Streamer nickname
                item['pname'] = data['nn']
                # Category name. NOTE(review): key 'tanme' looks like a typo
                # for 'tname'; kept as-is so existing stored data stays consistent
                item['tanme'] = data['c2name']
                # Online viewer count
                item['number'] = data['ol']
                # insert_one: insert() is deprecated in pymongo
                self.client.insert_one(item)

            # FIX: moved out of the for loop -- report success once per page,
            # not once per room as the original did
            print('{}爬取成功'.format(url))
        except Exception as ex:
            print(ex)
            print('{}爬取失败'.format(url))

    def startWork(self):
        """Build all page URLs, fan them out to the thread pool and wait."""
        print('begin...')
        # Pages 1..145 of the listing
        urls = [self.baseURL + str(i) for i in range(1,146)]
        # Submit one task per page to the thread pool
        all_task= [self.executor.submit(self.parse_page,url) for url in urls]
        # Block the main thread until every page has been processed
        wait(all_task,return_when=ALL_COMPLETED)

        print('end...')

if __name__ =="__main__":
    # Entry point: build the spider and kick off the crawl
    DouyuSpider().startWork()

 

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/16 22:12
# @Author : Lhtester
# @Site : 
# @File : douyu.py
# @Software: PyCharm
import requests
import random
import time
import pymongo
import json
from concurrent.futures import ThreadPoolExecutor,wait,ALL_COMPLETED

class DouyuSpider:
    """Spider that scrapes Douyu live-stream listing pages.

    Fetches the mixList JSON endpoint page by page with a thread pool and
    stores each room's data into the MongoDB collection mydb.douyu.

    FIX: this duplicated copy of the class had its indentation destroyed
    (method bodies at column 0 inside the class -- a SyntaxError); the
    structure has been restored.
    """
    def __init__(self):
        """Set up HTTP headers, base URL, MongoDB collection and thread pool."""
        # User-Agent header so the requests look like a real browser
        self.headers = {'user-agent':'Mozilla/5.0 (Windows NT 10.0;WOW64;Trident/7.0;rv:11.0) like Gecko'}
        # Base URL; the page number is appended in startWork
        self.baseURL='https://www.douyu.com/gapi/rkc/directory/mixList/0_0/'
        # MongoDB collection object (mydb database, douyu collection)
        self.client =pymongo.MongoClient('mongodb://localhost:27017/')['mydb']['douyu']
        # Thread pool used to fetch listing pages concurrently
        self.executor =ThreadPoolExecutor(max_workers=10)

    def parse_page(self, url):
        """Fetch one listing page, parse its JSON and store every room.

        :param url: full page URL (baseURL + page number)
        """
        print('{}爬取中'.format(url))

        try:
            # Random 0-2 second sleep to throttle requests and avoid a ban
            time.sleep(random.random() * 2 )
            # FIX: added a timeout so a stalled request cannot hang a worker
            content = requests.get(url, headers=self.headers, timeout=10).text
            # Parse the JSON response into a dict
            ret =json.loads(content)
            # 'data' -> 'rl' is the room list in the API response
            datas= ret['data']['rl']
            for data in datas:
                item={}
                # Room title
                item['title'] = data['rn']
                # Streamer nickname
                item['pname'] = data['nn']
                # Category name. NOTE(review): key 'tanme' looks like a typo
                # for 'tname'; kept as-is for data compatibility
                item['tanme'] = data['c2name']
                # Online viewer count
                item['number'] = data['ol']
                # insert_one: insert() is deprecated in pymongo
                self.client.insert_one(item)

            # FIX: moved out of the for loop -- report success once per page
            print('{}爬取成功'.format(url))
        except Exception as ex:
            print(ex)
            print('{}爬取失败'.format(url))

    def startWork(self):
        """Build all page URLs, fan them out to the thread pool and wait."""
        print('begin...')
        # Pages 1..145 of the listing
        urls = [self.baseURL + str(i) for i in range(1,146)]
        # Submit one task per page to the thread pool
        all_task= [self.executor.submit(self.parse_page,url) for url in urls]
        # Block the main thread until every page has been processed
        wait(all_task,return_when=ALL_COMPLETED)

        print('end...')

if __name__ =="__main__":
    # FIX: restored the indentation of the guard body (it was at column 0,
    # which is a SyntaxError after the if header)
    # Create the spider object
    spider = DouyuSpider()
    # Start crawling
    spider.startWork()

















这篇关于爬斗鱼直播信息的文章就介绍到这儿,希望我们推荐的文章对大家有所帮助,也希望大家多多支持为之网!