Python crawler framework: core Scrapy code for scraping novels

Target site: 奇书网 (https://www.qisuu.la)

The spider below walks the site's ten category listings (soft/sort01 through soft/sort010), reads the last page number out of each listing's pagination bar, requests every list page, and then follows each novel's link to its detail page, where it scrapes the book's metadata and its download address. For example, page 3 of category 2 lives at https://www.qisuu.la/soft/sort02/index_3.html.

import re
import scrapy
from bs4 import BeautifulSoup
from scrapy.http import Request

class Myspider(scrapy.Spider):

    name = 'qisuu'
    allowed_domains = ['qisuu.la']
    base_url = 'https://www.qisuu.la/soft/sort0'
    url_suffix = '.html'

    def start_requests(self):
        # Request page 1 of each of the ten category listings (sort01..sort010).
        for i in range(1, 11):
            url = f'{self.base_url}{i}/index_1{self.url_suffix}'
            yield Request(url, self.parse)

    def parse(self, response):
        # Pull the last page number out of the pagination bar, between the
        # "下一页" (next page) and "尾页" (last page) links.
        max_num = re.findall(r"下一页</a>.+?<a href='/soft/sort0.+?/index_(.+?).html'>尾页</a>", response.text, re.S)[0]
        # Drop the trailing "1.html" so any page number can be appended.
        base = str(response.url)[:-6]
        for i in range(1, int(max_num) + 1):
            url = f'{base}{i}{self.url_suffix}'
            yield Request(url, callback=self.get_name)

    def get_name(self, response):
        lis = BeautifulSoup(response.text, 'lxml').find('div', class_="listBox").find_all('li')
        for li in lis:
            novel_name = li.find('a').get_text()  # novel title
            novel_information = li.find('div', class_="s").get_text()  # novel metadata (scraped but unused below)
            novel_introduce = li.find('div', class_="u").get_text()  # novel synopsis (scraped but unused below)
            novel_url = f"https://www.qisuu.la{li.find('a')['href']}"  # detail-page link
            yield Request(novel_url, callback=self.get_chapterurl, meta={'name': novel_name, 'url': novel_url})

    def get_chapterurl(self, response):
        novel_name = str(response.meta['name'])
        lis = BeautifulSoup(response.text, 'lxml').find('div', class_="detail_right").find_all('li')
        novel_clicks = lis[0].get_text()  # click count
        novel_filesize = lis[1].get_text()  # file size
        novel_filetype = lis[2].get_text()  # book format
        novel_update_date = lis[3].get_text()  # last updated
        novel_status = lis[4].get_text()  # serialization status
        novel_author = lis[5].get_text()  # author
        novel_environment = lis[6].get_text()  # runtime environment
        lis = BeautifulSoup(response.text, 'lxml').find('div', class_="showDown").find_all('li')
        # The download address sits in an onclick handler with three quoted
        # arguments; keep the middle one.
        novel_download_url = re.findall(r"'.+?','(.+?)','.+?'", str(lis[-1]), re.S)[0]
        print(novel_name)
        print(novel_clicks)
        print(novel_filesize)
        print(novel_filetype)
        print(novel_update_date)
        print(novel_status)
        print(novel_author)
        print(novel_environment)
        print(novel_download_url)
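The trickiest extraction above is the download address: the last li in the showDown block carries it inside an onclick handler with three quoted arguments, and the regex keeps the middle one. A standalone check of that assumption; the sample markup and the downloadBook name below are invented for illustration, and the real attribute on qisuu.la may differ:

import re

# Invented sample resembling the onclick markup the spider expects.
sample = "<li><a onclick=\"downloadBook('qisuu','https://dzs.qisuu.la/txt/1.txt','book')\">下载</a></li>"
print(re.findall(r"'.+?','(.+?)','.+?'", sample, re.S)[0])
# -> https://dzs.qisuu.la/txt/1.txt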

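As written, get_chapterurl only prints what it scrapes. A minimal sketch of how the same fields could be yielded as a dict instead, so Scrapy's feed exports can persist them; the field names are illustrative, not part of the original code:

        # Sketch: at the end of get_chapterurl, yield the fields instead of
        # printing them so Scrapy can export the items.
        yield {
            'name': novel_name,
            'clicks': novel_clicks,
            'filesize': novel_filesize,
            'filetype': novel_filetype,
            'updated': novel_update_date,
            'status': novel_status,
            'author': novel_author,
            'environment': novel_environment,
            'download_url': novel_download_url,
        }

The spider can then be run straight from its file and the output written to disk, for example with scrapy runspider qisuu_spider.py -o novels.json (the filename qisuu_spider.py is assumed here).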




