This project involves crawling detail pages.
Directory structure:
kaoshi_bqg.py
import scrapy
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from ..items import BookBQGItem


class KaoshiBqgSpider(scrapy.Spider):
    name = 'kaoshi_bqg'
    allowed_domains = ['biquge5200.cc']
    start_urls = ['https://www.biquge5200.cc/xuanhuanxiaoshuo/']

    # NOTE: rules only take effect on a CrawlSpider; on a plain scrapy.Spider they are ignored
    rules = (
        # rule matching the article list pages
        Rule(LinkExtractor(allow=r'https://www.biquge5200.cc/xuanhuanxiaoshuo/'), follow=True),
        # rule matching the article detail pages
        Rule(LinkExtractor(allow=r'.+/[0-9]{1,3}_[0-9]{2,6}/'), callback='parse_item', follow=False),
    )

    # book titles on the listing page
    def parse(self, response):
        a_list = response.xpath('//*[@id="newscontent"]/div[1]/ul//li//span[1]/a')
        for li in a_list:
            name = li.xpath(".//text()").get()
            detail_url = li.xpath(".//@href").get()
            yield scrapy.Request(url=detail_url, callback=self.parse_book, meta={'info': name})

    # all chapter names of a single book
    def parse_book(self, response):
        name = response.meta.get('info')
        list_a = response.xpath('//*[@id="list"]/dl/dd[position()>20]//a')
        for li in list_a:
            chapter = li.xpath(".//text()").get()
            url = li.xpath(".//@href").get()
            yield scrapy.Request(url=url, callback=self.parse_content, meta={'info': (name, chapter)})

    # content of each chapter
    def parse_content(self, response):
        name, chapter = response.meta.get('info')
        # list of paragraph strings; joined into one text in the pipeline
        content = response.xpath('//*[@id="content"]//p/text()').getall()
        item = BookBQGItem(name=name, chapter=chapter, content=content)
        yield item
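As noted in the comment above, the rules tuple is only processed by a CrawlSpider subclass; on a plain scrapy.Spider it is silently ignored and only the hand-written parse callbacks run. A minimal sketch of what a rule-based variant might look like, assuming the same URL patterns (the class name and the parse_item body here are illustrative, not part of the original project):

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor


class KaoshiBqgCrawlSpider(CrawlSpider):
    # illustrative rule-based variant of the spider above
    name = 'kaoshi_bqg_crawl'
    allowed_domains = ['biquge5200.cc']
    start_urls = ['https://www.biquge5200.cc/xuanhuanxiaoshuo/']

    rules = (
        # keep following the category listing pages
        Rule(LinkExtractor(allow=r'/xuanhuanxiaoshuo/'), follow=True),
        # hand book/chapter detail pages to parse_item
        Rule(LinkExtractor(allow=r'/[0-9]{1,3}_[0-9]{2,6}/'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        # extract whatever fields are needed from the matched page
        yield {'url': response.url}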
xmly.py
# -*- coding: utf-8 -*-
import scrapy
from ..items import BookXMLYItem, BookChapterItem


class XmlySpider(scrapy.Spider):
    name = 'xmly'
    allowed_domains = ['ximalaya.com']
    start_urls = ['https://www.ximalaya.com/youshengshu/wenxue/']

    def parse(self, response):
        div_details = response.xpath('//*[@id="root"]/main/section/div/div/div[3]/div[1]/div/div[2]/ul/li/div')
        # details = div_details[::3]
        for details in div_details:
            book_id = details.xpath('./div/a/@href').get().split('/')[-2]
            book_name = details.xpath('./a[1]/@title').get()
            book_author = details.xpath('./a[2]/text()').get()  # author
            book_url = details.xpath('./div/a/@href').get()
            url = 'https://www.ximalaya.com' + book_url
            # print(book_id, book_name, book_author, url)
            item = BookXMLYItem(book_id=book_id, book_name=book_name, book_author=book_author, book_url=url)
            yield item
            yield scrapy.Request(url=url, callback=self.parse_details, meta={'info': book_id})

    def parse_details(self, response):
        book_id = response.meta.get('info')
        div_details = response.xpath('//*[@id="anchor_sound_list"]/div[2]/ul/li/div[2]')
        for details in div_details:
            chapter_id = details.xpath('./a/@href').get().split('/')[-1]
            chapter_name = details.xpath('./a/text()').get()
            chapter_url = details.xpath('./a/@href').get()
            url = 'https://www.ximalaya.com' + chapter_url
            item = BookChapterItem(book_id=book_id, chapter_id=chapter_id, chapter_name=chapter_name, chapter_url=url)
            yield item
items.py
import scrapy


# Biquge (筆趣閣) fields
class BookBQGItem(scrapy.Item):
    name = scrapy.Field()
    chapter = scrapy.Field()
    content = scrapy.Field()


# Ximalaya (喜馬拉雅) fields
class BookXMLYItem(scrapy.Item):
    book_name = scrapy.Field()
    book_id = scrapy.Field()
    book_url = scrapy.Field()
    book_author = scrapy.Field()


# Ximalaya chapter detail fields
class BookChapterItem(scrapy.Item):
    book_id = scrapy.Field()
    chapter_id = scrapy.Field()
    chapter_name = scrapy.Field()
    chapter_url = scrapy.Field()
pipelines.py
from scrapy.exporters import JsonLinesItemExporter
import os


class BqgPipeline(object):
    def process_item(self, item, spider):
        xs = '小說集'
        name = item['name']
        xs_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), xs)
        fiction_path = os.path.join(xs_path, name)
        # print(os.path.dirname(__file__))                  D:/Users/Administrator/PycharmProjects/wh1901/biquge.com
        # print(os.path.dirname(os.path.dirname(__file__))) D:/Users/Administrator/PycharmProjects/wh1901
        if not os.path.exists(xs_path):  # create the top-level folder if it does not exist
            os.mkdir(xs_path)
        if not os.path.exists(fiction_path):
            os.mkdir(fiction_path)  # create one folder per book
        chapter = item['chapter']
        content = item['content']
        file_path = os.path.join(fiction_path, chapter) + '.txt'  # one xx.txt file per chapter in that folder
        with open(file_path, 'w', encoding='utf-8') as fp:
            # content is the list returned by getall(), so join the paragraphs before writing
            fp.write('\n'.join(content) + '\n')
        print('保存成功')
        return item


# class XmlyPipeline(object):
#     def __init__(self):
#         self.fp = open("xmly.json", 'wb')
#         # JsonLinesItemExporter writes one JSON object per line
#         self.exporter = JsonLinesItemExporter(self.fp, ensure_ascii=False)
#
#     def process_item(self, item, spider):
#         self.exporter.export_item(item)
#         return item
#
#     def close_spider(self, spider):
#         self.fp.close()
#         print("爬蟲結(jié)束")
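For either pipeline to actually run, it has to be registered in the project's settings.py. A minimal sketch, assuming a project package called bqg_project (the real package name is whatever scrapy startproject generated for this project):

# settings.py — minimal sketch; replace bqg_project with the actual project package name
ITEM_PIPELINES = {
    'bqg_project.pipelines.BqgPipeline': 300,
    # 'bqg_project.pipelines.XmlyPipeline': 301,  # enable when running the xmly spider
}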
starts.py
from scrapy import cmdline
cmdline.execute("scrapy crawl kaoshi_bqg".split())
# cmdline.execute("scrapy crawl xmly".split())
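For cmdline.execute to locate the project, starts.py is normally placed in the project root next to scrapy.cfg and run with python starts.py; uncomment the second line to run the xmly spider instead.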
Here is the data that was scraped: the novel text files (小說) and the xmly.json export.
A note on a small problem I ran into while crawling:
When crawling the detail pages, at first I did not know how to get the detail-page URL together with the fields already extracted on the previous page.
- In other words, I had not properly understood the parameters of the Request yielded to fetch the detail page.
- meta: data handed over from another request; it can be used to pass values between multiple requests (see the sketch after this list).
- url: the URL this Request object sends its request to.
- callback: the function called after the downloader has finished downloading the response.
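A minimal sketch of how url, callback and meta work together to carry a value from one callback into the next (the spider name, selectors and field names here are illustrative, not taken from the project above):

import scrapy


class MetaDemoSpider(scrapy.Spider):
    # illustrative spider showing url / callback / meta on a yielded Request
    name = 'meta_demo'
    start_urls = ['https://example.com/list']

    def parse(self, response):
        for a in response.xpath('//a[@class="book"]'):
            name = a.xpath('./text()').get()
            # meta travels with the request and comes back on the response
            yield scrapy.Request(
                url=response.urljoin(a.xpath('./@href').get()),
                callback=self.parse_detail,
                meta={'name': name},
            )

    def parse_detail(self, response):
        # read back the value stored in parse()
        name = response.meta.get('name')
        yield {'name': name, 'detail_url': response.url}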
That is all for this article. I hope it helps with your study, and please continue to support 腳本之家.

