Python爬虫实战之使用Scrapy爬起点网的完本小说
本篇的目的是用scrapy来爬取起点小说网的完本小说,使用的环境ubuntu,至于scrapy的安装就自行百度了。
scrapy startproject name 通过终端进入到你创建项目的目录下输入上面的命令就可以完成项目的创建.name是项目名字.
我这里定义的item中的title用来存书名,desc用来存书的内容.
import scrapy
class TutorialItem(scrapy.Item):
    """Container for one scraped book.

    Fields:
        title: the book's name (also used as the output .txt file name).
        desc:  the accumulated text of every scraped chapter of the book.
    """
    title = scrapy.Field()
    desc = scrapy.Field()
在pipelines可以编写存储数据的形式,我这里就是使用txt形式的文件来存储每一本书
import json
import codecs
# Persist each scraped book as a plain-text (.txt) file
class TutorialPipeline(object):
    """Write each scraped book to a UTF-8 .txt file named after its title."""

    def process_item(self, item, spider):
        # One output file per book; item.get('title') is the book name.
        # Close the handle right after writing — the original kept every
        # handle open until spider shutdown and leaked all but the last one.
        self.file = codecs.open(item.get('title') + '.txt', 'w', encoding='utf-8')
        try:
            self.file.write(item.get("desc") + "\n")
        finally:
            self.file.close()
        return item

    def spider_closed(self, spider):
        # Defensive: the handle may already be closed (close() is idempotent)
        # or never created if no item was ever processed.
        f = getattr(self, 'file', None)
        if f is not None:
            f.close()
只要将下面代码中的tutorial替换成自己项目的名字就可以
# Scrapy project settings — substitute "tutorial" with your own project name.

BOT_NAME = 'tutorial'

SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'

# Identify as a desktop Chrome browser instead of the default
# 'tutorial (+http://www.yourdomain.com)' bot string.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5'

# Route every scraped item through the txt-writing pipeline (priority 300).
ITEM_PIPELINES = {
    'tutorial.pipelines.TutorialPipeline': 300,
}
# -*- coding: utf-8 -*-
import scrapy
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from tutorial.items import TutorialItem
from scrapy.http import Request
class DmozSpider(scrapy.Spider):
    """Crawl qidian.com's finished-sports-novels listing and scrape each book:
    its title plus the full text of every freely readable chapter.

    The original snippet used '//' comment markers and a bare prose line,
    both of which are SyntaxErrors in Python; they are replaced with real
    comments here.
    """
    name = "dmoz"
    allowed_domains = ["dmoz.org"]

    # Every listing page shares one URL except for the `page` parameter.
    # 292 finished books at 20 per page -> ceil(292/20) = 15 pages.
    # (The original `range(1, 292/20)` is a TypeError on Python 3 and,
    # on Python 2, only covered pages 1-13.)
    start_urls = [
        "http://fin.qidian.com/?size=-1&sign=-1&tag=-1&chanId=8&subCateId=-1&orderId=&update=-1&page="
        + str(page)
        + "&month=-1&style=1&vip=-1"
        for page in range(1, -(-292 // 20) + 1)
    ]

    def parse(self, response):
        """Listing page: extract every book's URL and follow it."""
        book_urls = response.xpath('//div[@class="book-mid-info"]/h4/a//@href').extract()
        for book_url in book_urls:
            # hrefs are protocol-relative ("//book.qidian.com/..."), so
            # prepend the scheme before requesting.
            yield Request("http:" + book_url, self.parseBook, dont_filter=True)

    def parseBook(self, response):
        """Book detail page: follow the 'read for free' link to chapter one."""
        chapter_urls = response.xpath(
            '//div[@class="book-info "]//a[@class="red-btn J-getJumpUrl "]/@href').extract()
        # One item per book; it travels through request meta and accumulates
        # the title and chapter texts across all chapter requests.
        item = TutorialItem()
        for url in chapter_urls:
            yield Request("http:" + url, meta={'item': item},
                          callback=self.parseCharter, dont_filter=True)

    def parseCharter(self, response):
        """Chapter page: append this chapter to the item, then follow the
        'next chapter' link. The finished item is yielded once a page has
        no readable content (e.g. the remaining chapters are VIP-only)."""
        item = response.meta['item']
        # Set the book title once, from the first chapter page that has it.
        for name in response.xpath('//div[@class="info fl"]/a[1]/text()').extract():
            if item.get('title') is None:
                item['title'] = name
        # Chapter heading followed by the chapter body paragraphs.
        content = ''
        for heading in response.xpath('//h3[@class="j_chapterName"]/text()').extract():
            content = content + heading + "\n"
        for paragraph in response.xpath(
                '//div[@class="read-content j_readContent"]//p/text()').extract():
            content = content + paragraph
        # Append this chapter to the text accumulated so far.
        desc = item.get('desc')
        if desc is None:
            item['desc'] = content
        else:
            item['desc'] = desc + content
        if content == '':
            # No heading and no paragraphs: we've run past the free
            # chapters — emit the completed book.
            yield item
        # Follow the "next chapter" link, carrying the same item along.
        chapters = response.xpath(
            '//div[@class="chapter-control dib-wrap"]/a[@id="j_chapterNext"]//@href').extract()
        for chapter in chapters:
            yield Request("http:" + chapter, meta={'item': item},
                          callback=self.parseCharter, dont_filter=True)
通过上面的代码虽然可以获取所有书的内容,但是起点是有vip限制的,也就是说必须用起点的vip帐号登录才能查看完本的小说,因此这有点遗憾,我没有起点小说网的会员.