最近一直在學習python,想寫一些練習。看別人都是爬壁紙什麼的,感覺那些也沒啥意思,所以我就去爬番號了。
如果不想自己複製代碼可以自行下載
點我下載,提取碼:9nas
- 爬蟲目錄
__pycache__:這個文件夾python解釋器會自動生成,不用管它。
想知道有什麼用看下面的解釋。
主要是下次再執行 Start.py 時,若解釋器發現這個 Html.py 腳本沒有修改過,就會跳過編譯這一步,直接運行以前生成的保存在
__pycache__文件夾裏的 *.pyc 文件,大大縮短項目運行前的準備時間。
- Html.py
我自己對urllib.request和bs4類庫的二次封裝

# 1. Web-page helper library.
import urllib.request
import urllib.parse
import bs4


class Html(object):
    """Web-page wrapper.

    Fetches a page via "GET" or "POST" (urllib.request) and parses the
    response into a BeautifulSoup object held on the instance.
    """

    def __init__(self, url, headers, requestMode="GET", data=None):
        """Initialise the request parameters and fetch the page immediately.

        Arguments:
            url {str} -- target URL
            headers {dict} -- HTTP request-header dict

        Keyword Arguments:
            requestMode {str} -- request mode, "GET" or "POST" (default: "GET")
            data {dict} -- form data for POST requests (default: None)
        """
        self.url = url
        self.headers = headers
        self.requestMode = requestMode
        self.data = data
        # Fetch the response and convert it into a soup object
        # (BeautifulSoup reads the file-like HTTP response directly).
        self.responseSoup = bs4.BeautifulSoup(self.getResponse(), "html.parser")

    def __str__(self):
        """Return a printable summary of the request parameters."""
        # The original used garbled line continuations ("_ str"); rebuilt
        # here as a parenthesised expression with the same result.
        return (self.url + "\n"
                + str(self.headers) + "\n"
                + str(self.requestMode) + "\n"
                + str(self.data))

    def getResponse(self):
        """Send the request and return the open HTTP response.

        Returns:
            http.client.HTTPResponse -- the response (readable by bs4),
            or None when a POST is requested without form data.
        """
        # Decide which kind of request object to build.
        if self.requestMode == "GET":
            request = urllib.request.Request(self.url, headers=self.headers)
        else:
            # BUG FIX: the original called len(self.data), which raises
            # TypeError when data is left at its default of None; treat
            # None and an empty dict the same way.
            if not self.data:
                print("POST請求所需的表單data爲空,退出函數。")
                return None
            # URL-encode the form so urllib sends it as the POST body.
            data = urllib.parse.urlencode(self.data).encode("utf-8")
            request = urllib.request.Request(url=self.url, headers=self.headers, data=data)
        # Retry until the server answers 200 so a transient failure does
        # not abort the crawl.  NOTE(review): this loops forever on a
        # permanent non-200 answer -- confirm whether a retry cap is wanted.
        while True:
            response = urllib.request.urlopen(request, timeout=30)
            if response.getcode() == 200:
                break
            # Report the failure and try again.
            print("獲取網頁失敗,重新獲取。錯誤報告:%d" % response.getcode())
        return response

    def labelSelect(self, label):
        """Run a CSS selector against the parsed page.

        Arguments:
            label {str} -- CSS selector string

        Returns:
            list -- matching bs4 tags, or None when no soup is available.
        """
        if self.responseSoup is None:
            print("soup對象爲空。")
        else:
            return self.responseSoup.select(label)

    def printResponse(self):
        """Pretty-print the parsed page to the console."""
        print(self.responseSoup.prettify())


def downloadImage(imageHtml, filename):
    """Download one image to the given path, reporting (not raising) errors.

    Arguments:
        imageHtml {str} -- image URL
        filename {str} -- destination file path
    """
    try:
        urllib.request.urlretrieve(imageHtml, filename=filename)
    except Exception:
        print("%s--下載出錯" % imageHtml)


def main():
    """Test entry point; not executed when the module is imported."""


if __name__ == '__main__':
    main()
- Page.txt
這個文件是用來保存爬取到的數據頁,自己創建就好了。然後可以在裏面輸入頁碼(除了數字什麼都不要有),範圍:0~56。
建議輸入數字:0
- main.py
程序入口共有兩個線程
1.主線線程:爬取數據
2.下載線程:下載圖片
#1.自定義類
import Html
#2.系統模塊
import time,threading,queue,os
# Base address of the target site.
baseUrl = "http://nanrenvip.cc"
# Work queue shared with the download thread: each item is a
# {actress name: {work id: cover url}} dict; the string "end" is the
# shutdown sentinel.
AV = queue.Queue()
# The three request-header dicts used by the crawler were three
# byte-identical copies; build them from a single template instead.
# dict() copies keep them independent, so mutating one cannot affect
# the others.
_commonHeaders = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                  'Accept-Language': 'zh-CN,zh;q=0.9',
                  'Cache-Control': 'no-cache',
                  'Connection': 'keep-alive',
                  'Cookie': 'fikker-QnSp-g2oO=iKQJO0GhsfKh7h3VS0kRIhGLYXCZJH9F; uv_cookie_126326=1; UBGLAI63GV=ydzyn.1547129688; fikker-vSjQ-xL6G=03JePCRqBCD5DLHAISRLxwNwncVYbCOU; __jclm_cpv_r_61506_cpv_plan_ids=%7C1365%7C%7C2093%7C%7C2013%7C%7C2092%7C%7C1776%7C%7C1780%7C%7C1364%7C; fikker-Ajmv-gZS6=iKSTQXOxMHSpeHwxUMSvCbWX5OSp9hoF; Hm_lvt_474976084829d4090d0d97d377ac5b38=1547017567,1547129535,1547131172,1547133591; Hm_lvt_60852cb607c7b21f13202e5e672131ce=1547017605,1547129688,1547133594; Hm_lpvt_60852cb607c7b21f13202e5e672131ce=1547133594; Hm_lpvt_474976084829d4090d0d97d377ac5b38=1547133594; uqcpvcouplet_fidx=6',
                  'Host': 'nanrenvip.cc',
                  'Pragma': 'no-cache',
                  'Referer': 'http://nanrenvip.cc/olds.html',
                  'Upgrade-Insecure-Requests': '1',
                  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6814.400 QQBrowser/10.3.3005.400'
                  }
# Headers for the first-level actress listing pages.
firstHeaders = dict(_commonHeaders)
# Headers for an actress's works-listing ("gather") pages.
gatherHeaders = dict(_commonHeaders)
# Headers for the per-work cover pages.
coverHeaders = dict(_commonHeaders)
def getAV():
    '''Crawl the actress listing pages and enqueue their works.

    For every actress found, builds a dict of {work id: cover image url}
    and puts {actress name: that dict} onto the global AV queue for the
    download thread.  When an empty listing page is reached, puts the
    sentinel string "end".

    NOTE(review): the page number is only ever read via pageRecord("r");
    progress is never saved back with pageRecord("w") -- confirm whether
    that is intentional.
    '''
    global AV
    # Page number saved by a previous run (read from Page.txt).
    htmlPage = pageRecord("r")
    # Running count of actresses, used only for progress output.
    AVnumber = 0
    # Scratch dict {work id: cover url} for the actress being processed.
    tempGatherDict = {}
    # First-level listing page for the current page number.
    firstHtml = Html.Html(baseUrl + "/nvyouku/1-0-0-0-0-0-" + str(htmlPage) + ".html", firstHeaders, "GET")
    # An empty ".avps_ny a" selection marks the page past the last one.
    while len(firstHtml.labelSelect(".avps_ny a")) != 0:
        # Iterate over the actresses on this listing page.
        for a1 in firstHtml.labelSelect(".avps_ny a"):
            # a1 is already a bs4 tag; the span holds the actress name.
            AVname = a1.select("span.fh_bt")[0].get_text()
            AVnumber += 1
            print("-------------------------第%d名:%s-------------------------" % (AVnumber, AVname))
            try:
                # Page listing all works of this actress.
                gatherHtml = Html.Html(baseUrl + a1["href"], gatherHeaders, "GET")
            except Exception as e:
                print("作品集對象--創建失敗")
                continue
            # Iterate over this actress's works.
            for index, a2 in enumerate(gatherHtml.labelSelect(".avps a")):
                try:
                    # Detail page holding the cover; the h1 text is the work id.
                    coverHtml = Html.Html(baseUrl + a2["href"], coverHeaders, "GET")
                    AVid = coverHtml.labelSelect("h1.heading")[0].get_text()
                except Exception as e:
                    print("封面對象--創建失敗")
                    continue
                # Record {work id: absolute cover url}.
                tempGatherDict[AVid] = baseUrl + coverHtml.labelSelect("img.lazyload")[0]["data-original"]
                print("爬取作品:%s\n還剩:%d張" % (AVid, (len(gatherHtml.labelSelect(".avps a")) - index - 1)))
            # Hand the finished actress (name + works) to the download thread.
            AV.put({AVname: tempGatherDict})
            # Start a fresh dict for the next actress.
            tempGatherDict = {}
        # Move on to the next listing page of actresses.
        htmlPage += 1
        firstHtml = Html.Html(baseUrl + "/nvyouku/1-0-0-0-0-0-" + str(htmlPage) + ".html", firstHeaders, "GET")
    # No more pages: tell the download thread it can finish.
    print("爬取完畢")
    AV.put("end")
def pageRecord(mode, page=None):
    '''Persist or restore the crawl page number in Page.txt.

    Arguments:
        mode {str} -- file operation mode: "w" writes `page`, "r" reads it

    Keyword Arguments:
        page {int} -- page number to write when mode is "w" (default: {None})

    Returns:
        int -- the stored page number when mode is "r"; otherwise None.
    '''
    if mode == "w":
        # Write the page number; `with` guarantees the file is closed
        # even if the write raises (original used manual close()).
        with open(".\\Page.txt", "w") as f:
            f.write(str(page))
    if mode == "r":
        # Read the page number back as an int.
        with open(".\\Page.txt", "r") as f:
            page = int(f.read())
        return page
def downloadImage(queue):
    '''Download worker: pull queued works and save their cover images.

    Arguments:
        queue {Queue} -- queue of {actress name: {work id: cover url}}
        dicts; the string "end" is the shutdown sentinel.
    '''
    while True:
        print("等待下載")
        # Block until the crawler hands over the next bundle of works.
        gather = queue.get()
        # Sentinel received: the crawl is complete, end this thread.
        if gather == "end":
            print("***全部下載完成***")
            return
        # One bundle maps an actress name to her {work id: cover url} dict.
        for AVname, works in gather.items():
            # Create the per-actress folder on first use.
            folder = ".\\Performer\\" + AVname
            if not os.path.exists(folder):
                os.makedirs(folder)
            for AVid, imageUrl in works.items():
                # Keep the url's last four characters as the extension;
                # skip covers that are already on disk.
                target = folder + "\\" + AVid + imageUrl[-4:]
                if not os.path.exists(target):
                    print("開始下載:%s" % imageUrl)
                    Html.downloadImage(imageUrl, target)
            print("下載完成!:%s" % AVname)
def main():
    '''Program entry point.

    Starts the download thread, then runs the crawler on the main
    thread; getAV() feeds the shared AV queue and finally enqueues the
    "end" sentinel that lets the download thread exit.
    '''
    print("程序已啓動(*^▽^*)")
    # Start the download thread consuming the AV queue.  The original
    # passed args = ([AV]) -- a bare list; a one-element tuple is the
    # conventional form and behaves identically.
    download = threading.Thread(target=downloadImage, args=(AV,))
    download.start()
    # Crawl and enqueue works for the download thread.
    getAV()


if __name__ == '__main__':
    # Program entry.
    main()
- 爬取完後的資源是這樣子的
- 注意
下載的圖片會保存在你運行main.py時所在的目錄下,所以建議先用控制檯cd到自己創建的目錄,再在該目錄下運行。
資源還是很多的,爬到十多頁的時候都已經有1G多了。
至於這些扒下來的番號怎麼用這裏不能講,請自行百度找使用方法。