A Python Scraper

Just a simple Python scraper.

It just grabs some images for a look.
Source site: http://www.setuw.com
Written in Python; it uses a thread pool and a few libraries. Download the third-party ones (e.g. BeautifulSoup) yourself.
Environment: Python 3; tested on Win10 and on a Raspberry Pi.
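Only BeautifulSoup comes from outside the standard library here (the thread pool is concurrent.futures from the stdlib). A small guard can fail early with a clear message; the package name on PyPI is beautifulsoup4:

# Optional guard: fail early if bs4 is missing.
try:
    import bs4  # provides BeautifulSoup; install with: pip install beautifulsoup4
except ImportError:
    raise SystemExit("Missing dependency: run `pip install beautifulsoup4` first")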

Site element structure

(screenshots of the relevant page elements omitted)
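The screenshots showed the two element patterns the scraper depends on: on a category page, each album is an a tag with class "a1" carrying href and title attributes; on an album page, each img tag carries a datas attribute whose single-quoted fields include the real image URL. A minimal BeautifulSoup sketch of that extraction (the toy markup is reconstructed from the parsing code below, not copied from the live site):

from bs4 import BeautifulSoup

# Toy markup mimicking the structures the scraper expects (an assumption
# reconstructed from the parsing code, not copied from the live site).
sample = """
<a class="a1" href="/a/123.html" title="Some album"></a>
<img datas="'thumb','https://img.setuw.com/pic/001.jpg'"/>
"""
soup = BeautifulSoup(sample, "html.parser")

link = soup.find("a", attrs={"class": "a1"})
print(link["href"], link["title"])  # -> /a/123.html Some album

img = soup.find("img")
parts = img["datas"].split("'")
print(parts[len(parts) - 2])  # -> https://img.setuw.com/pic/001.jpg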

Code

# -*- coding: utf-8 -*-
from concurrent.futures import ThreadPoolExecutor
import urllib.request
import os
from bs4 import BeautifulSoup


maxThreadCount = 8

baseDomain = "http://www.setuw.com"
intrance = "http://www.setuw.com/tag/rosi/"

# URL paths corresponding to the site's categories
tags = [ "/tag/rosi/", "/tag/tuigirl/" , "/tag/ugirls/" ,
"/tag/xiuren/" , "/tag/disi/" , "/tag/dongman/" , "/tag/xinggan/" , 
"/tag/qingchun/" , "/tag/youhuo/" , "/tag/mote/" , "/tag/chemo/" ,
"/tag/tiyu/" , "/tag/zuqiubaobei/" , "/meinv/liuyan/"
]

types = ["ROSI" , "推女郎" , " 尤果" , "  秀人 " , 
" DISI " , "動漫 " , "性感 " , "清純 " , " 誘惑 " , " 模特 " , " 車模" , "體育" , " 足球" , " 柳巖" ]

path = ""
header = {
    "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive'
}
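# Browser-like headers: some sites serve different markup, or refuse requests
# outright, when no ordinary User-Agent is present.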



def Download(argv1):
    # argv1 is a "#"-separated string: url # album title # image name # progress
    url, title, name, progress = argv1.split("#")
    print("Download processing:", progress)
    apath = path + "/" + title + "/"
    if not os.path.exists(apath):
        os.makedirs(apath, exist_ok=True)  # exist_ok guards against racing worker threads
    # fetch the image and save it under the album folder
    urllib.request.urlretrieve(url, '{0}{1}.jpg'.format(apath, name))
    return
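# Note: urllib.request.urlretrieve is a legacy helper with no retry or timeout
# parameter; if a download stalls, its worker thread stalls with it.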

    

def run(targetUrl, title):
    print("downloading " + title)
    req = urllib.request.Request(url=targetUrl, headers=header)
    response = urllib.request.urlopen(req)  # req acts like a URL plus request headers
    html = response.read().decode('utf-8', 'ignore')
    soup = BeautifulSoup(html, 'html.parser')

    imgs = soup.find_all('img')
    size = len(imgs)
    with ThreadPoolExecutor(maxThreadCount) as pool:
        for i in range(2, size - 16):
            # found by inspecting the page: this middle slice of imgs holds
            # the album images; the rest belong to the page itself
            data = imgs[i]["datas"]
            all = data.split("'")
            # argument string: download link # album title # image name # progress
            argv = (all[len(all) - 2] + "#" + title + "#"
                    + all[len(all) - 2].split(".")[1] + str(i)
                    + "#" + str(i - 1) + "/" + str(size - 18))
            pool.submit(Download, argv)  # queue one image download on the pool
    print(title, "downloaded successfully;")
    return
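# The bounds 2 and size-16 skip the page's fixed chrome images (logo, banners,
# related thumbnails), leaving size-18 album images, which is exactly the
# progress denominator used above. If the site layout changes, re-check them.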



if __name__ == '__main__':
    '''Custom download path: entering "." downloads to the current directory,
    pressing Enter downloads to /home/hdd/picDl/ (my own disk mount
    point... change it as you like).'''
    input1 = input("input a folder(. as ./ , none as /home/hdd/picDl/):")
    if input1 == ".":
        path = "./"
    elif input1 == "":
        path = "/home/hdd/picDl/"
    else:
        path = input1
    print("Path set to " + path)
    # Pick a download category. The list sits at the top of the site's pages;
    # I pulled these out by hand, so they may go stale.
    for i in range(0, len(types)):
        print("| " + str(i) + " | " + types[i] + " | ")
    print("select a type to download , ")
    index = input(" or input nothing to download index page:")
    if index != "":
        index1 = int(index)
        if 0 <= index1 < len(types):  # accept every listed index, including 0
            intrance = baseDomain + tags[index1]
        else:
            print("something wrong , setting download target as default")
    print(intrance + " is going to download.")
    '''
    Custom number of download threads. Note that each worker thread downloads
    a single image, so this only parallelizes images, not whole albums.
    '''
    maxThreadCount_ = input("input a number if you want to modify default thread number:")
    if maxThreadCount_ == "":
        print("using default number:", maxThreadCount)
    else:
        print("Modified number to:", maxThreadCount_)
        maxThreadCount = int(maxThreadCount_)
    req = urllib.request.Request(url=intrance, headers=header)
    response = urllib.request.urlopen(req)
    # decode to get the page's html source
    html = response.read().decode('utf-8', 'ignore')
    soup = BeautifulSoup(html, 'html.parser')
    Divs = soup.find_all('a', attrs={'class': 'a1'})
    for div in Divs:
        # .get() returns None for a missing attribute instead of raising KeyError
        if div.get("href") is None:
            print("no more albums")
            continue
        elif div['href'] == "":  # attribute present but empty
            print("album has an empty link")
            continue
        else:
            targetUrl = baseDomain + div['href']
            title = div["title"]
            print("Downloading album: " + title)
            run(targetUrl, title)

Shortcomings (improvement directions):

  1. Only the albums on a category's first page are downloaded (see the pagination sketch below).
  2. Download progress is not saved.
  3. That's about it... corrections and suggestions welcome.
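On point 1, one possible direction is to walk a category's pagination before handing album links to run(). A minimal sketch, assuming the category pages expose a next-page anchor (the '下一頁' anchor text is a guess, not verified against the real markup):

def iter_category_pages(first_url):
    # Yield one BeautifulSoup per category page by following next-page links.
    url = first_url
    while url:
        req = urllib.request.Request(url=url, headers=header)
        html = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
        soup = BeautifulSoup(html, 'html.parser')
        yield soup
        nxt = soup.find('a', string='下一頁')  # hypothetical next-page anchor
        url = baseDomain + nxt['href'] if nxt and nxt.get('href') else None

Each yielded page could then go through the same find_all('a', attrs={'class': 'a1'}) loop as above; skipping albums whose folder already exists would also be a cheap approximation of resumable downloads (point 2).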