Python Web Scraping (requests, proxies, web authentication, SSL certificate verification)

The requests module
    Proxies (the proxies parameter)
        Xici Proxy (西刺代理)
        Kuaidaili (快代理)
        Quanwang Proxy (全網代理)
        High-anonymity (elite) IP: the target server cannot see your real IP
        Transparent IP: the target server can see both the proxy IP and your real IP
    Normal proxy
        proxies = {"protocol": "protocol://IP:port"}

'''01_普通代理示例.py'''
import requests

url = "http://www.baidu.com/"
proxies = {"http":"http://183.129.207.82:11597"}
headers = {"User-Agent":"Mozilla/5.0"}

res = requests.get(url,proxies=proxies,headers=headers)
print(res.status_code)
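
To confirm the proxy actually took effect, request a service that echoes the IP the server sees. A minimal sketch against http://httpbin.org/ip (the proxy address is a placeholder):

import requests

url = "http://httpbin.org/ip"
proxies = {"http": "http://183.129.207.82:11597"}  # placeholder; use a live proxy
headers = {"User-Agent": "Mozilla/5.0"}

try:
    res = requests.get(url, proxies=proxies, headers=headers, timeout=5)
    print(res.json())  # {"origin": "<proxy IP>"} when the proxy is in use
except requests.exceptions.RequestException as e:
    print("Proxy failed:", e)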

    Private (authenticated) proxy
        proxies = {"protocol": "protocol://username:password@IP:port"}

'''02_私密代理示例.py'''
import requests

url = "http://httpbin.org/get"
headers = {"User-Agent":"Mozilla/5.0"}
proxies = {"http":"http://309435365:[email protected]:16817"}

res = requests.get(url,proxies=proxies,headers=headers)
res.encoding = "utf-8"
print(res.text)

Scrape Lianjia second-hand housing listings --> store them in a MySQL database

'''05_鏈家數據ToMySQL.py'''
import requests
import re
import pymysql
import warnings

class LianjiaSpider:
    def __init__(self):
        self.baseurl = "https://bj.lianjia.com/ershoufang/pg"
        self.page = 1
        self.headers = {"User-Agent": "Mozilla/5.0"}
        self.proxies = {"http": "http://127.0.0.1:8888"}  # local debugging proxy; remove if none is running
        self.db = pymysql.connect(host="localhost", user="root",
                  password="ParisPython", charset="utf8")
        self.cursor = self.db.cursor()

    def getPage(self,url):
        res = requests.get(url,proxies=self.proxies,headers=self.headers,timeout=5)
        res.encoding = "utf-8"
        html = res.text
        print("頁面爬取成功,正在解析...")
        self.parsePage(html)

    def parsePage(self,html):
        p = re.compile('<div class="houseInfo".*?data-el="region">(.*?)</a>.*?<div class="totalPrice">.*?<span>(.*?)</span>(.*?)</div>',re.S)
        r_list = p.findall(html)
        # [("天通苑","480","萬"),()..]
        print("頁面解析完成,正在存入數據庫...")
        self.writeTomysql(r_list)

    def writeTomysql(self,r_list):
        c_db = "create database if not exists Lianjiadb \
                character set utf8"
        u_db = "use Lianjiadb"
        c_tab = "create table if not exists housePrice( \
                 id int primary key auto_increment,\
                 housename varchar(50), \
                 totalprice int)charset=utf8"
        
        warnings.filterwarnings("ignore")
        try:
            self.cursor.execute(c_db)
            self.cursor.execute(u_db)
            self.cursor.execute(c_tab)
        except Warning:
            pass

        ins = "insert into housePrice(housename,totalprice) \
               values(%s,%s)"
        for r_tuple in r_list:
            name = r_tuple[0].strip()
            price = float(r_tuple[1].strip())*10000
            L = [name,price]
            self.cursor.execute(ins,L)
            self.db.commit()
        print("存入數據庫成功")


    def workOn(self):
        while True:
            c = input("爬取按y(q退出):")
            if c.strip().lower() == "y":
                url = self.baseurl + str(self.page) + "/"
                self.getPage(url)
                self.page += 1
            else:
                self.cursor.close()
                self.db.close()
                print("爬取結束,謝謝使用!")
                break



if __name__ == "__main__":
    spider = LianjiaSpider()
    spider.workOn()

    Find the URL
      https://bj.lianjia.com/ershoufang/pg1/
    Regex
<div class="houseInfo".*?data-el="region">(.*?)</a>.*?<div class="totalPrice">.*?<span>(.*?)</span>(.*?)</div>
  Web client authentication (parameter: auth)
    auth = ("username", "password")
    Example: 09_Web客戶端驗證.py

'''09_Web客戶端驗證.py'''
import requests
import re

class NoteSpider:
    def __init__(self):
        self.headers = {"User-Agent":"Mozilla/5.0"}
        self.url = "網址"
        self.proxies = {"http":"http://309435365:[email protected]:16817"}
        # auth參數存儲用戶名和密碼(必須爲元組)
        self.auth = ("賬號","密碼")
    
    def getParsePage(self):
        res = requests.get(self.url,
                           proxies=self.proxies,
                           headers=self.headers,
                           auth=self.auth,
                           timeout=3)
        res.encoding = "utf-8"
        html = res.text
        print(html)
        p = re.compile('<a href=".*?>(.*?)</a>',re.S)
        r_list = p.findall(html)
        print(r_list)
        self.writePage(r_list)
        
    def writePage(self,r_list):
        print("開始寫入文件...")
        with open("達內科技.txt","a") as f:
            for r_str in r_list:
                f.write(r_str + "\n\n")
        print("寫入成功")
    
if __name__ == "__main__":
    spider = NoteSpider()
    spider.getParsePage()

  SSL certificate verification (parameter: verify)
    verify = True : default; verify the SSL certificate
    verify = False: skip certificate verification

'''10_SSL證書認證示例.py'''
import requests

url = "https://www.12306.cn/mormhweb/"
headers = {"User-Agent":"Mozilla/5.0"}

res = requests.get(url,headers=headers,verify=False)
res.encoding = "utf-8"
print(res.text)
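
With verify=False, urllib3 emits an InsecureRequestWarning on every request. If that noise is unwanted, it can be silenced explicitly; a minimal sketch:

import requests
import urllib3

# suppress the InsecureRequestWarning triggered by verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

res = requests.get("https://www.12306.cn/mormhweb/",
                   headers={"User-Agent": "Mozilla/5.0"},
                   verify=False)
print(res.status_code)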

Handler processors in urllib.request
  Definition
    urlopen() is a special opener pre-built by the module; it does not support
    proxies and similar features. Handler processor objects let you build a
    custom opener that does.
  Common methods
    build_opener(Handler processor object): create an opener object
    opener.open(url, ...)

import urllib.request

url = "http://www.example.com/"  # placeholder URL

# create the Handler processor object
http_handler = urllib.request.HTTPHandler()
#proxy_handler = urllib.request.ProxyHandler()
# create the custom opener object
opener = urllib.request.build_opener(http_handler)
# send the request with the opener object's open() method
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))

  Handler processor types
    HTTPHandler(): no special functionality
    ProxyHandler(normal proxy)
      proxy dict: {"protocol": "IP:port"}
    ProxyBasicAuthHandler(password manager object): private proxy
    HTTPBasicAuthHandler(password manager object): web client authentication
    Purpose of the password manager object
      private proxy authentication
      Web client authentication
      Implementation flow (see the sketch after the next code block)
        1. Create the password manager object
             pwdmg = urllib.request.HTTPPasswordMgrWithDefaultRealm()
        2. Add the credentials to the password manager object
             pwdmg.add_password(None,webserver,user,passwd)
        3. Create the Handler processor object
             private proxy:
               proxy = urllib.request.ProxyBasicAuthHandler(pwdmg)
             Web client:
               webbasic = urllib.request.HTTPBasicAuthHandler(pwdmg)

# normal proxy: hand the proxy dict to ProxyHandler
import urllib.request

url = "http://www.example.com/"  # placeholder URL
proxy = {"http": "http://183.129.207.82:11597"}  # sample proxy

# create the Handler processor object
pro_hand = urllib.request.ProxyHandler(proxy)
# create the custom opener object
opener = urllib.request.build_opener(pro_hand)
# send the request with the opener object's open() method
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))
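
Putting the password-manager flow together for a private proxy: ProxyBasicAuthHandler only answers the proxy's 407 authentication challenge, so a ProxyHandler is still needed to route traffic through the proxy. A sketch with placeholder address and credentials (HTTPBasicAuthHandler slots into the same flow for web client authentication):

import urllib.request

url = "http://httpbin.org/get"
proxy_addr = "116.255.162.107:16816"  # placeholder proxy address
user = "username"                     # placeholder credentials
passwd = "password"

# 1. create the password manager object
pwdmg = urllib.request.HTTPPasswordMgrWithDefaultRealm()
# 2. add the credentials to the password manager object
pwdmg.add_password(None, proxy_addr, user, passwd)
# 3. create the Handler processor objects
proxy_hand = urllib.request.ProxyHandler({"http": "http://" + proxy_addr})
auth_hand = urllib.request.ProxyBasicAuthHandler(pwdmg)
# 4. build the opener and send the request
opener = urllib.request.build_opener(proxy_hand, auth_hand)
req = urllib.request.Request(url)
res = opener.open(req)
print(res.read().decode("utf-8"))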

Scrape the Maoyan movie Top100 ranking and store it in MongoDB

'''06_貓眼電影top100抓取.py'''
import requests
import re
import pymongo

class MaoyanSpider:
    def __init__(self):
        self.baseurl = "http://maoyan.com/board/4?offset="
        self.headers = {"User-Agent":"Mozilla/5.0"}
        self.page = 1
        self.offset = 0
        self.proxies = {"http":"http://309435365:[email protected]:16817"}
        self.conn = pymongo.MongoClient("localhost",27017)
        self.db = self.conn.Film
        self.myset = self.db.top100

        
    # download the page
    def loadPage(self,url):
        res = requests.get(url,headers=self.headers)
        res.encoding = "utf-8"
        html = res.text
        self.parsePage(html)
        
    # parse the page
    def parsePage(self,html):
        p = re.compile('<div class="movie-item-info">.*?title="(.*?)".*?<p class="star">(.*?)</p>.*?releasetime">(.*?)</p>',re.S)
        r_list = p.findall(html)
#        print(r_list)
        # [("霸王別姬","張國榮","1994-01-01"),(),()...]
        self.writeTomysql(r_list)
    
    def writeToMongo(self,r_list):
        for r_tuple in r_list:
            name = r_tuple[0].strip()
            star = r_tuple[1].strip()
            releasetime = r_tuple[2].strip()
            D = {"name":name,
                 "star":star,
                 "releasetime":releasetime}
            self.myset.insert_one(D)
        print("Saved to database")

    
    def workOn(self):
        while True:
            c = input("爬取請按y(y/n):")
            if c.strip().lower() == "y":
                self.offset = (self.page-1)*10
                url = self.baseurl + str(self.offset)
                self.loadPage(url)
                self.page += 1
            else:
                print("爬取結束,謝謝使用!")
                break

                
if __name__ == "__main__":
    spider = MaoyanSpider()
    spider.workOn()     
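
To confirm the documents landed, query the collection from a separate script; the database and collection names follow the spider above:

import pymongo

conn = pymongo.MongoClient("localhost", 27017)
myset = conn.Film.top100

print("documents stored:", myset.count_documents({}))
for doc in myset.find().limit(3):  # peek at the first few records
    print(doc)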
        

