【Python】Crawler 簡單實現

'''Implement a crawler program, grab the novel, save the novel content to a file'''
import requests
import bs4  
import re
import time
# bs4 是 BeautifulSoup4 的縮寫

# 1. 先構造一個 HTTP請求,把這個請求發送出去獲取到響應
# 既能打開內容頁,也能打開菜單頁
def open_page(url):
    """Send an HTTP GET to *url* and return the response body as text.

    Works for both the chapter-menu page and individual content pages.
    Returns None when the response status is not 200.
    """
    # headers = {'User-Agent': ...}  # add a UA here if the site rejects scripts
    response = requests.get(url)  # ,headers=headers)
    # The site serves GBK-encoded pages; force the decode codec explicitly.
    # (Bug fix: original assigned `response.ecncoding`, a typo that created a
    # new attribute and left the real `encoding` untouched.)
    response.encoding = 'gbk'
    if response.status_code != 200:
        # Bug fix: original message omitted the failure and the status code.
        print(f"requests get {url} failed, status code: {response.status_code}")
        return None
    return response.text
def test1():
    """Smoke test: dump one chapter page's raw HTML to stdout."""
    page_html = open_page('http://book.zongheng.com/chapter/841970/56888216.html')
    print(page_html)


# 2. 根據內容進行解析
# 解析出每個章節的URL (a 標籤中的解析)

def parse_main_page(html):
    """Parse the menu page HTML and return a list of chapter-page URLs.

    Chapter links are the <a> tags whose href contains six consecutive
    digits followed by a literal ".html".
    """
    # a) build a soup object over the page
    soup = bs4.BeautifulSoup(html, "html.parser")
    # b) find all hrefs of the form <digits>.html
    # (Bug fix: the dot was unescaped, so `\d{6}.html` would also match
    # e.g. "123456xhtml"; `\.` matches only a literal period.)
    charts = soup.find_all(href=re.compile(r'\d{6}\.html'))
    # c) turn the relative hrefs into absolute chapter URLs
    url_list = ['http://book.zongheng.com/chapter/841970/' + item['href'] for item in charts]
    return url_list

def test2():
    """Smoke test: print the chapter URL list parsed from one page."""
    page_html = open_page('http://book.zongheng.com/chapter/841970/56888216.html')
    print(parse_main_page(page_html))


# 要解析的詳情頁
def parse_detail_page(html):
    """Parse a chapter page and return its (title, content) as strings.

    Bug fixes vs. the original:
    - the title lookup result was never assigned, so `return title, content`
      raised NameError
    - `.get.text()` is not a valid chain; the BeautifulSoup API is `.get_text()`
    - `soup.find(id_='content')` used the wrong keyword (`id=`), and `find`
      returns a single Tag (or None), not a subscriptable list
    """
    soup = bs4.BeautifulSoup(html, 'html.parser')
    title = soup.find_all(class_='bookname')[0].h1.get_text()
    content = soup.find(id='content').get_text()
    return title, content

def test4():
    """Smoke test: show the parsed title and body of one chapter."""
    page_html = open_page('http://book.zongheng.com/chapter/841970/56888216.html')
    chapter_title, chapter_body = parse_detail_page(page_html)
    print("title: ", chapter_title)
    print("content: ", chapter_body)

def write_file(title, content):
    """Append one chapter (title line, then body) to tmp.txt.

    Encodes as GBK; errors='ignore' silently drops any character the
    GBK codec cannot represent.
    """
    with open("tmp.txt", 'a', encoding='gbk', errors='ignore') as out:
        out.write(f"{title}\n{content}\n\n\n\n")

def run():
    """Crawl every chapter of the novel and append each one to tmp.txt."""
    menu_url = "http://book.zongheng.com/chapter/841970/"
    # Open the entry page and extract the URL of every chapter detail page.
    menu_html = open_page(menu_url)
    chapter_urls = parse_main_page(menu_html)
    # Visit each chapter page in turn, parsing and persisting its text.
    for chapter_url in chapter_urls:
        print("crawler url: ", chapter_url)
        chapter_title, chapter_body = parse_detail_page(open_page(chapter_url))
        write_file(chapter_title, chapter_body)
        time.sleep(1)  # be polite: throttle to one request per second

 

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章