Scraping Lianjia Property Listings with Python

Required libraries

  • requests
  • BeautifulSoup (from the bs4 package)
  • re
  • pandas
  • lxml (the HTML parser handed to BeautifulSoup below)
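
Before running the full program, it can help to confirm that the page structure matches what the code expects. The short sketch below is my own sanity check, not part of the original program, and the 'anting' district path is only an example:

import requests
from bs4 import BeautifulSoup

# Illustrative check: fetch one listing page and count the listing blocks
test_url = 'https://sh.lianjia.com/ershoufang/anting/pg1'
header = {'User-Agent': 'Mozilla/5.0'}
resp = requests.get(test_url, headers=header)
soup = BeautifulSoup(resp.text, 'lxml')
print(len(soup.find_all('div', class_='info clear')))  # number of listings on the page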

The full program is as follows:

import requests
from bs4 import BeautifulSoup
import pandas as pd
import re

def removenone(mylist):
    # Remove empty strings left over from the regex match
    while '' in mylist:
        mylist.remove('')
    return mylist

def addnone(mylist, length, cha):
    # Pad the list with a placeholder until it reaches the expected length
    while len(mylist) < length:
        mylist.append(cha)
    return mylist

def regnum(s):
    # Extract the numeric values (integers or decimals) from a string
    mylist = re.findall(r'\d+\.?\d*', s)
    mylist = removenone(mylist)
    return mylist
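
# To make the helpers concrete (hypothetical inputs, not actual scraped text):
#   regnum('3室2厅 | 89.5平米 | 2009年建')  -> ['3', '2', '89.5', '2009']
#   addnone(['12'], 2, '>365')              -> ['12', '>365']   (pads to the expected length)
#   removenone(['3', '', '2', ''])          -> ['3', '2']       (drops empty matches)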

def lianjia(url, page_range, district):
    # Column headers for the output table
    column_names = ['Title', 'Position', 'Tag', 'followInfo', 'VR', 'Info',
                    'Total_price', 'RMB/m^2', 'Attention', 'Update day',
                    'Bed room', 'Living room', 'Area', 'Floors', 'Year', 'WebPage']
    data_list = []   # one row per listing
    header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36'}
    for page in range(page_range):
        pgurl = url + '/pg' + str(page + 1)   # Lianjia paginates with /pg1, /pg2, ...
        print(pgurl)
        resp = requests.get(pgurl, headers=header)   # fetch the listing page
        soup = BeautifulSoup(resp.text, "lxml")      # parse the HTML
        # each listing sits in a <div class="info clear"> block
        for b in soup.find_all('div', class_='info clear'):
            temp = []
            for wz in b.find_all('div', class_=['title', 'positionInfo', 'tag',
                                                'houseInfo', 'priceInfo', 'followInfo']):
                temp.append(wz.get_text())
            tag = regnum(temp[2])              # numeric fields feeding the room/area/floor/year columns
            date = regnum(temp[3])             # numeric fields feeding the attention / days-listed columns
            date = addnone(date, 2, '>365')    # listings older than a year carry no day count
            price = regnum(temp[5])            # numeric fields feeding the price columns
            temp.extend(price)
            temp.extend(date)
            temp.extend(tag)
            temp = addnone(temp, 15, '0')      # pad missing fields so every row has 15 values
            for title in b.find_all('div', class_='title'):
                for link in title.find_all('a'):
                    temp.append(link.get('href'))   # link to the listing's detail page
            data_list.append(temp)
    data = pd.DataFrame(data_list, columns=column_names)
    data.to_csv(district + '.csv')   # save the results to <district>.csv
    return data

if __name__ == '__main__':
    # User-defined settings
    district_list = ['anting']   # districts to scrape; an unknown district falls back to the parent page's listings
    for district in district_list:
        url = "https://sh.lianjia.com/ershoufang/" + district
        # url = "https://xinxiang.lianjia.com/ershoufang/" + district
        page_range = 30          # number of result pages to fetch
        my = lianjia(url, page_range, district)
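
Once a run finishes, the listings are saved to <district>.csv with the column names defined above. As a quick sanity check you can load the file back with pandas; this is a minimal sketch that assumes the 'anting.csv' file produced by the run above (values are stored as strings, so non-numeric entries are coerced to NaN before averaging):

import pandas as pd

df = pd.read_csv('anting.csv', index_col=0)      # index_col=0 skips the index column written by to_csv
print(df.shape)                                  # (number of listings, 16 columns)
print(df[['Title', 'Total_price', 'RMB/m^2']].head())
# Rough average of the per-square-metre price across the scraped listings
print(pd.to_numeric(df['RMB/m^2'], errors='coerce').mean())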