Crawling NOAA data with Python

The goal is to crawl all of the .tif data under the following path:

https://data.ngdc.noaa.gov/instruments/remote-sensing/passive/spectrometers-radiometers/imaging/viirs/mosaics/ 

Implementation approach:

Construct all of the folder paths (a small sketch of this step follows these notes)

Iterate over the <a> tags to collect all of the download links, and download any link that ends in .tif

Since the imagery files are fairly large, a progress display is added to the download
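Before the full script, here is a minimal sketch of the first step: generating the date-named folder URLs with datetime instead of the plain integer increment used in the script (start += 1), since integer arithmetic produces invalid names across month boundaries (e.g. 20200131 + 1 = 20200132). It assumes the directory really is laid out as one YYYYMMDD folder per day, as the script does; folder_urls is an illustrative helper name, not part of the script.

from datetime import date, timedelta

def folder_urls(rawurl, first_day, n_days):
    """Yield one folder URL per day, named YYYYMMDD, starting at first_day."""
    for offset in range(n_days):
        day = first_day + timedelta(days=offset)
        yield rawurl + day.strftime('%Y%m%d') + '/'

# e.g. the 20 daily folders starting on 2020-02-10, matching the loop in the full script below:
# for u in folder_urls(rawurl, date(2020, 2, 10), 20):
#     print(u)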

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request

from bs4 import BeautifulSoup
start = 20200210  # first daily folder to fetch, named YYYYMMDD

rawurl = 'https://data.ngdc.noaa.gov/instruments/remote-sensing/passive/spectrometers-radiometers/imaging/viirs/mosaics/'

def downloaddata(baseurl, filename):
    """Simple one-shot download without progress reporting or retries (not used by the main loop below)."""
    file_name = r"D:\nppdata\\s" + filename  # local save path; the file name is prefixed with "s"
    url = baseurl + filename
    print(url)
    urllib.request.urlretrieve(url, file_name)
def callback(block_count, block_size, total_size):
    """
    Show the progress of the file being downloaded (used as the urlretrieve() reporthook).
    :param block_count: number of data blocks transferred so far
    :param block_size: size of each data block, in bytes
    :param total_size: size of the remote file, in bytes
    :return: None
    """
    download_pg = 100.0 * block_count * block_size / total_size
    if download_pg > 100:
        download_pg = 100

    print("Current download progress: %.2f%%" % download_pg)

def download(url, filename, reporthook):
    """
    Wrapper around urlretrieve() that re-downloads when the download fails
    (the retry is a recursive call to itself).
    :param url: URL of the file to download
    :param filename: local path to save the file to
    :param reporthook: progress callback passed through to urlretrieve()
    :return: None
    """

    try:
        urllib.request.urlretrieve(url, filename, reporthook)
    # urllib.error.ContentTooShortError is also caught by the generic handler below
    except Exception as e:
        print(e)
        print('Network conditions are not good.\nRetrying.....')
        download(url, filename, reporthook)
# urlretrieve()'s fourth argument is POST data, not a header, so the User-Agent is set by
# installing a global opener instead; it applies to urlopen() and urlretrieve() below.
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.80 Safari/537.36')]
urllib.request.install_opener(opener)

for _ in range(20):                                     # crawl 20 daily folders
    url = rawurl + str(start) + "/"
    print(url)
    content = urllib.request.urlopen(url).read().decode('utf-8')  # fetch the directory listing HTML
    soup = BeautifulSoup(content, 'lxml')
    list_urls = soup.find_all("a")                      # the <a> tags hold the file URLs
    for a_tag in list_urls[1:]:                         # skip the first link (parent directory)
        lianjie = a_tag.get('href')
        if lianjie[0:5] == 'SVDNB' and lianjie.endswith(".tif"):
            print(lianjie, "downloading")
            file_url = url + lianjie                    # keep url untouched so later links stay correct
            file_name = r"D:\nppdata\\s" + lianjie
            download(file_url, file_name, callback)

    start += 1  # next daily folder name (plain integer increment; see the datetime note above)
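A design note on download() in the script above: it retries by calling itself, so a persistently failing connection will eventually hit Python's recursion limit. A bounded retry loop is a safer variant; the sketch below keeps the same urlretrieve() call and reporthook, and download_with_retries / max_retries are illustrative names rather than part of the script.

import urllib.request

def download_with_retries(url, filename, reporthook, max_retries=5):
    """Retry in a bounded loop instead of recursing; give up after max_retries failed attempts."""
    for attempt in range(1, max_retries + 1):
        try:
            urllib.request.urlretrieve(url, filename, reporthook)
            return True
        except Exception as e:
            print(e)
            print('Network conditions are not good. Retry %d/%d...' % (attempt, max_retries))
    return False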

 
