from urllib.parse import urlparse;
from bs4 import BeautifulSoup;
import requests;
import os;
# Fetch the homepage and parse it.  NOTE: the original used `// 重點` as an
# inline comment, which Python parses as floor division by an undefined
# name and crashes with NameError; `#` is the comment marker.
r = requests.get('http://www.xiachufang.com')
soup = BeautifulSoup(r.text, features="lxml")

# Collect image URLs from every <img> tag.  Lazily-loaded images keep the
# real URL in `data-src`; the rest use the plain `src` attribute.
img_list = []
for img in soup.select('img'):
    # Prefer data-src when present; fall back to src.
    src = img.attrs['data-src'] if img.has_attr('data-src') else img.attrs['src']
    print(src)
    img_list.append(src)
# Initialize the download directory.  The original left the creation
# commented out, so the first open()/mkdir below it could fail when
# ./images did not already exist; makedirs with exist_ok is idempotent.
image_dir = os.path.join(os.curdir, 'images')
os.makedirs(image_dir, exist_ok=True)
# Download every collected image URL to a mirrored path under image_dir.
for img in img_list:
    o = urlparse(img)
    # Strip the leading '/' and drop any style suffix after '@'
    # (e.g. '...jpg@300w' -> '...jpg').
    filename = o.path[1:].split('@')[0]
    if not filename:
        # No usable path component (e.g. a bare domain URL) — skip it.
        continue
    filepath = os.path.join(image_dir, filename)
    # makedirs creates intermediate directories too; the original plain
    # os.mkdir failed whenever the parent directory was missing.
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    url = '%s://%s/%s' % (o.scheme or 'http', o.netloc, filename)
    print(url)
    # Protocol-relative URLs ('//host/...') have no scheme and make
    # requests raise MissingSchema — default them to http.
    download_url = img if o.scheme else 'http:' + img
    # Single request per image (the original issued requests.get twice,
    # downloading every image two times).
    resp = requests.get(download_url)
    with open(filepath, 'wb') as f:
        # Stream to disk in 1 KiB chunks to avoid holding the whole
        # image in memory.
        for chunk in resp.iter_content(1024):
            f.write(chunk)