# https://www.doutula.com/photo/list/?page=1
import requests,os,re,threading,time
from lxml import etree
from urllib import request
from queue import Queue
# Shared HTTP request headers: a desktop-Chrome User-Agent so the site
# serves the normal HTML pages instead of blocking the scraper.
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36'
}
class procuder(threading.Thread):
    """Producer thread: takes list-page URLs from ``page_queue``, scrapes
    each page for non-gif meme images, and pushes ``(img_url, filename)``
    pairs onto ``img_queue`` for the Consumer threads to download.

    (Name kept as-is — "procuder" — because callers reference it.)
    """

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(procuder, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # queue of list-page URLs to scrape
        self.img_queue = img_queue    # queue of (image URL, target filename)

    def run(self):
        # Drain the page queue; exit once it is empty.
        # NOTE(review): empty()+get() is racy across producers, but the
        # queue is fully populated before the threads start, so at worst a
        # thread blocks briefly on an item another thread already took.
        while True:
            if self.page_queue.empty():
                break
            url = self.page_queue.get()
            self.parse_spider(url)

    def parse_spider(self, url):
        """Fetch one list page and enqueue every static (non-gif) image."""
        response = requests.get(url=url, headers=header).text
        html = etree.HTML(response)
        imgs = html.xpath("//div[@class='page-content text-center']//a[@class='col-xs-6 col-sm-3']//img[@class!='gif']")
        for img in imgs:
            img_url = img.get('data-original')
            alt = img.get('alt') or ''  # alt may be missing; avoid TypeError in re.sub
            # BUG FIX: the original pattern r'\???\.,。?!\*' was a literal
            # character *sequence* (with stray ? quantifiers), not a character
            # class, so it essentially never matched. Use a character class to
            # strip punctuation that is awkward/illegal in filenames.
            alt = re.sub(r'[??.,。?!*]', '', alt)
            suffix = os.path.splitext(img_url)[1]
            filename = alt + suffix
            self.img_queue.put((img_url, filename))
class Consumer(threading.Thread):
    """Consumer thread: pops ``(img_url, filename)`` pairs off ``img_queue``
    and downloads each image into the local ``images/`` directory."""

    def __init__(self, page_queue, img_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue  # watched only to decide when to stop
        self.img_queue = img_queue    # queue of (image URL, target filename)

    def run(self):
        # BUG FIX: the original crashed with FileNotFoundError when the
        # 'images' directory did not already exist.
        os.makedirs('images', exist_ok=True)
        while True:
            # Stop only when both queues are drained: no pages left to
            # scrape and no images left to download.
            if self.img_queue.empty() and self.page_queue.empty():
                break
            img_url, filename = self.img_queue.get()
            try:
                request.urlretrieve(img_url, os.path.join('images', filename))
            except OSError:
                # BUG FIX: one bad URL or unwritable filename previously
                # raised and killed the whole worker thread; skip it instead.
                continue
            print(filename + ' 下載完成! ')
def main():
    """Entry point: enqueue the 199 list-page URLs, then launch 100
    producer threads (scrapers) and 100 consumer threads (downloaders)."""
    page_queue = Queue(1000)
    img_queue = Queue(500000)
    # Queue list pages 1..199 up front so every producer has work waiting.
    for page in range(1, 200):
        page_queue.put('https://www.doutula.com/photo/list/?page=%d' % page)
        print(page)
    for _ in range(100):
        procuder(page_queue, img_queue).start()
    print('=' * 20)
    # Wait 10 seconds so the producers can push plenty of items into
    # img_queue first; the consumers exit as soon as both queues look
    # empty, so starting them too early would waste those threads.
    time.sleep(10)
    for _ in range(100):
        Consumer(page_queue, img_queue).start()


if __name__ == '__main__':
    main()
import requests
def request_list_page():
    """Query Lagou's position-search AJAX endpoint for python jobs.

    Lagou rejects direct POSTs to the AJAX endpoint unless the request
    carries the cookies set by the normal listing page, so we first GET
    the listing URL inside one Session, then POST the search from that
    same Session and print the JSON reply.
    """
    ajax_url = "https://www.lagou.com/jobs/positionAjax.json?city=%E4%B8%8A%E6%B5%B7&needAddtionalResult=false"
    listing_url = "https://www.lagou.com/jobs/list_python?labelWords=&fromSearch=true&suginput="
    headers = {
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Host": "www.lagou.com",
        "Referer": "https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?labelWords=&fromSearch=true&suginput=",
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
    }
    data = {
        "first": "true",
        "pn": "1",
        "kd": "python",
    }
    session = requests.Session()           # one Session so cookies persist
    session.get(url=listing_url, headers=headers)  # prime the anti-bot cookies
    resp = session.post(url=ajax_url, headers=headers, data=data)
    resp.encoding = 'utf-8'
    print(resp.json())


if __name__ == '__main__':
    request_list_page()