Scrapy error notes: Missing scheme in request url: h

While writing a spider with the Scrapy framework, running it threw the error: Missing scheme in request url: h

spider.py was as follows:

Look closely at start_urls: it holds the URLs the spider starts crawling from, and they must be stored in a list (or tuple).

They must not be stored as a single string the way I did here (see the short demo after the code for why that produces this particular error).

# -*- coding: utf-8 -*-
import scrapy
from scrapy import Selector
from miao.items import MiaoItem
class MmiaoSpider(scrapy.Spider):
    name = 'mmiao'
    offset = 0
    allowed_domains = ["tencent.com"]
    url = 'http://hr.tencent.com/position.php?&start='
    start_urls = ('http://hr.tencent.com/position.php?&start=' + str(offset))  # note: these parentheses do NOT make a tuple, this is still a plain string
    addurl = 'https://hr.tencent.com/'
    def parse(self, response):
        for each in response.xpath("//tr[@class='even']|//tr[@class='odd']"):
            item = MiaoItem()
            item['positionname'] = each.xpath('./td[1]/a/text()').extract()[0]
            item['positionlink'] = self.addurl+each.xpath('./td[1]/a/@href').extract()[0]
            try:
                item['positiontype'] = each.xpath('./td[2]/text()').extract()[0]
            except:
                pass
            item['peoplenum']  = each.xpath('./td[3]/text()').extract()[0]
            item['worklocation'] = each.xpath("./td[4]/text()").extract()[0]
            # publish time
            item['publishtime'] = each.xpath("./td[5]/text()").extract()[0]
            yield item
        if self.offset<1680:
            self.offset+=10
        yield  scrapy.Request(self.url+str(self.offset),callback=self.parse)
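
The lone 'h' at the end of the error message is the giveaway: Scrapy iterates over start_urls to build its initial requests, and iterating a string yields one character at a time, so the first "URL" it tries to request is just 'h', which has no scheme. A throwaway snippet (not part of the spider) to illustrate:

# Iterating a string yields single characters, which Scrapy then treats as URLs.
start_urls = 'http://hr.tencent.com/position.php?&start=0'
print(list(start_urls)[:4])   # ['h', 't', 't', 'p']  -> "Missing scheme in request url: h"

# Iterating a list yields whole URL strings, which is what Scrapy expects.
start_urls = ['http://hr.tencent.com/position.php?&start=0']
print(list(start_urls))       # ['http://hr.tencent.com/position.php?&start=0']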

The corrected code is as follows:

# -*- coding: utf-8 -*-
import scrapy
from scrapy import Selector
from miao.items import MiaoItem
class MmiaoSpider(scrapy.Spider):
    name = 'mmiao'
    offset = 0
    allowed_domains = ["tencent.com"]
    url = 'http://hr.tencent.com/position.php?&start='
    start_urls = ['http://hr.tencent.com/position.php?&start=' + str(offset)]
    addurl = 'https://hr.tencent.com/'
    def parse(self, response):
        for each in response.xpath("//tr[@class='even']|//tr[@class='odd']"):
            item = MiaoItem()
            item['positionname'] = each.xpath('./td[1]/a/text()').extract()[0]
            item['positionlink'] = self.addurl+each.xpath('./td[1]/a/@href').extract()[0]
            try:
                item['positiontype'] = each.xpath('./td[2]/text()').extract()[0]
            except IndexError:
                # some rows have no position-type cell, so the xpath result is empty
                pass
            item['peoplenum']  = each.xpath('./td[3]/text()').extract()[0]
            item['worklocation'] = each.xpath("./td[4]/text()").extract()[0]
            # publish time
            item['publishtime'] = each.xpath("./td[5]/text()").extract()[0]
            yield item
        # request the next results page until the offset limit is reached
        if self.offset < 1680:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
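
As a side note, the page-by-page chaining in parse() can also be replaced by overriding start_requests(), which Scrapy calls once at startup to produce the initial requests. The sketch below is only an illustration of that alternative, reusing the same url template and 1680 offset limit; it is not the fix this post describes.

import scrapy

class MmiaoSpider(scrapy.Spider):
    name = 'mmiao'
    allowed_domains = ["tencent.com"]
    url = 'http://hr.tencent.com/position.php?&start='

    # start_requests() replaces start_urls entirely: every page request is
    # scheduled up front, so the offset bookkeeping in parse() is no longer needed.
    def start_requests(self):
        for offset in range(0, 1690, 10):  # offsets 0, 10, ..., 1680, same range as above
            yield scrapy.Request(self.url + str(offset), callback=self.parse)

    def parse(self, response):
        ...  # same row extraction as in the corrected spider above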

Done, it works now.

That is all for this post.
