組長要求把gerrit上所有的提交和comment數據都收集起來寫到excel裏面。現學python把這事給自動化了。之前連python語法都只知道一點點,大部分是現學,連中午飯都沒時間吃。寫完只有一個字:爽!於是晚上沙縣加了個雞腿。
大致思路
先說下,這個python腳本是根據BrainZou在github上的代碼改寫的https://github.com/BrainZou/PythonSpider/tree/master/Gerrit,非常感謝這位仁兄,不然我今天肯定下不了班了- -
用chrome打開gerrit,F12打開開發者工具點擊Network->XHR可以看到頁面的HTTP請求,沒有就刷新一下,如下圖:
然後用python的第三方庫requests來進行請求,得到結果後進行處理,處理完成後寫入csv文件,csv可以用任何類似excel的軟件打開。
運行環境:安裝python3,再安裝pip3,然後運行pip3 install requests來安裝第三方庫requests,最後使用python3 gerrit.py運行
通過requests可以直接請求前後端分離網站的數據接口,拿到結構化的JSON數據;非前後端分離的網站則只能先爬取網頁源碼,再自行解析提取所需數據。
完整代碼
代碼比較簡單,都加了註釋所以就直接上代碼了,注意加上自己公司gerrit網站的cookie。
#coding:utf-8
import requests
import json
import re
import csv
# Default Gerrit change-list URLs (25 items per page). The "next page"
# request appends the last item's _sortkey (e.g. 0049b98c0000f3b8).
UNMERGE_URL = 'https://gerrit-sin.xxx.biz/changes/?n=25&O=1'
MERGED_URL = 'https://gerrit-sin.xxx.biz/changes/?q=status:merged&n=25&O=1'
# Gerrit account ids, used to tell which team added a comment.
OUR_IDS = [9,10,11,12,13,16,22]
CUSTOMERS_IDS = [2,5]
# 請求訪問接口,獲取到json格式數據,稍加處理,寫入表格。
def requesst(url, limit_time, start, merged):
    """Fetch one page of Gerrit changes, enrich each change with review and
    comment data, append one CSV row per change and echo it to stdout.

    Args:
        url:        base Gerrit /changes/ query URL; pagination offset is
                    appended here as the "S" query parameter.
        limit_time: compact timestamp string (e.g. "20171211000000"); a
                    change created before this stops the crawl.
        start:      pagination offset for this page.
        merged:     True when crawling merged changes — skips the
                    review-time estimate lookup (not available for them).

    Returns:
        (continue_flag, next_start): continue_flag is 0 when the page was
        empty or a change older than limit_time was seen, else 1;
        next_start is the offset for the following page.
    """
    continue_flag = 1
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
        # Session cookie for the company Gerrit — replace with your own.
        'Cookie': 'GerritAccount=aHyuzmj3aQisN6CyELrhcU1JnxOEcJK; XSRF_TOKEN=aHyuS4.LW.5ZOwZizn7DMLUV7IfZ-Pi'
    }
    request_url = url + "&S=" + str(start)
    data = requests.get(request_url, headers=headers, verify=True).text
    # Gerrit prefixes every JSON response with the junk guard ")]}'" —
    # strip it before parsing.
    remove = re.compile('\)\]\}\'')
    data = re.sub(remove, "", data)
    data_json = json.loads(data)
    if (len(data_json) <= 0):
        # Empty page: nothing left to paginate through.
        continue_flag = 0
    # Append mode: main() writes the header row before the crawl starts.
    with open("gerrit_commits.csv", "a", newline='') as csvfile:
        writer = csv.writer(csvfile)
        # Walk every change returned on this page.
        for one in data_json:
            one["created"] = time_format(one["created"])
            if (time_cmp(one["created"], limit_time) < 0):
                # Change is older than the cut-off: stop crawling.
                print("已經發現有超過時間的數據")
                continue_flag = 0
                break
            else:
                project = one["project"];
                owner = get_owner(one["owner"]["_account_id"])
                # For changes still under review, fetch the estimated review
                # time from the reviewassistant plugin endpoint.
                if (merged != True):
                    advice_url = "https://gerrit-sin.xxx.biz/changes/" + str(one["_number"]) + "/revisions/1/reviewassistant~advice"
                    advice_data = requests.get(advice_url, headers=headers, verify=True).text
                    advice_data = re.sub(remove, "", advice_data)
                    # The estimate sits between the JSON-escaped "<strong>"
                    # and "</strong> reviewing" markers in the response body.
                    estimated = advice_data[advice_data.index("\\u003cstrong\\u003e") + len("\\u003cstrong\\u003e"):advice_data.index("\\u003c/strong\\u003e reviewing")]
                # Fetch all review comments attached to this change.
                comment_url = "https://gerrit-sin.xxx.biz/changes/" + str(one["_number"]) + "/comments"
                comment_data = requests.get(comment_url, headers=headers, verify=True).text
                comment_data = re.sub(remove, "", comment_data)
                comment_dict = json.loads(comment_data)
                ts_size, customer_size = get_comment_size(comment_dict)
                print("comment size = " + str(ts_size) + ":" + str(customer_size))
                # Write one row for this change to the spreadsheet.
                writer.writerow([
                    owner,
                    project[project.rfind("/"):],
                    one["subject"],
                    "https://gerrit-sin.xxx.biz/" + str(one["_number"]),
                    one["insertions"],
                    one["deletions"],
                    "NA" if merged == True else estimated,
                    customer_size + ts_size,
                    customer_size,
                    ts_size,
                    merged
                ])
                # Echo the same row to the console as a progress log.
                print(
                    owner,
                    project[project.rfind("/"):],
                    one["subject"],
                    "https://gerrit-sin.xxx.biz/" + str(one["_number"]),
                    one["insertions"],
                    one["deletions"],
                    "NA" if merged == True else estimated,
                    customer_size + ts_size,
                    customer_size,
                    ts_size,
                    merged
                )
    return continue_flag, start + len(data_json)
#通過id獲取owner姓名
def get_owner(account_id):
    """Map a Gerrit account id to a member name; 'error' if unknown."""
    # Account 1 has a bespoke label; the rest follow the 'member<id>' pattern.
    names = {1: 'memer1'}
    for known_id in (5, 9, 10, 11, 12, 13, 16, 22):
        names[known_id] = 'member' + str(known_id)
    return names.get(account_id, 'error')
#解析comment json獲取comment個數
def get_comment_size(comment_dict, ts_ids=None, customer_ids=None):
    """Count comments per team in a Gerrit /comments response.

    Bug fix: the original referenced undefined names TS_IDS / CUSTOMER_IDS
    (NameError on every call); the module actually defines the id lists as
    OUR_IDS and CUSTOMERS_IDS. They are now the defaults of two new optional
    parameters, keeping the one-argument call signature working.

    Args:
        comment_dict: mapping of file path -> list of comment dicts, each
                      with an "author" dict carrying "_account_id".
        ts_ids:       account ids counted as our team (default OUR_IDS).
        customer_ids: account ids counted as customers (default CUSTOMERS_IDS).

    Returns:
        (ts_size, customer_size) comment counts.
    """
    if ts_ids is None:
        ts_ids = OUR_IDS
    if customer_ids is None:
        customer_ids = CUSTOMERS_IDS
    ts_size = 0
    customer_size = 0
    for comments in comment_dict.values():
        for comment in comments:
            _account_id = comment["author"]["_account_id"]
            if _account_id in ts_ids:
                ts_size += 1
            if _account_id in customer_ids:
                customer_size += 1
    return ts_size, customer_size
#對時間"2017-12-11 02:07:27.000000000"格式化爲20171211020727
def time_format(time):
    """Normalize "2017-12-11 02:07:27.000000000" to "20171211020727".

    Drops the fixed nanosecond suffix first, then deletes the remaining
    separator characters (space, colon, dash) in one translate pass.
    """
    compact = time.replace('.000000000', "")
    return compact.translate(str.maketrans('', '', ' :-'))
#兩個字符串轉int 相減計算大小
def time_cmp(first_time, second_time):
print(first_time)
print(second_time)
return int(first_time) - int(second_time)
def main():
    """Entry point: write the CSV header, then crawl unmerged followed by
    merged changes page by page until the cut-off time is reached."""
    limit_time = input("請輸入截止時間 格式:20171211000000\n")
    # Fresh file with the header row; requesst() appends data rows after.
    with open("gerrit_commits.csv", "w", newline='') as csvfile:
        csv.writer(csvfile).writerow(["Owner", "Module", "Title", "Gerrit link", "Insertions", "Deletions",
                                      "Gerrit estimated review time", "Total comments", "Total Customer comments",
                                      "Total TS comments", "Merged"])
    # One paginated crawl per change list: unmerged first, then merged.
    for crawl_url, is_merged in ((UNMERGE_URL, False), (MERGED_URL, True)):
        keep_going = 1
        offset = 0
        while keep_going:
            keep_going, offset = requesst(crawl_url, limit_time, offset, is_merged)
if __name__ == '__main__':
main()