YOLOv3: processing a single image and a video with darknet's bundled Python interface darknet.py

Goal

Use the Python interface bundled with darknet (https://github.com/pjreddie/darknet) to process images and videos.

Project download

git clone https://github.com/pjreddie/darknet
cd darknet
# tweak a few build settings; see the end of my previous post for details
# (https://blog.csdn.net/qq_20241587/article/details/111176541)
make
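
After make finishes, it is worth a quick sanity check that libdarknet.so was actually built and can be loaded with ctypes before wiring up the full script. A minimal check, assuming it is run from the darknet directory (adjust the path otherwise):

from ctypes import CDLL, RTLD_GLOBAL

# smoke test: an OSError here means the path to libdarknet.so is wrong or the build failed
lib = CDLL("./libdarknet.so", RTLD_GLOBAL)
print(lib)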

Processing a single image

That is: given the path to an image and a location for the annotated result, run model detection + draw the boxes + save the output as a new image.

In the figure below, the left side is the image before processing and the right side is after; both file locations are specified in the code.

The code is below.

Core changes:

1. lib = CDLL("libdarknet.so", RTLD_GLOBAL): change this to the actual path of libdarknet.so in your own project.

2. cv2.rectangle draws the bounding box and cv2.putText places the label text; to keep the box and the text from overlapping I added a small offset.

The detection output of YOLOv3 is unpacked like this:

            label_i = box_i[0]    # label
            prob_i = box_i[1]     # label confidence
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]      # bbox (x, y, w, h): center position of the box plus its width and height, in image pixels




            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
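
Note that for objects near the image border the computed corners can fall outside the image. OpenCV clips the rectangle on its own, but the putText position can end up off-screen, so a small helper like the sketch below (my own, not part of darknet.py) can clamp the corners before drawing:

def center_to_corners(x, y, w, h, img_w, img_h):
    # convert (center_x, center_y, width, height) to corner points clamped to the image
    x1 = max(int(x - w / 2), 0)
    y1 = max(int(y - h / 2), 0)
    x2 = min(int(x + w / 2), img_w - 1)
    y2 = min(int(y + h / 2), img_h - 1)
    return (x1, y1), (x2, y2)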



 

from ctypes import *
import math
import random
import cv2
import os


def sample(probs):
    s = sum(probs)
    probs = [a / s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs) - 1


def c_array(ctype, values):
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]


lib = CDLL("/home/jiantang/桌面/enn/workcode/yoloV3/github/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)


# net_d = load_net(b"../cfg/yolov3.cfg", b"../yolov3.weights", 0)
# meta_d = load_meta(b"../cfg/coco.data")


def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res
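
# For reference, detect() returns a list sorted by descending confidence; with the COCO model
# each entry looks roughly like this (numbers are made up for illustration):
#   [(b'person', 0.998, (371.5, 260.2, 120.3, 340.8)),
#    (b'dog',    0.912, (144.1, 300.7,  90.5, 110.2))]
# i.e. (label, confidence, (center_x, center_y, width, height)) with the box given in pixels.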



def detect_and_boxing(net, meta, b_path, raw_path, save_path,
                      color=(0, 255, 255), line_type=1):
    image = cv2.imread(raw_path)
    r = detect(net, meta, b_path)
    if not len(r) > 0:
        print("nothing detected in this picture!")
    else:
        for i in range(len(r)):
            box_i = r[i]
            label_i = box_i[0]
            prob_i = box_i[1]
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]
            text_ = str(label_i) + "," + str(round(prob_i, 3))

            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
            cv2.imwrite(save_path, image)
            print("boxing ", i, " found ", label_i, "with prob = ", prob_i, ", finished!")
            print("box position is :", box_i[2])


if __name__ == "__main__":
    # net = load_net("cfg/densenet201.cfg", "/home/pjreddie/trained/densenet201.weights", 0)
    # im = load_image("data/wolf.jpg", 0, 0)
    # meta = load_meta("cfg/imagenet1k.data")
    # r = classify(net, meta, im)
    # print(r)
    net = load_net(b"../cfg/yolov3.cfg", b"../yolov3.weights", 0)
    meta = load_meta(b"../cfg/coco.data")

    b_path = b"../data/hat_sougou2.jpg"
    raw_path = "../data/hat_sougou2.jpg"
    save_path = "/home/jiantang/z_test/hat_sougou2.jpg"
    detect_and_boxing(net, meta, b_path=b_path, raw_path=raw_path, save_path=save_path)
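
Note that the relative paths ("../cfg/yolov3.cfg", "../yolov3.weights", "../cfg/coco.data") assume the script lives one level below the darknet root (for example in the bundled python/ folder) and is run from there. Absolute paths work just the same; the ones below are only illustrative:

    net = load_net(b"/home/user/darknet/cfg/yolov3.cfg", b"/home/user/darknet/yolov3.weights", 0)
    meta = load_meta(b"/home/user/darknet/cfg/coco.data")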

Processing a video

That is: given the path to a video and locations for the result video and the intermediate temporary frames, run: frame extraction + model detection + draw boxes + save the new frames + stitch them back into a new video.

(The source video is 43 seconds long, 63 MB, .avi format.

The resulting annotated video is 41 seconds at 55 frames per second, 6.9 GB, .avi format, with each frame around 400 KB.)

The result looks as follows; I won't show a side-by-side with the original video, and at 55 frames per second the playback feels perfectly smooth.

The code has a few known issues:

1. The .avi output is huge: 63 MB became 6.9 GB, and this code only supports the .avi format.

2. The source video's frame rate is not read, so after manually specifying a frame rate the new video's duration differs slightly from the original (see the sketch after this list for reading the source FPS).

3. Processing speed is painful (the many per-frame disk reads and writes dominate the runtime; the model detection itself is fast on a GPU. For real-time use, skip saving the frames and annotated frames to disk and work directly in memory/display).
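
For problem 2 (and the hard-coded size in pics_to_video below), here is a minimal sketch, assuming OpenCV can open the source file, that reads the frame rate and frame size from the source video before creating the writer; the paths are only illustrative:

import cv2

vc = cv2.VideoCapture('/path/to/sample_video.avi')
cap_fps = vc.get(cv2.CAP_PROP_FPS)               # source frame rate
size = (int(vc.get(cv2.CAP_PROP_FRAME_WIDTH)),   # VideoWriter expects (width, height)
        int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT)))
vc.release()

fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
video = cv2.VideoWriter('/path/to/sample_video_new.avi', fourcc, cap_fps, size)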

The code is below (create a new file darknet_video.py):

from ctypes import *
import math
import random
import cv2
import os


def sample(probs):
    s = sum(probs)
    probs = [a / s for a in probs]
    r = random.uniform(0, 1)
    for i in range(len(probs)):
        r = r - probs[i]
        if r <= 0:
            return i
    return len(probs) - 1


def c_array(ctype, values):
    arr = (ctype * len(values))()
    arr[:] = values
    return arr


class BOX(Structure):
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]


class DETECTION(Structure):
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int)]


class IMAGE(Structure):
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]


class METADATA(Structure):
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]


lib = CDLL("/home/jiantang/桌面/enn/workcode/yoloV3/github/darknet/libdarknet.so", RTLD_GLOBAL)
# lib = CDLL("libdarknet.so", RTLD_GLOBAL)
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int

predict = lib.network_predict
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)

set_gpu = lib.cuda_set_device
set_gpu.argtypes = [c_int]

make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE

get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int)]
get_network_boxes.restype = POINTER(DETECTION)

make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)

free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]

free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]

network_predict = lib.network_predict
network_predict.argtypes = [c_void_p, POINTER(c_float)]

reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]

load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p

do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]

free_image = lib.free_image
free_image.argtypes = [IMAGE]

letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE

load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA

load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE

rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]

predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)

net_d = load_net(b"../cfg/yolov3.cfg", b"../yolov3.weights", 0)
meta_d = load_meta(b"../cfg/coco.data")


def classify(net, meta, im):
    out = predict_image(net, im)
    res = []
    for i in range(meta.classes):
        res.append((meta.names[i], out[i]))
    res = sorted(res, key=lambda x: -x[1])
    return res


def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45):
    im = load_image(image, 0, 0)
    num = c_int(0)
    pnum = pointer(num)
    predict_image(net, im)
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum)
    num = pnum[0]
    if nms:
        do_nms_obj(dets, num, meta.classes, nms)

    res = []
    for j in range(num):
        for i in range(meta.classes):
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                res.append((meta.names[i], dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    res = sorted(res, key=lambda x: -x[1])
    free_image(im)
    free_detections(dets, num)
    return res


# detect all boxes, draw each box with its label on the image, and save the annotated image to save_path
def detect_and_boxing_default(b_path, raw_path, save_path,
                              color=(0, 255, 255), line_type=1):
    print("checking pic...", raw_path)
    image = cv2.imread(raw_path)
    r = detect(net_d, meta_d, b_path)
    if not len(r) > 0:
        print("nothing detected in this picture!")
    else:
        print(len(r), " stuff detected in this picture! boxing...")
        print("going to save as :", save_path)
        for i in range(len(r)):
            box_i = r[i]
            label_i = box_i[0]
            prob_i = box_i[1]
            x_ = box_i[2][0]
            y_ = box_i[2][1]
            w_ = box_i[2][2]
            h_ = box_i[2][3]
            text_ = str(label_i) + "," + str(round(prob_i, 3))

            cv2.rectangle(image, (int(x_ - w_ / 2), int(y_ - h_ / 2)),
                          (int(x_ + w_ / 2), int(y_ + h_ / 2)),
                          color, line_type)
            cv2.putText(image, text_, (int(x_ - w_ / 2 - 5), int(y_ - h_ / 2 - 5)), cv2.FONT_HERSHEY_DUPLEX, 0.7, color,
                        2)
            cv2.imwrite(save_path, image)


def video_to_pics(video_path='/home/jiantang/work_data/sample_video.avi',
                  video_out_path='/home/jiantang/work_data/'):
    print("video_to_pics start...")
    vc = cv2.VideoCapture(video_path)
    c = 1
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        print('open error!')
        rval = False
    count_c = 1
    while rval:
        rval, frame = vc.read()
        if rval:
            print("dealing with frame : ", count_c)
            cv2.imwrite(video_out_path + str(int(c)) + '.jpg', frame)
        c += 1
        cv2.waitKey(1)
        count_c += 1
    vc.release()
    print("video_to_pics finished...")


def pics_boxing(pics_path, save_path):
    raw_save_path = save_path
    print("pics_boxing start...")
    print("checking path : ", pics_path)
    pics_names = os.listdir(pics_path)
    print("found pics num :", len(pics_names))
    count_c = 1
    for name in pics_names:
        print("dealing with pics ", count_c)
        raw_path = pics_path + "/" + name
        b_path = bytes(raw_path, encoding="utf8")
        save_path = raw_save_path + "/" + name
        detect_and_boxing_default(b_path, raw_path, save_path)
        count_c += 1
    print("pics_boxing finished...")


def pics_to_video(pics_path, video_new_path='/home/jiantang/work_data/sample_video_new.avi', ):
    print("pics_to_video start...")
    print("checking files in :", pics_path)
    file_list = os.listdir(pics_path)
    # keep only .jpg files and strip the .jpg extension
    tmp_jpg = []
    for name in file_list:
        if not name.endswith('.jpg'):
            print("found sth called:", name, ", skip it.")
            continue
        tmp_jpg.append(name.replace(".jpg", ""))
    # sort names
    tmp_jpg.sort(key=int)

    fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')  # write the output video as .avi (I420 codec)
    # cap_fps is the output frame rate and can be set freely; size must match the frame size. Note that
    # img.shape returns (height, width, channel), but size here has to be passed as (width, height),
    # otherwise the resulting file will not open (errors such as "cannot decode the multiplexed stream").
    # For example, if img.shape is (1080, 1920, 3), set size to (1920, 1080).
    cap_fps = 50
    size = (1920, 1080)
    # set up the video writer
    video = cv2.VideoWriter(video_new_path, fourcc, cap_fps, size)
    # video.write saves color frames by default; the frames here are color, so write them directly
    for name in tmp_jpg:
        img_E = cv2.imread(pics_path + "/" + name + ".jpg")
        print("reading....")
        video.write(img_E)
    video.release()
    print("pics_to_video finished...")


video_path = '/home/jiantang/work_data/sample_video.avi'
video_out_path = '/home/jiantang/work_data/pics/'
video_out_dir = '/home/jiantang/work_data/pics'
video_out_new_path = '/home/jiantang/work_data/pics_new'
video_new_path = '/home/jiantang/work_data/sample_video_new.avi'

video_to_pics(video_path, video_out_path)
pics_boxing(video_out_dir, video_out_new_path)
pics_to_video(video_out_new_path, video_new_path)

 
