Python YOLOv3 (YOLOv3_TensorFlow-master): moving predict_add.py (the image-prediction script) to another directory requires copying the utils folder and model.py along with it

Move the prediction script predict_add.py out of the YOLOv3_TensorFlow-master folder into the home directory and run it directly, and the following errors appear.

wu@wu-X555LF:~$ python predict_add.py 
Traceback (most recent call last):
  File "predict_add.py", line 11, in <module>
    from utils.misc_utils import parse_anchors, read_class_names
ImportError: No module named utils.misc_utils
wu@wu-X555LF:~$ python predict_add.py 
Traceback (most recent call last):
  File "predict_add.py", line 16, in <module>
    from model import yolov3
ImportError: No module named model

Looking at the imports: tensorflow, numpy, argparse, cv2, and os are either installed system-wide or ship with Python, while utils and model are local packages loaded from the repository directory. Python resolves an import by searching the directories on sys.path in order, and for a script run directly the first entry is the directory containing the script itself; utils and model are not installed anywhere else, so unless they sit next to predict_add.py (or their location is added to sys.path), the import fails with the errors above. The imports in question look like this:

# coding: utf-8

from __future__ import division, print_function

import tensorflow as tf
import numpy as np
import argparse
import cv2
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from utils.data_aug import letterbox_resize

from model import yolov3
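An alternative to copying files around is to put the repository directory on sys.path before these local imports resolve. The following is only a minimal sketch, assuming predict_add.py sits right next to the YOLOv3_TensorFlow-master folder; repo_dir is an illustrative name, not something from the repository:

# coding: utf-8
import os
import sys

# Assumed layout: predict_add.py lives beside the YOLOv3_TensorFlow-master folder.
repo_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'YOLOv3_TensorFlow-master')
sys.path.insert(0, repo_dir)  # let utils/ and model.py be found inside the repo

from utils.misc_utils import parse_anchors, read_class_names
from model import yolov3

Either way the ImportError disappears; the rest of this post takes the copying route, which keeps the relocated script self-contained.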

So copy the utils folder and model.py into the same directory as predict_add.py.

Then update the paths inside predict_add.py (the anchor file, class names file, checkpoint, and the image directory) so that they point back into YOLOv3_TensorFlow-master, and the script runs again.
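Before running it, a quick sanity check can confirm that the copied files and the repository data are where the relocated script expects them. This is a minimal sketch; the paths listed are simply the defaults used by the script below:

# coding: utf-8
import os

# Everything the relocated predict_add.py expects to find, relative to its own directory.
required_paths = [
    './utils',                                             # package copied from the repo
    './model.py',                                          # module copied from the repo
    './YOLOv3_TensorFlow-master/data/demo_data',           # images to run prediction on
    './YOLOv3_TensorFlow-master/data/yolo_anchors.txt',    # anchor file
    './YOLOv3_TensorFlow-master/data/my_data/data.names',  # class names
]
for path in required_paths:
    print('%-55s %s' % (path, 'OK' if os.path.exists(path) else 'MISSING'))

The full modified predict_add.py: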

# coding: utf-8

from __future__ import division, print_function

import tensorflow as tf
import numpy as np
import argparse
import cv2
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from utils.data_aug import letterbox_resize

from model import yolov3

picture_dir = './YOLOv3_TensorFlow-master/data/demo_data'  # images to run prediction on
output_dir = './predict.txt'                               # detection results are written here
output_txt = open(output_dir, "w")

parser = argparse.ArgumentParser(description="YOLO-V3 test single image test procedure.")
#parser.add_argument("input_image", type=str,
#                    help="The path of the input image.")
parser.add_argument("--anchor_path", type=str, default="./YOLOv3_TensorFlow-master/data/yolo_anchors.txt",
                    help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
                    help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--letterbox_resize", type=lambda x: (str(x).lower() == 'true'), default=True,
                    help="Whether to use the letterbox resize.")
parser.add_argument("--class_name_path", type=str, default="./YOLOv3_TensorFlow-master/data/my_data/data.names",
                    help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./YOLOv3_TensorFlow-master/data/darknet_weights/best/best_model_Epoch_98_step_50291_mAP_0.7358_loss_3.6063_lr_1e-05",
                    help="The path of the weights to restore.")
args = parser.parse_args()

args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)

color_table = get_color_table(args.num_class)

with tf.Session() as sess:
    input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
    yolo_model = yolov3(args.num_class, args.anchors)
    with tf.variable_scope('yolov3'):
        pred_feature_maps = yolo_model.forward(input_data, False)
    pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)

    pred_scores = pred_confs * pred_probs

    boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=200, score_thresh=0.3, nms_thresh=0.45)

    saver = tf.train.Saver()
    saver.restore(sess, args.restore_path)
    
    img_list = os.listdir(picture_dir)
    for i in range(0,len(img_list)):
        img_path = os.path.join(picture_dir,img_list[i])
        img_ori = cv2.imread(img_path) #args.input_image
        if args.letterbox_resize:
            img, resize_ratio, dw, dh = letterbox_resize(img_ori, args.new_size[0], args.new_size[1])
        else:
            height_ori, width_ori = img_ori.shape[:2]
            # plain resize only in the non-letterbox branch; letterbox_resize already returns the resized image
            img = cv2.resize(img_ori, tuple(args.new_size))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.asarray(img, np.float32)
        img = img[np.newaxis, :] / 255.
        boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})

        # rescale the coordinates to the original image
        if args.letterbox_resize:
            boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
            boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
        else:
            boxes_[:, [0, 2]] *= (width_ori/float(args.new_size[0]))
            boxes_[:, [1, 3]] *= (height_ori/float(args.new_size[1]))

        print("box coords:")
        print(boxes_)
        print('*' * 30)
        print("scores:")
        print(scores_)
        print('*' * 30)
        print("labels:")
        print(labels_)
        output_txt.write(img_path)  # img_path
        for j in range(len(boxes_)):
            label_class = ' '+ str(labels_[j]) + ' '
            x0, y0, x1, y1 = boxes_[j]
            locate = str(x0)+' '+str(y0)+' '+str(x1)+' '+str(y1)
            output_txt.write(label_class)
            output_txt.write(locate)
        output_txt.write('\n')
            #print(type(labels_[j]))  # <type 'numpy.int32'>
            #print(type(boxes_[j]))   # <type 'numpy.ndarray'>
            #output_txt.write(labels_[j])  # classes_label
            #output_txt.write(boxes_[j])   # [x0, y0, x1, y1]
            #plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[j]] + ', {:.2f}%'.format(scores_[j] * 100), color=color_table[labels_[j]])
        #cv2.imshow('Detection result', img_ori)
        #cv2.imwrite('detection_result.jpg', img_ori)
        #cv2.waitKey(0)

output_txt.close()  # flush and close the results file
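Each line written to predict.txt is the image path followed by one "label x0 y0 x1 y1" group per detected box. A minimal sketch for reading the file back; read_predictions is a hypothetical helper for illustration, not part of the repository:

# coding: utf-8

def read_predictions(txt_path='./predict.txt'):
    # Each line: <img_path> <label> <x0> <y0> <x1> <y1> [<label> <x0> <y0> <x1> <y1> ...]
    results = {}
    with open(txt_path) as f:
        for line in f:
            fields = line.split()
            if not fields:
                continue
            img_path, rest = fields[0], fields[1:]
            boxes = []
            for k in range(0, len(rest), 5):  # each detection occupies 5 fields
                label = int(rest[k])
                x0, y0, x1, y1 = [float(v) for v in rest[k + 1:k + 5]]
                boxes.append((label, x0, y0, x1, y1))
            results[img_path] = boxes
    return results

print(read_predictions())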

 
