Loading Custom Data in a TensorFlow Classifier Project

After typing out the classifier demo code from the official TensorFlow site, it ran fine and the results were decent. But sooner or later I need to train on my own data, so I set about loading a custom dataset. The demo, however, only calls fashion_mnist.load_data() without showing the actual loading process, so I looked up some references and am recording that process here.
First, the modules we will need:

import os

import keras
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split

For an image classifier project, first decide the resolution the processed images will have; this example uses 30×30 pixels:

IMG_SIZE_X = 30
IMG_SIZE_Y = 30

Next, set the directory where your images are stored:

image_path = r'D:\Projects\ImageClassifier\data\set'
path = r".\data"
# You can also build the path relatively:
# image_path = os.path.join(path, "set")

The structure under that directory is as follows (one subdirectory per class):

data/set/
├── 動漫/
├── 風景/
├── 美女/
├── 物語/
└── 櫻花/

The corresponding labels.txt is as follows:

動漫
風景
美女
物語
櫻花

Next is loading labels.txt, as follows:

label_name = "labels.txt"
label_path = os.path.join(path, label_name)
class_names = np.loadtxt(label_path, dtype=str)

For simplicity, numpy's loadtxt function is used to load it directly.
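
One caveat: labels.txt here contains non-ASCII class names, so reading it can raise a decode error on some platforms. A minimal sketch of a safer call, assuming the file is saved as UTF-8 (numpy added the encoding parameter in version 1.14):

class_names = np.loadtxt(label_path, dtype=str, encoding="utf-8")
print(class_names)  # e.g. ['動漫' '風景' '美女' '物語' '櫻花']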

Now comes the actual processing of the image data; the comments are written inline:

re_load = False
re_build = False
# re_load = True
re_build = True

data_name = "data.npz"
data_path = os.path.join(path, data_name)
model_name = "model.h5"
model_path = os.path.join(path, model_name)

count = 0
# max_size caps how many images are read per class; it is only used for testing (set effectively unlimited here)
max_size = 2000000000

# Check whether serialized data already exists; re_load is a switch that forces reprocessing (used for testing, can be removed).
if not os.path.exists(data_path) or re_load:
    labels = []
    images = []
    print('Handle images')
    # labels.txt corresponds one-to-one with the class subdirectories (each subdirectory's name is a label in labels.txt), so we can join each entry of class_names onto the path and read from there
    for index, name in enumerate(class_names):
        # The joined subdirectory path
        classpath = os.path.join(image_path, name)
        # Make sure it is actually a directory
        if not os.path.isdir(classpath):
            continue
        # limit is only used for testing and can be removed
        limit = 0
        for image_name in os.listdir(classpath):
            if limit >= max_size:
                break
            # The joined path of the image to be processed
            imagepath = os.path.join(classpath, image_name)
            count = count + 1
            limit = limit + 1
            # Open the image with PIL
            img = Image.open(imagepath)
            # Resize to the resolution decided on at the start
            img = img.resize((IMG_SIZE_X, IMG_SIZE_Y))
            # Convert to grayscale; color channels would interfere with the result and add computation
            img = img.convert("L")
            # Convert to a numpy array
            img = np.array(img)
            # Reshape (30, 30) into (1, 30, 30) (i.e. `channels_first`); (30, 30, 1) (`channels_last`) would also work, but (1, 30, 30) makes previewing the processed images easier later on
            img = np.reshape(img, (1, IMG_SIZE_X, IMG_SIZE_Y))
            # Build labels inside the loop; each entry is the index of the corresponding element in class_names
            labels.append([index])
            # Append to images; everything is converted in one batch at the end
            images.append(img)
            # Progress output; can be removed
            print("{} class: {} {} limit: {} {}"
                  .format(count, index + 1, class_names[index], limit, imagepath))
    # Finally, convert images and labels to numpy arrays in one go
    npy_data = np.array(images)
    npy_labels = np.array(labels)
    # The data only needs to be processed once, so we serialize the result here with numpy's built-in savez
    np.savez(data_path, x=npy_data, y=npy_labels)
    print("Save images by npz")
else:
    # If serialized data already exists, load it directly for speed
    npy_data = np.load(data_path)["x"]
    npy_labels = np.load(data_path)["y"]
    print("Load images by npz")
image_data = npy_data
labels_data = npy_labels
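
Since each image is stored as (1, 30, 30), previewing a processed image only requires indexing out the single channel. A minimal sketch (the index 0 is arbitrary, purely for illustration):

plt.imshow(image_data[0][0], cmap='gray')
plt.title(class_names[labels_data[0][0]])
plt.show()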

At this point the preprocessing of the raw data is complete. Only one last step remains, and we will have the same kind of result that fashion_mnist.load_data() returns in the demo. The code is as follows:

# The last step is to split the raw data into training data and test data
train_images, test_images, train_labels, test_labels = \
    train_test_split(image_data, labels_data, test_size=0.2, random_state=6)
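
If the classes differ noticeably in size, it can be worth keeping the class proportions identical in both splits. A variant sketch using scikit-learn's stratify parameter (an addition of mine, not part of the original code):

# stratify keeps the per-class ratios equal in the train and test sets
train_images, test_images, train_labels, test_labels = \
    train_test_split(image_data, labels_data, test_size=0.2,
                     random_state=6, stratify=labels_data.ravel())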

The code for printing the related information is attached here as well:

print("_________________________________________________________________")
print("%-28s %-s" % ("Name", "Shape"))
print("=================================================================")
print("%-28s %-s" % ("Image Data", image_data.shape))
print("%-28s %-s" % ("Labels Data", labels_data.shape))
print("=================================================================")

print('Split train and test data, test_size=0.2')
print("_________________________________________________________________")
print("%-28s %-s" % ("Name", "Shape"))
print("=================================================================")
print("%-28s %-s" % ("Train Images", train_images.shape))
print("%-28s %-s" % ("Test Images", test_images.shape))
print("%-28s %-s" % ("Train Labels", train_labels.shape))
print("%-28s %-s" % ("Test Labels", test_labels.shape))
print("=================================================================")

After that, don't forget to normalize:

print("Normalize images")
train_images = train_images / 255.0
test_images = test_images / 255.0
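
The division above implicitly converts the uint8 pixel values to floats. If you prefer to make that conversion explicit, a stylistic variant (not required):

train_images = train_images.astype("float32") / 255.0
test_images = test_images.astype("float32") / 255.0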

Finally, here is the complete code for loading the custom data:

import os

import keras
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from keras.layers import *
from keras.models import *
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Chinese font support in matplotlib
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
re_load = False
re_build = False
# re_load = True
re_build = True
epochs = 50
batch_size = 5
count = 0
max_size = 2000000000
IMG_SIZE_X = 30
IMG_SIZE_Y = 30
np.random.seed(9277)
image_path = r'D:\Projects\ImageClassifier\data\set'
path = ".\data"
data_name = "data.npz"
data_path = os.path.join(path, data_name)
model_name = "model.h5"
model_path = os.path.join(path, model_name)
label_name = "labels.txt"
label_path = os.path.join(path, label_name)
class_names = np.loadtxt(label_path, dtype=str)
print('Load class names')
if not os.path.exists(data_path) or re_load:
    labels = []
    images = []
    print('Handle images')
    for index, name in enumerate(class_names):
        classpath = os.path.join(image_path, name)
        if not os.path.isdir(classpath):
            continue
        limit = 0
        for image_name in os.listdir(classpath):
            if limit >= max_size:
                break
            imagepath = os.path.join(classpath, image_name)
            count = count + 1
            limit = limit + 1
            img = Image.open(imagepath)
            img = img.resize((IMG_SIZE_X, IMG_SIZE_Y))
            img = img.convert("L")
            img = np.array(img)
            img = np.reshape(img, (1, IMG_SIZE_X, IMG_SIZE_Y))
            # img = skimage.io.imread(imagepath, as_grey=True)
            # if img.shape[2] != 3:
            #     print("{} shape is {}".format(image_name, img.shape))
            #     continue
            # data = transform.resize(img, (IMG_SIZE_X, IMG_SIZE_Y))
            labels.append([index])
            images.append(img)
            print("{} class: {} {} limit: {} {}"
                  .format(count, index + 1, class_names[index], limit, imagepath))
    npy_data = np.array(images)
    npy_labels = np.array(labels)
    np.savez(data_path, x=npy_data, y=npy_labels)
    print("Save images by npz")
else:
    npy_data = np.load(data_path)["x"]
    npy_labels = np.load(data_path)["y"]
    print("Load images by npz")
image_data = npy_data
labels_data = npy_labels
print("_________________________________________________________________")
print("%-28s %-s" % ("Name", "Shape"))
print("=================================================================")
print("%-28s %-s" % ("Image Data", image_data.shape))
print("%-28s %-s" % ("Labels Data", labels_data.shape))
print("=================================================================")
train_images, test_images, train_labels, test_labels = \
    train_test_split(image_data, labels_data, test_size=0.2, random_state=6)
print('Split train and test data, test_size=0.2')
print("_________________________________________________________________")
print("%-28s %-s" % ("Name", "Shape"))
print("=================================================================")
print("%-28s %-s" % ("Train Images", train_images.shape))
print("%-28s %-s" % ("Test Images", test_images.shape))
print("%-28s %-s" % ("Train Labels", train_labels.shape))
print("%-28s %-s" % ("Test Labels", test_labels.shape))
print("=================================================================")

# Normalization
# Scale the values to the range 0 to 1 before feeding them into the neural network: convert the image data from integers to floats, then divide by 255.
# Be sure to preprocess the training set and the test set in the same way:
print("Normalize images")
train_images = train_images / 255.0
test_images = test_images / 255.0
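
With the data in this shape, it can be fed into a model exactly like in the official demo. A minimal sketch of that next step (the layer sizes are illustrative assumptions of mine, not part of this post):

# Illustrative model: layer sizes are assumptions, not from the original post
model = Sequential([
    Flatten(input_shape=(1, IMG_SIZE_X, IMG_SIZE_Y)),
    Dense(128, activation='relu'),
    Dense(len(class_names), activation='softmax'),
])
model.compile(optimizer=Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=epochs, batch_size=batch_size)
model.save(model_path)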