1.FCM模糊聚類原理
模糊C均值聚類(Fuzzy C-Means,FCM)算法融合了模糊理論的精髓,相較於k-means的硬聚類,FCM提供了更加靈活的聚類結果。因爲大部分情況下,數據集中的對象不能劃分成爲明顯分離的簇,將一個對象劃分到一個特定的簇有些生硬,不符合人的客觀認知。因此,對每個對象和每個簇賦予一個權值,指明對象屬於該簇的程度即可。當然,基於概率的方法也可以給出這樣的權值,但是有時候我們很難確定一個合適的統計模型,因此使用具有自然的、非概率特性的FCM聚類算法就是一個比較好的選擇。
2.FCM模糊聚類流程
3.FCM模糊聚類程序
from pylab import *
from numpy import *
import pandas as pd
import numpy as np
import operator
import math
import matplotlib.pyplot as plt
import random
# Data lives in a .csv file; expects an iris-style table whose LAST column
# is the class label and all preceding columns are numeric features.
df_full = pd.read_csv("iris.csv")
columns = list(df_full.columns)
# Every column except the trailing class label is a feature.
features = columns[:len(columns) - 1]
# class_labels = list(df_full[columns[-1]])
df = df_full[features]
# Feature dimensionality.
# NOTE(review): df already excludes the label column, so this looks like it
# under-counts by 1 — the value is never used below; confirm before relying on it.
num_attr = len(df.columns) - 1
# Number of clusters.
k = 3
# Maximum number of iterations.
MAX_ITER = 100
# Number of samples (rows in the feature table).
n = len(df) # the number of rows
# Fuzziness exponent m; m = 2 is the conventional FCM default.
m = 2.00
def initializeMembershipMatrix(num_samples=None, num_clusters=None):
    """Build a random fuzzy membership matrix U.

    Each row holds ``num_clusters`` non-negative weights that sum to 1:
    the degree to which one data point belongs to each cluster.

    Args:
        num_samples: number of data points; defaults to the module-level ``n``.
        num_clusters: number of clusters; defaults to the module-level ``k``.

    Returns:
        list[list[float]]: a ``num_samples`` x ``num_clusters`` matrix whose
        rows each sum to 1.
    """
    if num_samples is None:
        num_samples = n
    if num_clusters is None:
        num_clusters = k
    membership_mat = []
    for _ in range(num_samples):
        # Draw random weights, then normalize the row so it sums to 1.
        weights = [random.random() for _ in range(num_clusters)]
        total = sum(weights)
        membership_mat.append([w / total for w in weights])
    return membership_mat
def calculateClusterCenter(membership_mat, data_frame=None, fuzziness=None):
    """Compute fuzzy cluster centers from the membership matrix.

    Implements the FCM center update:
        center_j = sum_i(u_ij^m * x_i) / sum_i(u_ij^m)

    Args:
        membership_mat: n x k membership matrix (rows = points).
        data_frame: feature table with one row per point; defaults to the
            module-level ``df``.
        fuzziness: exponent m; defaults to the module-level ``m``.

    Returns:
        list[list[float]]: k centers, each with one value per feature column.
    """
    if data_frame is None:
        data_frame = df
    if fuzziness is None:
        fuzziness = m
    num_samples = len(membership_mat)
    num_clusters = len(membership_mat[0])
    # Column j of the membership matrix = every point's weight for cluster j.
    membership_columns = list(zip(*membership_mat))
    cluster_centers = []
    for j in range(num_clusters):
        xraised = [u ** fuzziness for u in membership_columns[j]]
        denominator = sum(xraised)
        weighted_points = []
        for i in range(num_samples):
            data_point = list(data_frame.iloc[i])
            weighted_points.append([xraised[i] * val for val in data_point])
        # Sum the weighted points per feature dimension, then normalize.
        numerator = map(sum, zip(*weighted_points))
        cluster_centers.append([z / denominator for z in numerator])
    return cluster_centers
def updateMembershipValue(membership_mat, cluster_centers, data_frame=None, fuzziness=None):
    """Update each point's membership degrees from the current centers.

    Implements the FCM membership update:
        u_ij = 1 / sum_c (d_ij / d_ic)^(2/(m-1))
    Fixes the original code, which hard-coded the exponent to 2 — correct
    only for the special case m = 2 (the commented-out ``p = 2/(m-1)`` in the
    source shows the intent).

    Args:
        membership_mat: n x k membership matrix; updated in place.
        cluster_centers: k centers as sequences of feature values.
        data_frame: feature table, one row per point; defaults to module ``df``.
        fuzziness: exponent m (> 1); defaults to module ``m``.

    Returns:
        tuple: (updated membership matrix, data points as a list of lists).

    Note:
        Raises ZeroDivisionError if a point coincides exactly with a center
        (distance 0) — same behavior as the original implementation.
    """
    if data_frame is None:
        data_frame = df
    if fuzziness is None:
        fuzziness = m
    exponent = 2.0 / (fuzziness - 1.0)
    num_clusters = len(cluster_centers)
    data = []
    for i in range(len(data_frame)):
        x = list(data_frame.iloc[i])  # one row of the feature table
        data.append(x)
        # Euclidean distance from this point to every center.
        distances = [np.linalg.norm(np.subtract(x, cluster_centers[j]))
                     for j in range(num_clusters)]
        for j in range(num_clusters):
            den = sum((distances[j] / distances[c]) ** exponent
                      for c in range(num_clusters))
            membership_mat[i][j] = 1.0 / den
    return membership_mat, data
def getClusters(membership_mat):
    """Hard-assign each point to the cluster with its highest membership.

    Iterates the matrix rows directly instead of relying on the module-level
    sample count ``n``. Ties keep the original behavior: the (value, index)
    tuple comparison makes the HIGHER index win on equal membership.

    Args:
        membership_mat: n x k membership matrix.

    Returns:
        list[int]: cluster index per point.
    """
    cluster_labels = []
    for row in membership_mat:
        _, idx = max((val, idx) for idx, val in enumerate(row))
        cluster_labels.append(idx)
    return cluster_labels
def fuzzyCMeansClustering():
    """Main FCM driver.

    Starts from a random membership matrix, then alternates the center and
    membership updates for a fixed iteration budget (no convergence test),
    printing the final membership matrix before returning.

    Returns:
        tuple: (cluster_labels, cluster_centers, data, membership_mat).
    """
    membership_mat = initializeMembershipMatrix()
    # Same iteration count as the original while-counter loop (MAX_ITER + 1).
    for _ in range(MAX_ITER + 1):
        cluster_centers = calculateClusterCenter(membership_mat)
        membership_mat, data = updateMembershipValue(membership_mat, cluster_centers)
        cluster_labels = getClusters(membership_mat)
    print(membership_mat)
    return cluster_labels, cluster_centers, data, membership_mat
def xie_beni(membership_mat, center, data):
    """Compute the Xie-Beni cluster validity index (lower is better).

    XB = compactness / separation
       = sum_j sum_i u_ij^2 * ||x_j - c_i||^2  /  (n * min_{i<j} ||c_i - c_j||^2)

    Derives the sample and cluster counts from the array shapes instead of the
    module-level globals ``n``/``k``, and uses explicit ``np.`` calls instead of
    the ``pylab`` star-imported ``inf``/``power``.

    Args:
        membership_mat: n x k membership matrix (indexable as [point][cluster]).
        center: (k, d) array of cluster centers.
        data: (n, d) array of data points.

    Returns:
        float: the Xie-Beni index.
    """
    num_samples = data.shape[0]
    num_clusters = center.shape[0]
    # Compactness: membership-weighted squared distances to each center.
    sum_cluster_distance = 0.0
    for i in range(num_clusters):
        for j in range(num_samples):
            sum_cluster_distance += membership_mat[j][i] ** 2 * np.sum(
                (data[j, :] - center[i, :]) ** 2)
    # Separation: minimum squared distance between any two centers.
    min_cluster_center_distance = np.inf
    for i in range(num_clusters - 1):
        for j in range(i + 1, num_clusters):
            cluster_center_distance = np.sum((center[i, :] - center[j, :]) ** 2)
            if cluster_center_distance < min_cluster_center_distance:
                min_cluster_center_distance = cluster_center_distance
    return sum_cluster_distance / (num_samples * min_cluster_center_distance)
# Run the clustering and report the results.
labels, centers, data, membership = fuzzyCMeansClustering()
print(labels)
print(centers)
center_array = array(centers)
label = array(labels)
datas = array(data)
# Xie-Beni cluster validity index (lower is better).
print("聚類有效性:", xie_beni(membership, center_array, datas))
xlim(0, 10)
ylim(0, 10)
# Scatter plot of the first two feature dimensions, one marker per cluster,
# plus the cluster centers as magenta crosses.
fig = plt.gcf()
fig.set_size_inches(16.5, 12.5)
f1 = plt.figure(1)
plt.scatter(datas[nonzero(label == 0), 0], datas[nonzero(label == 0), 1], marker='o', color='r', label='0', s=10)
plt.scatter(datas[nonzero(label == 1), 0], datas[nonzero(label == 1), 1], marker='+', color='b', label='1', s=10)
plt.scatter(datas[nonzero(label == 2), 0], datas[nonzero(label == 2), 1], marker='*', color='g', label='2', s=10)
plt.scatter(center_array[:, 0], center_array[:, 1], marker='x', color='m', s=30)
plt.show()