錯誤原因
原因在於用 Lambda 層包裝自定義的損失函數來呼叫 add_loss,而不是繼承 layers.Layer 實作自定義層
例如:定義損失函數 Loss(因爲這個函數不好,就不多做說明了,只是做個例子來說明)
def contrastive_loss_layer(top_different, deep_different, y_true):
    """Contrastive-style loss over two difference tensors.

    NOTE(review): the author presents this as a deliberately poor example
    loss; the computation is reproduced here unchanged.
    """
    margin = 1000
    # L2 norms of the two difference tensors.
    top_distance = tf.norm(top_different, ord=2)
    deep_distance = tf.norm(deep_different, ord=2)
    # Log of the product of the two distances.
    mul_distance = K.log(top_distance * deep_distance)
    # y_true == 0: pull the log-distance down; y_true == 1: push it past the margin.
    loss = (1 - y_true) * mul_distance + y_true * tf.square(tf.maximum(margin - mul_distance, 0))
    return tf.reduce_mean(loss)
# Attaching the loss by wrapping the plain function in a Lambda layer — this
# is the call pattern that triggers the error discussed in this note.
model.add_loss(layers.Lambda(lambda x:self.contrastive_loss_layer(*x), name='loss')([left_inputs-right_inputs,left_output-right_output,label_inputs]))
替換成層類就不會報錯了
class ContrastiveLoss(layers.Layer):
    """Keras layer that computes and registers a contrastive-style loss.

    Subclassing ``layers.Layer`` (instead of wrapping a plain function in a
    ``Lambda``) avoids the error described above.
    """

    def __init__(self, **kwargs):
        super(ContrastiveLoss, self).__init__(**kwargs)

    def call(self, inputs, **kwargs):
        """Compute the loss from the packed ``inputs``.

        The parent ``Layer.call`` expects ``inputs`` to be a single tensor or
        a list/tuple of tensors, so multiple arguments cannot be passed
        positionally; the caller packs the three tensors into one sequence
        and they are unpacked here. Passing them separately would raise.
        """
        # Unpack the three tensors supplied by the caller.
        top_different, deep_different, y_true = inputs
        margin = 100
        # L2 norms of the two difference tensors, combined via a log-product.
        top_distance = tf.norm(top_different, ord=2)
        deep_distance = tf.norm(deep_different, ord=2)
        mul_distance = K.log(top_distance * deep_distance)
        # y_true == 0: minimize the log-distance; y_true == 1: push it
        # beyond the margin (squared hinge).
        loss = tf.reduce_mean(
            (1 - y_true) * mul_distance
            + y_true * tf.square(tf.maximum(margin - mul_distance, 0))
        )
        # Key step: register the custom loss on the layer so it takes effect,
        # and expose it as a metric so it shows live on the Keras progress bar.
        self.add_loss(loss, inputs=True)
        self.add_metric(loss, aggregation="mean", name="C_loss")
        return loss