# Residual U-Net (殘差U-Net), adapted from: https://github.com/AhadMomin/ResUNet/blob/master/resUnet.py
def batch_Norm_Activation(x, BN=False):
    """Apply an optional BatchNormalization followed by a ReLU activation.

    Parameters
    ----------
    x : tensor
        Input Keras tensor.
    BN : bool, optional
        When True, batch normalization is applied before the ReLU.
        Defaults to False (batch normalization disabled).

    Returns
    -------
    tensor
        The activated (and optionally normalized) tensor.
    """
    # Both branches of the original ended in the same ReLU, so the
    # activation is hoisted out of the conditional.
    if BN:
        x = BatchNormalization()(x)
    return Activation("relu")(x)
def ResUnet2D(filters, num_classes=4):
    """Build a residual U-Net for 2-D semantic segmentation.

    The encoder is a two-conv stem followed by four pre-activation
    residual stages that halve the spatial size while multiplying the
    filter count (x2, x4, x8, x16).  A two-conv bridge connects to a
    decoder of four upsample-and-fuse residual stages, mirroring the
    encoder via skip connections.

    Parameters
    ----------
    filters : int
        Base number of convolution filters; deeper stages use multiples
        (x2, x4, x8, x16) of this value.
    num_classes : int, optional
        Channel count of the final 1x1 softmax layer.  Defaults to 4,
        matching the original hard-coded output.

    Returns
    -------
    keras.models.Model

    Notes
    -----
    The input spatial size is read from the module-level ``image_dims``
    variable, as in the original code.
    # NOTE(review): ``image_dims`` is not defined in this file — confirm
    # it is provided by the surrounding project before calling.
    """
    def _conv(x, n_filters, strides=(1, 1)):
        # Plain 3x3 'same' convolution used throughout the network.
        return Conv2D(n_filters, kernel_size=(3, 3), padding='same',
                      strides=strides)(x)

    def _down_res(x, n_filters):
        # Pre-activation residual block that halves the spatial size;
        # the shortcut is a strided 3x3 conv so shapes match for add().
        path = batch_Norm_Activation(x)
        path = _conv(path, n_filters, strides=(2, 2))
        path = batch_Norm_Activation(path)
        path = _conv(path, n_filters)
        shortcut = _conv(x, n_filters, strides=(2, 2))
        shortcut = batch_Norm_Activation(shortcut)
        return add([shortcut, path])

    def _up_res(x, skip, n_filters):
        # Upsample, fuse with the encoder skip, then apply a stride-1
        # residual block with a conv-projected shortcut.
        x = UpSampling2D((2, 2))(x)
        merged = concatenate([x, skip])
        path = batch_Norm_Activation(merged)
        path = _conv(path, n_filters)
        path = batch_Norm_Activation(path)
        path = _conv(path, n_filters)
        shortcut = _conv(merged, n_filters)
        shortcut = batch_Norm_Activation(shortcut)
        return add([path, shortcut])

    inputs = Input(shape=(image_dims[0], image_dims[1], 3))

    # Encoder stem: two 3x3 convs with a 1x1-projected shortcut.
    conv = _conv(inputs, filters * 1)
    conv = batch_Norm_Activation(conv)
    conv = _conv(conv, filters * 1)
    shortcut = Conv2D(filters * 1, kernel_size=(1, 1), padding='same',
                      strides=(1, 1))(inputs)
    shortcut = batch_Norm_Activation(shortcut)
    output1 = add([conv, shortcut])

    # Four residual down-sampling stages.
    output2 = _down_res(output1, filters * 2)
    output3 = _down_res(output2, filters * 4)
    output4 = _down_res(output3, filters * 8)
    output5 = _down_res(output4, filters * 16)

    # Bridge between encoder and decoder.
    conv = batch_Norm_Activation(output5)
    conv = _conv(conv, filters * 16)
    conv = batch_Norm_Activation(conv)
    conv = _conv(conv, filters * 16)

    # Decoder: upsample and fuse with the matching encoder outputs.
    path = _up_res(conv, output4, filters * 16)
    path = _up_res(path, output3, filters * 8)
    path = _up_res(path, output2, filters * 4)
    path = _up_res(path, output1, filters * 2)

    output_layer = Conv2D(num_classes, (1, 1), padding="same",
                          activation="softmax")(path)
    model = Model(inputs, output_layer)
    return model
# Residual U-Net (殘差U-Net), adapted from: https://github.com/junyuchen245/SPECT-CT-Seg-ResUNet-Keras/tree/master/nets
from keras.models import Model
#from keras.layers import Input, Reshape, Dropout, Activation, Permute, Concatenate, GaussianNoise, Add
from keras.optimizers import Adam
from keras import backend as K
from keras.layers import *
import numpy as np
from nets.custom_losses import exp_dice_loss
"""Building Res-U-Net."""
def ResUnet(pretrained_weights = None, input_size = (192,192,1)):
    """Dual-encoder residual U-Net for joint SPECT + CT segmentation.

    Two identical residual encoders (one per modality) are fused at the
    bottleneck and in every decoder stage via cross-modality skip
    connections.  The output is a per-pixel 3-class softmax, reshaped to
    (rows * cols, 3) for use with the expected dice loss.

    Parameters
    ----------
    pretrained_weights : str, optional
        Path to a weights file to load into the compiled model.
        Fix: previously this argument was accepted but silently ignored.
    input_size : tuple, optional
        (rows, cols, channels) of each input image.  Defaults to
        (192, 192, 1).

    Returns
    -------
    keras.models.Model
        Compiled model taking ``[ct, spect]`` inputs.
    """
    def _conv_bn(x, filters, bn_name=None):
        # 3x3 ReLU convolution followed by batch normalization.  The BN
        # layer may be named so saved weights keep their original keys.
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(x)
        return BatchNormalization(name=bn_name)(x)

    def _res_unit(x, filters, bn_name):
        # conv->BN->conv->BN with an identity skip from the first BN output.
        inner = _conv_bn(x, filters)
        out = _conv_bn(inner, filters, bn_name)
        return Add()([out, inner])

    def _encoder(x_bn, prefix):
        # Shared encoder topology for both modalities; returns the skip
        # tensors the decoder consumes plus the pooled bottleneck input.
        c1 = _conv_bn(x_bn, 32)
        c1 = _conv_bn(c1, 32, 'conv_{0}_32'.format(prefix))
        c1 = Add()([c1, x_bn])  # residual skip from the normalized input
        p1 = MaxPool2D(pool_size=(2, 2))(c1)
        c2 = _res_unit(p1, 64, 'conv_{0}_64'.format(prefix))
        p2 = MaxPool2D(pool_size=(2, 2))(c2)
        c3 = _res_unit(p2, 128, 'conv_{0}_128'.format(prefix))
        p3 = MaxPool2D(pool_size=(2, 2))(c3)
        c4 = _res_unit(p3, 256, 'conv_{0}_256'.format(prefix))
        d4 = Dropout(0.5)(c4)
        p4 = MaxPool2D(pool_size=(2, 2))(d4)
        return c1, c2, c3, d4, p4

    def _decoder_stage(x, skips, filters, bn_name):
        # Upsample + project, fuse with the cross-modality skips, then
        # refine with a residual unit.
        up = Conv2D(filters, 3, activation='relu', padding='same',
                    kernel_initializer='he_normal')(UpSampling2D(size=(2, 2))(x))
        up = BatchNormalization()(up)
        merged = concatenate(skips + [up], axis=3)
        merged = BatchNormalization()(merged)
        return _res_unit(merged, filters, bn_name)

    # --- encoders -------------------------------------------------------
    input_seg = Input(input_size)   # SPECT image
    input_segBN = BatchNormalization()(input_seg)
    conv1_spect, conv2_spect, conv3_spect, drop4_spect, pool4_spect = \
        _encoder(input_segBN, 'spect')

    input_ct = Input(input_size)    # CT image
    input_ctBN = BatchNormalization()(input_ct)
    conv1_ct, conv2_ct, conv3_ct, drop4_ct, pool4_ct = \
        _encoder(input_ctBN, 'ct')

    # --- per-modality bottlenecks, then channel-wise fusion -------------
    conv5_ct = Dropout(0.5)(_res_unit(pool4_ct, 512, 'conv_ct_512'))
    conv5_spect = Dropout(0.5)(_res_unit(pool4_spect, 512, 'conv_spect_512'))
    merge5_cm = concatenate([conv5_spect, conv5_ct], axis=3)

    # --- cross-modality decoder ----------------------------------------
    x = _decoder_stage(merge5_cm, [drop4_ct, drop4_spect], 256, 'decoder_conv_256')
    x = _decoder_stage(x, [conv3_ct, conv3_spect], 128, 'decoder_conv_128')
    x = _decoder_stage(x, [conv2_ct, conv2_spect], 64, 'decoder_conv_64')
    x = _decoder_stage(x, [conv1_ct, conv1_spect], 32, 'decoder_conv_32')

    x = Conv2D(filters=6, kernel_size=3, activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    out = Conv2D(filters=3, kernel_size=1, activation='softmax',
                 padding='same', name='segmentation')(x)

    # Flatten the spatial grid so the loss sees (pixels, classes).
    # Fix: derive the size from input_size instead of hard-coding (192, 192).
    image_size = tuple(input_size[:2])
    x = Reshape((np.prod(image_size), 3))(out)

    model = Model(inputs=[input_ct, input_seg], outputs=x)
    model.compile(optimizer=Adam(lr=1e-3), loss=exp_dice_loss(exp=1.0))

    if pretrained_weights:
        # Fix: honor the pretrained_weights argument.
        model.load_weights(pretrained_weights)
    return model
# Residual U-Net (殘差U-Net), adapted from: https://github.com/Kaido0/Brain-Tissue-Segment-Keras/blob/master/net/res_unet.py
'''
=======================
res_block added to unet
=======================
'''
# GPU / session setup: pin the process to GPU 1 and let TensorFlow grow
# GPU memory on demand instead of grabbing the whole card up front.
import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config=tf.ConfigProto()  # NOTE(review): ConfigProto/Session are TF1-only APIs
config.gpu_options.allow_growth=True
session=tf.Session(config=config)
# import packages
from functools import partial
import os
from keras.models import *
from keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, Activation, add, concatenate
from keras.optimizers import Adam
from keras import callbacks
from keras import backend as K
import keras.backend.tensorflow_backend as KTF
# import configurations
import configs
K.set_image_data_format('channels_last') # TF dimension ordering in this code
# init configs
# Volume geometry and class count are supplied by the project-level
# ``configs`` module.
image_rows = configs.VOLUME_ROWS
image_cols = configs.VOLUME_COLS
image_depth = configs.VOLUME_DEPS
num_classes = configs.NUM_CLASSES
# patch extraction parameters
patch_size = configs.PATCH_SIZE
BASE = configs.BASE
smooth = configs.SMOOTH
# compute dsc
def dice_coef(y_true, y_pred, smooth=1.):
    """Soft Dice coefficient between two masks, flattened over all axes."""
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    total = K.sum(truth) + K.sum(pred)
    # smooth keeps the ratio finite when both masks are empty.
    return (2. * overlap + smooth) / (total + smooth)
# proposed loss function
def dice_coef_loss(y_true, y_pred):
    """Sum of (1 - dice) over every class channel."""
    return sum(
        1 - dice_coef(y_true[:, :, :, label_index], y_pred[:, :, :, label_index])
        for label_index in range(num_classes)
    )
# dsc per class
def label_wise_dice_coefficient(y_true, y_pred, label_index):
    """Dice coefficient restricted to the class channel at ``label_index``."""
    return dice_coef(y_true[:,:,:,label_index], y_pred[:, :,:,label_index])
# get label dsc
def get_label_dice_coefficient_function(label_index):
    """Return a named metric that computes dice for one label channel.

    The returned callable is given a distinctive ``__name__`` so Keras
    logs each class's dice under its own column during training.
    """
    f = partial(label_wise_dice_coefficient, label_index=label_index)
    # Fix: assign the attribute directly instead of calling __setattr__.
    f.__name__ = 'label_{0}_dice_coef'.format(label_index)
    return f
def res_block(x, nb_filters, strides):
    """Pre-activation residual block with a 1x1-projected shortcut.

    ``nb_filters`` and ``strides`` are two-element sequences giving the
    filter count and stride of each of the two 3x3 convolutions.
    """
    main = BatchNormalization()(x)
    main = Activation(activation='relu')(main)
    main = Conv2D(filters=nb_filters[0], kernel_size=(3, 3),
                  padding='same', strides=strides[0])(main)
    main = BatchNormalization()(main)
    main = Activation(activation='relu')(main)
    main = Conv2D(filters=nb_filters[1], kernel_size=(3, 3),
                  padding='same', strides=strides[1])(main)
    # Project the input with a 1x1 conv so channel counts (and, when
    # strides[0] != 1, spatial sizes) match the main path.
    proj = Conv2D(nb_filters[1], kernel_size=(1, 1), strides=strides[0])(x)
    proj = BatchNormalization()(proj)
    return add([proj, main])
def encoder(x):
    """Encoder path: a stem block plus two down-sampling residual blocks.

    Returns a list of the three feature maps (shallow -> deep) that the
    decoder consumes as skip connections.
    """
    to_decoder = []
    # Stem: conv/BN/ReLU/conv with a 1x1-projected shortcut (no pre-activation).
    main_path = Conv2D(filters=64, kernel_size=(3, 3), padding='same', strides=(1, 1))(x)
    main_path = BatchNormalization()(main_path)
    main_path = Activation(activation='relu')(main_path)
    main_path = Conv2D(filters=64, kernel_size=(3, 3), padding='same', strides=(1, 1))(main_path)
    shortcut = Conv2D(filters=64, kernel_size=(1, 1), strides=(1, 1))(x)
    shortcut = BatchNormalization()(shortcut)
    main_path = add([shortcut, main_path])
    # first branching to decoder
    to_decoder.append(main_path)
    main_path = res_block(main_path, [128, 128], [(2, 2), (1, 1)])  # 1/2 resolution
    to_decoder.append(main_path)
    main_path = res_block(main_path, [256, 256], [(2, 2), (1, 1)])  # 1/4 resolution
    to_decoder.append(main_path)
    return to_decoder
def decoder(x, from_encoder):
    """Decoder path: three upsample / skip-concat / residual-refine stages.

    ``from_encoder`` holds the encoder feature maps shallow -> deep; they
    are consumed deepest first.
    """
    main_path = x
    stages = ((256, from_encoder[2]), (128, from_encoder[1]), (64, from_encoder[0]))
    for n_filters, skip in stages:
        main_path = UpSampling2D(size=(2, 2))(main_path)
        main_path = concatenate([main_path, skip], axis=3)
        main_path = res_block(main_path, [n_filters, n_filters], [(1, 1), (1, 1)])
    return main_path
def build_res_unet():
    """Assemble and compile the residual U-Net.

    The input is a single-channel (patch_size x patch_size) patch; the
    output is a per-pixel softmax over ``num_classes``.  Metrics are the
    overall dice plus, when more than one class exists, one dice metric
    per label.

    Returns
    -------
    keras.models.Model
        Compiled model.
    """
    inputs = Input((patch_size, patch_size, 1))
    to_decoder = encoder(inputs)
    # Bridge: deepest residual block at 1/8 resolution.
    path = res_block(to_decoder[2], [512, 512], [(2, 2), (1, 1)])
    path = decoder(path, from_encoder=to_decoder)
    path = Conv2D(filters=num_classes, kernel_size=(1, 1), activation='softmax')(path)
    model = Model(inputs=[inputs], outputs=[path])

    # Fix: replaced the isinstance/flag shuffle with a direct list build.
    metrics = [dice_coef]
    if num_classes > 1:
        # One dice column per class in the training log.
        metrics += [get_label_dice_coefficient_function(index)
                    for index in range(num_classes)]
    model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=metrics)
    return model
# Squeeze U-Net, adapted from: https://github.com/Kaido0/Brain-Tissue-Segment-Keras/blob/master/net/squeeze_unet.py
# -*- coding: utf-8 -*-
from __future__ import print_function
# GPU / session setup: pin the process to GPU 1 and grow GPU memory on
# demand rather than pre-allocating the whole card (TF1-style session API).
import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
session=tf.Session(config=config)
from functools import partial
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Dropout,Input
from keras.layers import concatenate, Conv2DTranspose, BatchNormalization
from keras.optimizers import Adam
from keras import backend as K
import keras.backend.tensorflow_backend as KTF
# import configurations
import configs
K.set_image_data_format('channels_last') # TF dimension ordering in this code
# init configs
# Volume geometry and class count are supplied by the project-level
# ``configs`` module.
image_rows = configs.VOLUME_ROWS
image_cols = configs.VOLUME_COLS
image_depth = configs.VOLUME_DEPS
num_classes = configs.NUM_CLASSES
# patch extraction parameters
patch_size = configs.PATCH_SIZE
BASE = configs.BASE
smooth = configs.SMOOTH
# compute dsc
def dice_coef(y_true, y_pred, smooth=1.):
    """Soft Dice similarity of two masks, flattened over all axes."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    numerator = 2. * K.sum(flat_true * flat_pred) + smooth
    denominator = K.sum(flat_true) + K.sum(flat_pred) + smooth
    return numerator / denominator
def dice_coef_loss(y_true, y_pred):
    """Dice loss: 1 - dice, so perfect overlap yields zero loss."""
    return 1.-dice_coef(y_true, y_pred)
'''
# proposed loss function
def dice_coef_loss(y_true, y_pred):
distance = 0
for label_index in range(num_classes):
dice_coef_class = dice_coef(y_true[:,:,:,label_index], y_pred[:, :,:,label_index])
distance = 1 - dice_coef_class + distance
return distance
'''
# dsc per class
def label_wise_dice_coefficient(y_true, y_pred, label_index):
    """Dice coefficient for a single class channel."""
    channel_true = y_true[:, :, :, label_index]
    channel_pred = y_pred[:, :, :, label_index]
    return dice_coef(channel_true, channel_pred)
# get label dsc
def get_label_dice_coefficient_function(label_index):
    """Return a named metric that computes dice for one label channel.

    The distinctive ``__name__`` makes Keras log each class's dice under
    its own column.
    """
    f = partial(label_wise_dice_coefficient, label_index=label_index)
    # Fix: assign the attribute directly instead of calling __setattr__.
    f.__name__ = 'label_{0}_dice_coef'.format(label_index)
    return f
def fire_module(x, fire_id, squeeze=16, expand=64):
    """SqueezeNet fire module: 1x1 squeeze, then parallel 1x1/3x3 expand."""
    f_name = "fire{0}/{1}"
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    squeezed = Conv2D(squeeze, (1, 1), activation='relu', padding='same',
                      name=f_name.format(fire_id, "squeeze1x1"))(x)
    squeezed = BatchNormalization(axis=channel_axis)(squeezed)
    expand_1x1 = Conv2D(expand, (1, 1), activation='relu', padding='same',
                        name=f_name.format(fire_id, "expand1x1"))(squeezed)
    expand_3x3 = Conv2D(expand, (3, 3), activation='relu', padding='same',
                        name=f_name.format(fire_id, "expand3x3"))(squeezed)
    return concatenate([expand_1x1, expand_3x3], axis=channel_axis,
                       name=f_name.format(fire_id, "concat"))
def SqueezeUNet(deconv_ksize=3, dropout=0.5):
    """SqueezeUNet: a U-Net whose encoder follows SqueezeNet v1.1.

    Fix: the previous docstring documented ``inputs``, ``num_classes`` and
    ``activation`` parameters that do not exist; the input comes from the
    module-level ``patch_size`` and the output size from ``num_classes``.

    Parameters
    ----------
    deconv_ksize : int or tuple, optional
        Kernel size of the Conv2DTranspose layers in the decoder.
    dropout : float, optional
        Dropout rate applied after the deepest fire module; 0.0 disables it.

    Returns
    -------
    keras.models.Model
        Compiled model mapping a (patch_size, patch_size, 1) patch to a
        per-pixel softmax over ``num_classes``.
    """
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    inputs = Input((patch_size, patch_size, 1))

    # --- SqueezeNet-style encoder --------------------------------------
    x01 = Conv2D(64, (3, 3), strides=(2, 2), padding='same', activation='relu', name='conv1')(inputs)
    x02 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1', padding='same')(x01)
    x03 = fire_module(x02, fire_id=2, squeeze=16, expand=64)
    x04 = fire_module(x03, fire_id=3, squeeze=16, expand=64)
    x05 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3', padding="same")(x04)
    x06 = fire_module(x05, fire_id=4, squeeze=32, expand=128)
    x07 = fire_module(x06, fire_id=5, squeeze=32, expand=128)
    x08 = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5', padding="same")(x07)
    x09 = fire_module(x08, fire_id=6, squeeze=48, expand=192)
    x10 = fire_module(x09, fire_id=7, squeeze=48, expand=192)
    x11 = fire_module(x10, fire_id=8, squeeze=64, expand=256)
    x12 = fire_module(x11, fire_id=9, squeeze=64, expand=256)
    if dropout != 0.0:
        x12 = Dropout(dropout)(x12)

    # --- decoder with fire-module refinement ---------------------------
    # The two deepest skips (x10, x08) are at the same spatial size as
    # x12, so their transposed convs use stride 1; the rest upsample by 2.
    up1 = concatenate([
        Conv2DTranspose(192, deconv_ksize, strides=(1, 1), padding='same')(x12),
        x10,
    ], axis=channel_axis)
    up1 = fire_module(up1, fire_id=10, squeeze=48, expand=192)
    up2 = concatenate([
        Conv2DTranspose(128, deconv_ksize, strides=(1, 1), padding='same')(up1),
        x08,
    ], axis=channel_axis)
    up2 = fire_module(up2, fire_id=11, squeeze=32, expand=128)
    up3 = concatenate([
        Conv2DTranspose(64, deconv_ksize, strides=(2, 2), padding='same')(up2),
        x05,
    ], axis=channel_axis)
    up3 = fire_module(up3, fire_id=12, squeeze=16, expand=64)
    up4 = concatenate([
        Conv2DTranspose(32, deconv_ksize, strides=(2, 2), padding='same')(up3),
        x02,
    ], axis=channel_axis)
    up4 = fire_module(up4, fire_id=13, squeeze=16, expand=32)
    up4 = UpSampling2D(size=(2, 2))(up4)

    x = concatenate([up4, x01], axis=channel_axis)
    x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu')(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(num_classes, (1, 1), activation='softmax')(x)
    model = Model(inputs=[inputs], outputs=[x])

    # Fix: replaced the isinstance/flag shuffle with a direct list build.
    metrics = [dice_coef]
    if num_classes > 1:
        # One dice column per class in the training log.
        metrics += [get_label_dice_coefficient_function(index)
                    for index in range(num_classes)]
    model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=metrics)
    return model
# Dense U-Net:
def DenseBlock(channels, inputs):
    """Four-step dense block.

    Each step squeezes the running concatenation with a 1x1 conv of
    ``channels`` filters, then produces ``channels // 4`` new feature
    maps with a 3x3 conv; every step's output joins the concatenation.
    Returns the concatenation of the input with all four step outputs.
    """
    features = [inputs]
    for _ in range(4):
        # concatenate() needs at least two tensors, so the first step
        # consumes the raw input directly.
        fused = features[0] if len(features) == 1 else concatenate(features)
        squeezed = Conv2D(channels, (1, 1), activation=None, padding='same')(fused)
        squeezed = BatchActivate(squeezed)
        grown = Conv2D(channels // 4, (3, 3), activation=None, padding='same')(squeezed)
        grown = BatchActivate(grown)
        features.append(grown)
    return concatenate(features)
def BatchActivate(x):
    """Batch normalization followed by a ReLU activation."""
    normalized = BatchNormalization()(x)
    return Activation('relu')(normalized)
def DenseUNet(input_size=(320, 320, 1), start_neurons=64, keep_prob=0.8, block_size=7, lr=1e-4):
    """Dense U-Net: a three-level U-Net whose conv stages are DenseBlocks.

    Parameters
    ----------
    input_size : tuple, optional
        (rows, cols, channels) of the input image.
    start_neurons : int, optional
        Base channel count; level k of the network uses start_neurons * 2**k.
    keep_prob, block_size :
        Accepted for interface compatibility but not used here.
        # NOTE(review): presumably intended for dropout / dropblock — confirm.
    lr : float, optional
        Learning rate for the Nadam optimizer.
        Fix: this argument was previously ignored (1e-4 was hard-coded).

    Returns
    -------
    keras.models.Model
        Compiled model with a 3-class per-pixel softmax output.
    """
    inputs = Input(input_size)

    # --- encoder --------------------------------------------------------
    conv1 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(inputs)
    conv1 = BatchActivate(conv1)
    conv1 = DenseBlock(start_neurons * 1, conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    conv2 = DenseBlock(start_neurons * 2, pool1)
    pool2 = MaxPooling2D((2, 2))(conv2)
    conv3 = DenseBlock(start_neurons * 4, pool2)
    pool3 = MaxPooling2D((2, 2))(conv3)

    # --- bridge ---------------------------------------------------------
    convm = DenseBlock(start_neurons * 8, pool3)

    def _up_stage(x, skip, filters):
        # Transpose-conv upsample, concat the encoder skip, 1x1 squeeze,
        # then refine with a dense block.
        up = Conv2DTranspose(filters, (3, 3), strides=(2, 2), padding="same")(x)
        merged = concatenate([up, skip])
        merged = Conv2D(filters, (1, 1), activation=None, padding="same")(merged)
        merged = BatchActivate(merged)
        return DenseBlock(filters, merged)

    # --- decoder --------------------------------------------------------
    uconv3 = _up_stage(convm, conv3, start_neurons * 4)
    uconv2 = _up_stage(uconv3, conv2, start_neurons * 2)
    uconv1 = _up_stage(uconv2, conv1, start_neurons * 1)

    output_layer_noActi = Conv2D(3, (1, 1), padding="same", activation=None)(uconv1)
    output_layer = Activation('softmax')(output_layer_noActi)

    # Fix: use the Keras 2 keyword names (``input``/``output`` are the
    # deprecated spellings) and honor the ``lr`` argument.
    model = Model(inputs=inputs, outputs=output_layer)
    # NOTE(review): ``Nadam`` and the ``dices`` loss are not defined in
    # this file — they must be imported/defined elsewhere in the project.
    model.compile(optimizer=Nadam(lr=lr), loss=dices, metrics=['accuracy'])
    return model