How can I get the intermediate-layer outputs of a ResNet built with Keras in TensorFlow 2.0? I want to use the ResNet as an image feature extractor.



residual_block.py

import tensorflow as tf

class BasicBlock(tf.keras.layers.Layer):
    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=filter_num,
                                            kernel_size=(3, 3),
                                            strides=stride,
                                            padding="same")
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv2D(filters=filter_num,
                                            kernel_size=(3, 3),
                                            strides=1,
                                            padding="same")
        self.bn2 = tf.keras.layers.BatchNormalization()
        if stride != 1:
            self.downsample = tf.keras.Sequential()
            self.downsample.add(tf.keras.layers.Conv2D(filters=filter_num,
                                                       kernel_size=(1, 1),
                                                       strides=stride))
            self.downsample.add(tf.keras.layers.BatchNormalization())
        else:
            self.downsample = lambda x: x

    def call(self, inputs, training=None, **kwargs):
        residual = self.downsample(inputs)
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2(x)
        x = self.bn2(x, training=training)
        output = tf.nn.relu(tf.keras.layers.add([residual, x]))
        return output

class BottleNeck(tf.keras.layers.Layer):
    def __init__(self, filter_num, stride=1):
        super(BottleNeck, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=filter_num,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding='same')
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.conv2 = tf.keras.layers.Conv2D(filters=filter_num,
                                            kernel_size=(3, 3),
                                            strides=stride,
                                            padding='same')
        self.bn2 = tf.keras.layers.BatchNormalization()
        self.conv3 = tf.keras.layers.Conv2D(filters=filter_num * 4,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding='same')
        self.bn3 = tf.keras.layers.BatchNormalization()
        self.downsample = tf.keras.Sequential()
        self.downsample.add(tf.keras.layers.Conv2D(filters=filter_num * 4,
                                                   kernel_size=(1, 1),
                                                   strides=stride))
        self.downsample.add(tf.keras.layers.BatchNormalization())

    def call(self, inputs, training=None, **kwargs):
        residual = self.downsample(inputs)
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2(x)
        x = self.bn2(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv3(x)
        x = self.bn3(x, training=training)
        output = tf.nn.relu(tf.keras.layers.add([residual, x]))
        return output

def make_basic_block_layer(filter_num, blocks, stride=1):
    res_block = tf.keras.Sequential()
    res_block.add(BasicBlock(filter_num, stride=stride))
    for _ in range(1, blocks):
        res_block.add(BasicBlock(filter_num, stride=1))
    return res_block

def make_bottleneck_layer(filter_num, blocks, stride=1):
    res_block = tf.keras.Sequential()
    res_block.add(BottleNeck(filter_num, stride=stride))
    for _ in range(1, blocks):
        res_block.add(BottleNeck(filter_num, stride=1))
    return res_block

resnet.py

import tensorflow as tf
from config import NUM_CLASSES
from models.residual_block import make_basic_block_layer, make_bottleneck_layer

class ResNetTypeI(tf.keras.Model):
    def __init__(self, layer_params):
        super(ResNetTypeI, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=64,
                                            # kernel_size=(7, 7),
                                            kernel_size=(3, 3),
                                            # strides=2,
                                            strides=1,
                                            padding="same")
        self.bn1 = tf.keras.layers.BatchNormalization()
        # self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
        #                                        strides=2,
        #                                        padding="same")
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(2, 2),
                                               strides=1,
                                               padding="same")
        self.layer1 = make_basic_block_layer(filter_num=64,
                                             blocks=layer_params[0])
        self.layer2 = make_basic_block_layer(filter_num=128,
                                             blocks=layer_params[1],
                                             stride=2)
        self.layer3 = make_basic_block_layer(filter_num=256,
                                             blocks=layer_params[2],
                                             stride=2)
        self.layer4 = make_basic_block_layer(filter_num=512,
                                             blocks=layer_params[3],
                                             stride=2)
        self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
        self.fc = tf.keras.layers.Dense(units=NUM_CLASSES, activation=tf.keras.activations.softmax)

    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.relu(x)
        x = self.pool1(x)
        x = self.layer1(x, training=training)
        x = self.layer2(x, training=training)
        x = self.layer3(x, training=training)
        x = self.layer4(x, training=training)
        x = self.avgpool(x)
        output = self.fc(x)
        return output

class ResNetTypeII(tf.keras.Model):
    def __init__(self, layer_params):
        super(ResNetTypeII, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=64,
                                            kernel_size=(7, 7),
                                            strides=2,
                                            padding="same")
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.pool1 = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
                                               strides=2,
                                               padding="same")
        self.layer1 = make_bottleneck_layer(filter_num=64,
                                            blocks=layer_params[0])
        self.layer2 = make_bottleneck_layer(filter_num=128,
                                            blocks=layer_params[1],
                                            stride=2)
        self.layer3 = make_bottleneck_layer(filter_num=256,
                                            blocks=layer_params[2],
                                            stride=2)
        self.layer4 = make_bottleneck_layer(filter_num=512,
                                            blocks=layer_params[3],
                                            stride=2)
        self.avgpool = tf.keras.layers.GlobalAveragePooling2D()
        self.fc = tf.keras.layers.Dense(units=NUM_CLASSES, activation=tf.keras.activations.softmax)

    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = tf.nn.relu(x)
        x = self.pool1(x)
        x = self.layer1(x, training=training)
        x = self.layer2(x, training=training)
        x = self.layer3(x, training=training)
        x = self.layer4(x, training=training)
        x = self.avgpool(x)
        output = self.fc(x)
        return output

def resnet_18():
    return ResNetTypeI(layer_params=[2, 2, 2, 2])

def resnet_34():
    return ResNetTypeI(layer_params=[3, 4, 6, 3])

def resnet_50():
    return ResNetTypeII(layer_params=[3, 4, 6, 3])

def resnet_101():
    return ResNetTypeII(layer_params=[3, 4, 23, 3])

def resnet_152():
    return ResNetTypeII(layer_params=[3, 8, 36, 3])

The ResNet model itself is fine; I can train with it and run predictions. But when I try to get the intermediate-layer outputs of the ResNet in order to build an image feature extractor, it does not work.

main.py

from ResNet.models import resnet
import tensorflow as tf
import config
res34 = resnet.resnet_34()
res34.build(input_shape=(None, config.image_height, config.image_width, config.channels))
res34.load_weights('./ResNet/saved_model/model')
res34.summary()
sub_model = tf.keras.Model(inputs=res34.input, outputs=res34.layers[-2].output)
sub_model.summary()

Traceback:

Traceback (most recent call last):
  File "D:/Desktop/statsInfoRMHC/subModel.py", line 26, in <module>
    sub_model = tf.keras.Model(inputs=res34.input, outputs=res34.layers[-2].output)
  File "C:\Users\longj\Anaconda3\envs\tf2\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py", line 1576, in output
    raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
AttributeError: Layer global_average_pooling2d has no inbound nodes.

One way to do it…

Extend ResNetTypeI with a new method

Create a new method inside your ResNetTypeI class:

def features(self, inputs, training=None):
    # same forward pass as call(), but stops at the global average pooling layer
    x = self.conv1(inputs)
    x = self.bn1(x, training=training)
    x = tf.nn.relu(x)
    x = self.pool1(x)
    x = self.layer1(x, training=training)
    x = self.layer2(x, training=training)
    x = self.layer3(x, training=training)
    x = self.layer4(x, training=training)
    return self.avgpool(x)

This method lets you extract the sub-graph. Later in your program, use it instead of sub_model = tf.keras.Model(inputs=res34.input, outputs=res34.layers[-2].output), so the end of your script now becomes:

res34.summary()
sub_model_input = tf.keras.Input( ... )
sub_model_output = res34.features(sub_model_input)
sub_model = tf.keras.Model(sub_model_input, sub_model_output)
sub_model.summary()
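
For example (a minimal sketch; it assumes the Input shape above is filled in with config.image_height, config.image_width and config.channels, and uses a random dummy batch just to illustrate the call):

import numpy as np

# hypothetical batch of 4 images matching the Input shape
images = np.random.rand(4, config.image_height, config.image_width,
                        config.channels).astype("float32")

# each row is the pooled feature vector for one image
# (512-dimensional for resnet_34, since layer4 uses 512 filters)
features = sub_model.predict(images)
print(features.shape)  # (4, 512)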

This is a total hack, which is why I would rather suggest…

Reduce ResNetTypeI so that it returns only the features

Instead of doing all of the above, remove the last layer from your ResNet model so that it returns only the features. "What," you say, "what about my classification?" No problem, take a look at this handy wrapper:

def addClassificationToModel(feature_model):
    classification_output = tf.keras.layers.Dense(units=NUM_CLASSES, activation=tf.keras.activations.softmax)(feature_model.output)
    return tf.keras.Model(feature_model.input, classification_output)

which turns any feature model into a classification model. Training the classification model also trains the underlying feature model. This is really nice and reusable, and much cleaner than the approach above and/or trying to pull a feature model out of a classification model afterwards.
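
As a rough sketch of how the pieces could fit together (assuming feature_model is a functional Keras model whose output is the pooled feature vector, e.g. one built via the features() method above so that .input and .output exist, and train_dataset is a hypothetical tf.data.Dataset of (image, label) pairs):

inputs = tf.keras.Input(shape=(config.image_height, config.image_width, config.channels))
feature_model = tf.keras.Model(inputs, res34.features(inputs))

# wrap the feature extractor with a softmax head; training the classifier
# also updates the weights of the underlying ResNet feature model
classifier = addClassificationToModel(feature_model)
classifier.compile(optimizer="adam",
                   loss="sparse_categorical_crossentropy",
                   metrics=["accuracy"])
# classifier.fit(train_dataset, epochs=10)

# afterwards, feature_model on its own serves as the image feature extractor
# image_features = feature_model.predict(images)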

Latest update