Project Background
Network Architecture
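The network used here is a Res-UNet: a U-Net style encoder-decoder in which the plain double-convolution blocks are replaced by residual blocks. The actual create_model called in the training code below is not reproduced in the post, so the following is only an illustrative sketch of such a residual unit in the same fluid API; all names and layer choices are assumptions.

def res_block(x, num_filters):
    # Two 3x3 conv + batch-norm layers plus a shortcut connection,
    # as in a standard residual unit; the shortcut is projected with
    # a 1x1 convolution when the channel count changes.
    conv = fluid.layers.conv2d(x, num_filters, 3, padding=1)
    conv = fluid.layers.batch_norm(conv, act="relu")
    conv = fluid.layers.conv2d(conv, num_filters, 3, padding=1)
    conv = fluid.layers.batch_norm(conv)
    shortcut = x
    if x.shape[1] != num_filters:
        shortcut = fluid.layers.conv2d(x, num_filters, 1)
    return fluid.layers.elementwise_add(shortcut, conv, act="relu")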
Data Processing and Augmentation
The loss combines per-pixel cross entropy with Dice loss:

def create_loss(predict, label, num_classes=2):
    # Flatten the NCHW prediction into per-pixel class probabilities
    predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])
    predict = fluid.layers.reshape(predict, shape=[-1, num_classes])
    predict = fluid.layers.softmax(predict)
    label = fluid.layers.reshape(label, shape=[-1, 1])
    label = fluid.layers.cast(label, "int64")
    dice_loss = fluid.layers.dice_loss(predict, label)    # Dice loss
    ce_loss = fluid.layers.cross_entropy(predict, label)  # cross entropy
    # The final loss is the sum of the Dice and cross-entropy terms
    return fluid.layers.reduce_mean(ce_loss + dice_loss)
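Cross entropy supplies smooth per-pixel gradients, while the Dice term directly optimizes overlap, which helps with the heavy class imbalance of small lesions. For reference, here is a plain-NumPy sketch of a standard soft Dice loss (the exact epsilon handling inside fluid.layers.dice_loss may differ in detail):

import numpy as np

def soft_dice_loss(probs, labels_onehot, eps=1e-5):
    # probs and labels_onehot have shape (num_pixels, num_classes),
    # matching the flattened softmax output in create_loss above.
    intersection = np.sum(probs * labels_onehot)
    dice = (2.0 * intersection + eps) / (np.sum(probs) + np.sum(labels_onehot) + eps)
    return 1.0 - dice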
Model Training
The training program defines the network inputs, builds the model, and attaches the loss, regularizer, and optimizer:

with fluid.program_guard(train_program, train_init):
    # Define the network inputs
    image = fluid.layers.data(name="image", shape=[3, 512, 512], dtype="float32")
    label = fluid.layers.data(name="label", shape=[1, 512, 512], dtype="int32")
    # Define the loader that feeds training data to the network
    train_loader = fluid.io.DataLoader.from_generator(
        feed_list=[image, label],
        capacity=cfg.TRAIN.BATCH_SIZE * 2,
    )
    # Build the network
    prediction = create_model(image, 2)
    # Define the loss
    avg_loss = loss.create_loss(prediction, label, 2)
    # Define the L2 regularization term
    decay = paddle.fluid.regularizer.L2Decay(cfg.TRAIN.REG_COEFF)
    # Choose the optimizer
    if cfg.TRAIN.OPTIMIZER == "adam":
        optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.003, regularization=decay)
    optimizer.minimize(avg_loss)
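The training loop further down relies on places, exe, and compiled_train_program, which are created outside the snippets shown. A typical fluid 1.x setup looks like the following sketch; it is an assumption, not necessarily the author's exact configuration:

places = fluid.cuda_places()  # or fluid.cpu_places() when no GPU is available
exe = fluid.Executor(places[0])
exe.run(train_init)           # run the startup program once to initialize parameters
compiled_train_program = fluid.CompiledProgram(train_program).with_data_parallel(
    loss_name=avg_loss.name)  # replicate the graph across the devices in `places`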
The reader loads the preprocessed .npy files and yields (volume, label) samples:

def data_reader(part_start=0, part_end=8):
    data_names = os.listdir(preprocess_path)
    # Take 80% of the data (deciles 0 through 8) as the training set
    data_part = data_names[len(data_names) * part_start // 10 : len(data_names) * part_end // 10]
    random.shuffle(data_part)  # shuffle the input order

    def reader():
        for data_name in data_part:
            data = np.load(os.path.join(preprocess_path, data_name))
            vol = data[0:3, :, :]  # three adjacent CT slices form the 2.5D input
            lab = data[3, :, :]    # segmentation label of the middle slice
            yield (vol, lab)

    return reader
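Because part_start and part_end index tenths of the file list, the same function can serve a held-out split, for example (a usage sketch; the original post only shows the training call):

val_reader = data_reader(8, 10)  # the remaining 20% of the files for validation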
def aug_mapper(data):
    # Apply the same random flip, rotation, zoom, and crop to the
    # volume and its label so the pair stays aligned
    vol = data[0]
    lab = data[1]
    vol, lab = aug.flip(vol, lab, cfg.AUG.FLIP.RATIO)
    vol, lab = aug.rotate(vol, lab, cfg.AUG.ROTATE.RANGE, cfg.AUG.ROTATE.RATIO, 0)
    vol, lab = aug.zoom(vol, lab, cfg.AUG.ZOOM.RANGE, cfg.AUG.ZOOM.RATIO)
    vol, lab = aug.crop(vol, lab, cfg.AUG.CROP.SIZE, 0)
    return vol, lab
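The aug module is the author's own and is not reproduced in the post. As an illustration of the contract these helpers follow, here is a plausible NumPy sketch of flip; the name, signature, and axis choices are assumptions:

import numpy as np

def flip(vol, lab, ratio):
    # With probability `ratio`, mirror the (C, H, W) volume and the
    # (H, W) label along the width axis so they stay aligned
    if np.random.random() < ratio:
        vol = vol[:, :, ::-1].copy()
        lab = lab[:, ::-1].copy()
    return vol, lab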
# Wrap the reader so augmentation runs in 8 parallel worker threads
train_reader = fluid.io.xmap_readers(aug_mapper, data_reader(0, 8), 8, cfg.TRAIN.BATCH_SIZE * 2)
train_loader.set_sample_generator(train_reader, batch_size=cfg.TRAIN.BATCH_SIZE, places=places)

step = 0
for pass_id in range(cfg.TRAIN.EPOCHS):
    for train_data in train_loader():
        step += 1
        avg_loss_value = exe.run(compiled_train_program, feed=train_data, fetch_list=[avg_loss])
        print(step, avg_loss_value)
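For the inference section below to load weights with fluid.io.load_inference_model, the trained network has to be exported first. The export call is not shown in the post; a minimal sketch using the standard fluid API, reusing the variables defined above and assuming infer_param_path is the directory read back at inference time:

fluid.io.save_inference_model(
    dirname=infer_param_path,     # directory that load_inference_model reads below
    feeded_var_names=["image"],   # name of the input variable defined above
    target_vars=[prediction],     # network output to fetch at inference time
    executor=exe,
    main_program=train_program,
)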
Inference
Inference walks the CT volume slice by slice and assembles the full 3D mask:

segmentation = np.zeros(scan.shape)
with fluid.scope_guard(inference_scope):
    # Load the pretrained weights
    [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(infer_param_path, infer_exe)
    for slice_ind in tqdm(range(1, scan.shape[2] - 1)):
        # 2.5D input: take 3 adjacent CT slices as the model input
        scan_slice = scan[:, :, slice_ind - 1: slice_ind + 2]
        # Add the batch_size dimension
        scan_slice = scan_slice[np.newaxis, :, :, :]
        # The model expects CWH input, channels first, so swap the
        # first and third axes of the array
        scan_slice = scan_slice.swapaxes(1, 3)
        result = infer_exe.run(inference_program, feed={feed_target_names[0]: scan_slice}, fetch_list=fetch_targets)
        result = result[0][0][1].reshape([scan.shape[0], scan.shape[1]])
        # Store the segmentation result for this slice
        segmentation[:, :, slice_ind] = result.swapaxes(0, 1)

# Pixels with predicted probability of at least 0.5 are foreground,
# the rest are background
segmentation[segmentation >= 0.5] = 1
segmentation[segmentation < 0.5] = 0
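If the CT volume was read from a NIfTI file, as is common for liver CT data, the binary mask can be written back with the source geometry so it overlays the scan in a viewer. A sketch assuming nibabel was used for I/O (the post does not show its loading code):

import nibabel as nib
import numpy as np

def save_mask(segmentation, reference_path, out_path):
    # Reuse the affine of the original scan so the mask aligns with the CT
    ref = nib.load(reference_path)
    mask = nib.Nifti1Image(segmentation.astype(np.uint8), ref.affine)
    nib.save(mask, out_path)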