- # If nested relationships exist among classes, remove the contained class, recolour its region
- # with the colour of the retained class, and filter out regions whose area is too small.
- # Regularize the boundaries of the building segmentation regions to make them tidier.
-
- from models.model_stages import BiSeNet
- from predict_city.heliushuju import Heliushuju
- from torch.utils.data import DataLoader
- import torch.nn.functional as F
- import pandas as pd
- import numpy as np
- from PIL import Image
- import time
- import os
- os.environ["OPENCV_IO_MAX_IMAGE_PIXELS"] = pow(2, 40).__str__()
- import cv2
- import argparse
- import torch
- import torchvision.transforms as transforms
- from osgeo import gdal, gdal_array, ogr, osr
- from rdp_alg import rdp
- from cal_dist_ang import cal_ang, cal_dist, azimuthAngle
- from rotate_ang import Nrotation_angle_get_coor_coordinates, Srotation_angle_get_coor_coordinates
- from line_intersection import line, intersection, par_line_dist, point_in_line
- import shutil
- os.environ['CUDA_VISIBLE_DEVICES'] = '0'
- os.environ['PROJ_LIB'] = r'/home/thsw/anaconda3/envs/zyy-torch1.10/lib/python3.8/site-packages/pyproj/proj_dir/share/proj'
-
-
- # For one class of the segmentation result, store each contour together with its coordinate points in a list
- def classInfo(contours):
- content = []
- if len(contours) != 0:
- for i in range(len(contours)):
- COOR = []
- x = contours[i][:, :, 0]
- y = contours[i][:, :, 1]
- for j in range(len(x)):
- COOR.append((x[j][0], y[j][0]))
- content.append([COOR, contours[i]])
- return content
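-
- # A minimal sketch of the structure classInfo() returns, assuming a toy square contour
- # in the (N, 1, 2) layout produced by cv2.findContours (hypothetical data):
- #     cnt = np.array([[[0, 0]], [[0, 2]], [[2, 2]], [[2, 0]]], dtype=np.int32)
- #     classInfo([cnt])  # -> [[[(0, 0), (0, 2), (2, 2), (2, 0)], cnt]]
- # i.e. each entry pairs a contour's flattened (x, y) list with the raw contour array.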
-
-
- # For a given class, return its two kinds of contours: stand-alone ones (target) and ones involved in nesting with other classes (questionCnt)
- def single(Content1, Content2, Content3, Content4, Content5, Content6, Content7):
- tempCnt2 = []
- tempCnt3 = []
- tempCnt4 = []
- tempCnt5 = []
- tempCnt6 = []
- tempCnt7 = []
-
- tempCOOR2 = []
- tempCOOR3 = []
- tempCOOR4 = []
- tempCOOR5 = []
- tempCOOR6 = []
- tempCOOR7 = []
-
- # Store each other class's contours in tempCnt*, and the coordinate points of those contours in tempCOOR*
- for i in range(len(Content2)):
- tempCnt2.append(Content2[i][1])
- tempCOOR2 = tempCOOR2 + Content2[i][0]
- for i in range(len(Content3)):
- tempCnt3.append(Content3[i][1])
- tempCOOR3 = tempCOOR3 + Content3[i][0]
- for i in range(len(Content4)):
- tempCnt4.append(Content4[i][1])
- tempCOOR4 = tempCOOR4 + Content4[i][0]
- for i in range(len(Content5)):
- tempCnt5.append(Content5[i][1])
- tempCOOR5 = tempCOOR5 + Content5[i][0]
- for i in range(len(Content6)):
- tempCnt6.append(Content6[i][1])
- tempCOOR6 = tempCOOR6 + Content6[i][0]
- for i in range(len(Content7)):
- tempCnt7.append(Content7[i][1])
- tempCOOR7 = tempCOOR7 + Content7[i][0]
-
- # For the class under inspection (Content1), gather the other classes' contours into elseCnt
- elseCnt = tempCnt2 + tempCnt3 + tempCnt4 + tempCnt5 + tempCnt6 + tempCnt7
- # Likewise, gather the other classes' coordinate lists (tempCOOR*) into elseCOOR
- elseCOOR = tempCOOR2 + tempCOOR3 + tempCOOR4 + tempCOOR5 + tempCOOR6 + tempCOOR7
-
- target = []
- questionCnt = []
- for i in range(len(Content1)):
- selfCOOR = Content1[i][0]  # this contour's coordinate list
- selfCnt = Content1[i][1]  # this contour itself
- flag1 = False
- flag2 = False
-
- # Check whether any coordinate point of this class's contour lies inside another class's contour; as soon as a single point does, store this contour together with its coordinate points in questionCnt
- for j in range(len(elseCnt)):
- for k in range(len(selfCOOR)):
- x = int(selfCOOR[k][0])
- y = int(selfCOOR[k][1])
- flag = cv2.pointPolygonTest(elseCnt[j], (x, y), False)  # is this contour's point inside another class's contour?
- if flag >= 0:
- questionCnt.append([selfCOOR, selfCnt])
- flag1 = True
- break
- if flag1 == True:
- break
-
- # Check whether any coordinate point of another class's contour lies inside this class's contour; as soon as a single point does, store this contour together with its coordinate points in questionCnt
- if flag1 == False:
- for m in range(len(elseCOOR)):
- x = int(elseCOOR[m][0])
- y = int(elseCOOR[m][1])
- flag = cv2.pointPolygonTest(selfCnt, (x, y), False)  # is the other class's point inside this contour?
- if flag >= 0:
- questionCnt.append([selfCOOR, selfCnt])
- flag2 = True
- break
- # For a given class, if one of its contours is neither contained by nor contains any other class's contour, store that contour in the target list
- if flag2 == False:
- target.append(selfCnt)
- return target, questionCnt
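-
- # For reference, cv2.pointPolygonTest(cnt, pt, False) returns +1 if pt lies inside cnt,
- # 0 if it lies on the contour and -1 if it lies outside, hence the `flag >= 0` tests above.
- # A minimal sketch with hypothetical data:
- #     square = np.array([[[0, 0]], [[0, 10]], [[10, 10]], [[10, 0]]], dtype=np.int32)
- #     cv2.pointPolygonTest(square, (5, 5), False)   # -> 1.0 (inside)
- #     cv2.pointPolygonTest(square, (15, 5), False)  # -> -1.0 (outside)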
-
-
- # For a given class, based on the questionCnt returned by single(): if one of its contours is not contained by any other class's contour, store it in the externalCnt list.
- # Finally, if externalCnt is non-empty, fill mask with externalCnt so that the other classes' contours nested inside these contours are overwritten (filtered out).
- def findExternalCnt(questionCnt1, questionCnt2, questionCnt3, questionCnt4, questionCnt5, questionCnt6, questionCnt7,
- mask, value):
- externalCnt = []
- elseCnt = []
- if len(questionCnt1) != 0:
- elseQuestionCnt = questionCnt2 + questionCnt3 + questionCnt4 + questionCnt5 + questionCnt6 + questionCnt7
- for m in range(len(elseQuestionCnt)):
- elseCnt.append(elseQuestionCnt[m][1])
- for i in range(len(questionCnt1)):
- selfCOOR = questionCnt1[i][0]  # this contour's coordinate list
- selfCnt = questionCnt1[i][1]  # this contour itself
- flag1 = False
- for j in range(len(elseCnt)):
- for k in range(len(selfCOOR)):
- x = int(selfCOOR[k][0])
- y = int(selfCOOR[k][1])
- flag = cv2.pointPolygonTest(elseCnt[j], (x, y), False)  # is this contour's point inside another class's contour?
- if flag >= 0:
- flag1 = True
- break
- if flag1 == True:
- break
- if flag1 == False:
- externalCnt.append(selfCnt)
- if len(externalCnt) != 0:
- cv2.fillPoly(mask, externalCnt, color=value)
- return questionCnt1, questionCnt2, questionCnt3, questionCnt4, questionCnt5, questionCnt6, questionCnt7, mask
-
-
- # For a given class, take the target contours returned by single(), drop those whose area is below 2000 pixels, fill the remaining contours into mask with the class value, and return the mask
- def filterArea(targetCnt, mask, value):
- # Filter out small-area regions
- targetNew = []
- for i in range(len(targetCnt)):
- cnt = targetCnt[i]
- cntArea = cv2.contourArea(cnt)
- if cntArea >= 2000:
- targetNew.append(cnt)
- cv2.fillPoly(mask, targetNew, color=value)
- return mask
-
-
- # Return the segmentation image filtered by area and containment relationships
- def judgeTermination(questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionWoodland,
- questionBareSoil, mask):
-
- questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionWoodland, questionBareSoil, mask = findExternalCnt(
- questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionWoodland, questionBareSoil,
- mask, 1)
- questionRoad, questionBuild, questionWater, questionFarmland, questionGrass, questionWoodland, questionBareSoil, mask = findExternalCnt(
- questionRoad, questionBuild, questionWater, questionFarmland, questionGrass, questionWoodland, questionBareSoil,
- mask, 2)
- questionWater, questionBuild, questionRoad, questionFarmland, questionGrass, questionWoodland, questionBareSoil, mask = findExternalCnt(
- questionWater, questionBuild, questionRoad, questionFarmland, questionGrass, questionWoodland, questionBareSoil,
- mask, 3)
- questionFarmland, questionBuild, questionRoad, questionWater, questionGrass, questionWoodland, questionBareSoil, mask = findExternalCnt(
- questionFarmland, questionBuild, questionRoad, questionWater, questionGrass, questionWoodland, questionBareSoil,
- mask, 4)
- questionGrass, questionBuild, questionRoad, questionWater, questionFarmland, questionWoodland, questionBareSoil, mask = findExternalCnt(
- questionGrass, questionBuild, questionRoad, questionWater, questionFarmland, questionWoodland, questionBareSoil,
- mask, 5)
- questionWoodland, questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionBareSoil, mask = findExternalCnt(
- questionWoodland, questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionBareSoil,
- mask, 6)
- questionBareSoil, questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionWoodland, mask = findExternalCnt(
- questionBareSoil, questionBuild, questionRoad, questionWater, questionFarmland, questionGrass, questionWoodland,
- mask, 7)
-
- return mask
-
-
- # Optimize the raw segmentation result according to the area of each class's regions and the containment relationships between the regions of different classes, and return the optimized result
- def optimize(preds):
- h, w = preds.shape[0], preds.shape[1]
- mask = np.zeros((h, w), dtype="uint8")
-
- building = preds.copy()
- road = preds.copy()
- water = preds.copy()
- farmland = preds.copy()
- grass = preds.copy()
- woodland = preds.copy()
- bareSoil = preds.copy()
-
- building[building != 1] = 0  # building
- road[road != 2] = 0  # road
- water[water != 3] = 0  # water
- farmland[farmland != 4] = 0  # farmland
- grass[grass != 5] = 0  # grass
- woodland[woodland != 6] = 0  # woodland
- bareSoil[bareSoil != 7] = 0  # bare soil
-
- # Extract each single class's contours from the segmentation result
- buildCnt, hierarchy = cv2.findContours(np.uint8(building), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- roadCnt, hierarchy = cv2.findContours(np.uint8(road), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- waterCnt, hierarchy = cv2.findContours(np.uint8(water), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- farmlandCnt, hierarchy = cv2.findContours(np.uint8(farmland), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- grassCnt, hierarchy = cv2.findContours(np.uint8(grass), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- woodlandCnt, hierarchy = cv2.findContours(np.uint8(woodland), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
- bareSoilCnt, hierarchy = cv2.findContours(np.uint8(bareSoil), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
-
- # Pair each contour of a class with its coordinate points and store them together
- buildContent = classInfo(buildCnt) # [ [[(x1, y1), (x2, y2)], cnt], ... ]
- roadContent = classInfo(roadCnt)
- waterContent = classInfo(waterCnt)
- farmlandContent = classInfo(farmlandCnt)
- grassContent = classInfo(grassCnt)
- woodlandContent = classInfo(woodlandCnt)
- bareSoilContent = classInfo(bareSoilCnt)
-
- # For each class, split its contours into the two kinds (target / questionCnt)
- targetBuild, questionBuild = single(buildContent, roadContent, waterContent, farmlandContent, grassContent,
- woodlandContent, bareSoilContent)
- targetRoad, questionRoad = single(roadContent, buildContent, waterContent, farmlandContent, grassContent,
- woodlandContent, bareSoilContent)
- targetWater, questionWater = single(waterContent, buildContent, roadContent, farmlandContent, grassContent,
- woodlandContent, bareSoilContent)
- targetFarmland, questionFarmland = single(farmlandContent, buildContent, roadContent, waterContent, grassContent,
- woodlandContent, bareSoilContent)
- targetGrass, questionGrass = single(grassContent, buildContent, roadContent, waterContent, farmlandContent,
- woodlandContent, bareSoilContent)
- targetWoodland, questionWoodland = single(woodlandContent, buildContent, roadContent, waterContent, farmlandContent,
- grassContent, bareSoilContent)
- targetBareSoil, questionBareSoil = single(bareSoilContent, buildContent, roadContent, waterContent, farmlandContent,
- grassContent, woodlandContent)
-
- if len(targetBuild) != 0:
- # For each class, based on the target returned by single(): drop contours with area below 2000, fill the rest into mask, and return the filled mask
- mask = filterArea(targetBuild, mask, 1)
- if len(targetRoad) != 0:
- mask = filterArea(targetRoad, mask, 2)
- if len(targetWater) != 0:
- mask = filterArea(targetWater, mask, 3)
- if len(targetFarmland) != 0:
- mask = filterArea(targetFarmland, mask, 4)
- if len(targetGrass) != 0:
- mask = filterArea(targetGrass, mask, 5)
- if len(targetWoodland) != 0:
- mask = filterArea(targetWoodland, mask, 6)
- if len(targetBareSoil) != 0:
- mask = filterArea(targetBareSoil, mask, 7)
-
- # Return the segmentation image filtered by area and containment relationships
- preds = judgeTermination(questionBuild, questionRoad, questionWater, questionFarmland, questionGrass,
- questionWoodland, questionBareSoil, mask)
- return preds
-
-
- # Examine the width of the original image to decide whether padding is needed; return the number of tiles along the x direction and the (possibly padded) image
- def wideDirection(W, H, terrainClass, img):
- w_num = 0
- if W < terrainClass['w']:
- x_sup = terrainClass['w'] - W
- left = np.zeros((H, x_sup, 3), dtype='uint8')
- img = np.concatenate((img, left), axis=1)
- w_num = 1
- elif W == terrainClass['w']:
- w_num = 1
- else:
- for j in range(W):
- x_pixel = terrainClass['w'] * j - terrainClass['overlapX'] * (j - 1)
- if x_pixel - W == 0:
- w_num = j
- break
- elif x_pixel - W > 0:
- x_sup = x_pixel - W
- left = np.zeros((H, x_sup, 3), dtype='uint8')
- img = np.concatenate((img, left), axis=1)
- w_num = j
- break
- return w_num, img
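-
- # A worked example of the tiling arithmetic above, assuming the defaults used later
- # (terrainClass['w'] = 1024, terrainClass['overlapX'] = 500) and a hypothetical W = 2000:
- #     j = 2: x_pixel = 1024*2 - 500*1 = 1548  (< 2000, keep going)
- #     j = 3: x_pixel = 1024*3 - 500*2 = 2072  (>= 2000, so pad 2072 - 2000 = 72 black
- #     columns on the right and split the image into w_num = 3 overlapping tiles)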
-
-
- # Decide whether the original image needs padding along the height and width directions and pad it if so. Return the padded image and the number of tiles it can be split into along each direction.
- def img_sup(img, terrainClass):
- H, W = img.shape[0], img.shape[1]
- h_num = 0
- if H < terrainClass['h']:
- y_sup = terrainClass['h'] - H
- up = np.zeros((y_sup, W, 3), dtype='uint8')
- img = np.concatenate((img, up), axis=0)
- H = H + y_sup
- h_num = 1
- w_num, img = wideDirection(W, H, terrainClass, img)
- elif H == terrainClass['h']:
- h_num = 1
- w_num, img = wideDirection(W, H, terrainClass, img)
- else:
- for j in range(H):
- y_pixel = terrainClass['h'] * j - terrainClass['overlapY'] * (j - 1)
- if y_pixel - H == 0:
- h_num = j
- break
- elif y_pixel - H > 0:
- y_sup = y_pixel - H
- up = np.zeros((y_sup, W, 3), dtype='uint8')
- img = np.concatenate((img, up), axis=0)
- H = H + y_sup
- h_num = j
- break
- w_num, img = wideDirection(W, H, terrainClass, img)
- # print("line325", y_sup, x_sup, w_num, img.shape)
- return img, h_num, w_num
-
-
- # Run the STDC network on a cropped tile and return the prediction
- def predict(img, self, size, net, label_info, terrainClass):
- img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
- img = self.to_tensor(img)
-
- img = img.cuda()
- img = torch.unsqueeze(img, dim=0)
- img = F.interpolate(img, size, mode='bilinear', align_corners=True)
- logits = net(img)[0]
- logits = F.interpolate(logits, size=size, mode='bilinear', align_corners=True)
- probs = torch.softmax(logits, dim=1)
- preds = torch.argmax(probs, dim=1)
- preds_squeeze = preds.squeeze(0)
- preds = preds_squeeze.cpu().numpy()
-
- # Call optimize(): if nested relationships exist among classes, remove the contained class, recolour its region with the retained class's colour, and filter out small-area regions
- preds = optimize(preds)
- preds = colour_code_segmentation(preds, label_info)  # map class ids to their RGB colours
- preds = cv2.cvtColor(np.uint8(preds), cv2.COLOR_RGB2BGR)
- img = cv2.resize(preds.astype("uint8"), (terrainClass['w'], terrainClass['h']))  # cv2.resize expects (width, height); both are 1024 here
-
- return img
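-
- # Shape walk-through of predict(), assuming a 1024x1024 BGR tile and size = [640, 360]:
- #     to_tensor -> (3, 1024, 1024); unsqueeze -> (1, 3, 1024, 1024)
- #     F.interpolate(img, size) -> (1, 3, 640, 360)   (F.interpolate's size is (H, W))
- #     net(img)[0] -> class logits, interpolated back to (1, n_classes, 640, 360)
- #     softmax + argmax over dim=1 -> (1, 640, 360); squeeze -> (640, 360) class-id map,
- # which is then colour-coded and resized back to the tile size.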
-
-
- # Cropping pattern 1: tile in the first row, i-th column
- def Cropping1(img, terrainClass, i):
- img1 = img[0:terrainClass['h'], (i * terrainClass['w'] - i * terrainClass['overlapX']):(
- (i + 1) * terrainClass['w'] - i * terrainClass['overlapX'] - 1), :]
- return img1
-
-
- # Cropping pattern 2: tile in the i-th row, first column
- def Cropping2(img, terrainClass, i):
- img1 = img[(i * terrainClass['h'] - i * terrainClass['overlapY']):(
- (i + 1) * terrainClass['h'] - i * terrainClass['overlapY'] - 1), 0:terrainClass['w'], :]
- return img1
-
-
- # Cropping pattern 3: tile in the j-th row, first column
- def Cropping3(img, terrainClass, j):
- img1 = img[(j * terrainClass['h'] - j * terrainClass['overlapY']):(
- (j + 1) * terrainClass['h'] - j * terrainClass['overlapY'] - 1), 0:terrainClass['w'], :]
- return img1
-
-
- # Cropping pattern 4: tile in the j-th row, i-th column
- def Cropping4(img, terrainClass, i, j):
- img1 = img[(j * terrainClass['h'] - j * terrainClass['overlapY']):(
- (j + 1) * terrainClass['h'] - j * terrainClass['overlapY'] - 1),
- (i * terrainClass['w'] - i * terrainClass['overlapX']):(
- (i + 1) * terrainClass['w'] - i * terrainClass['overlapX'] - 1), :]
- return img1
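-
- # A worked example of Cropping1 under the defaults (w = 1024, overlapX = 500) with i = 1:
- #     columns 1024*1 - 500*1 = 524  up to  1024*2 - 500*1 - 1 = 1547 (slice end exclusive),
- # i.e. a 1023-pixel-wide tile. predict() resizes every tile back to 1024x1024, so the
- # stitching below still lines up despite the one-pixel-short crop.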
-
-
- # Horizontal (column-wise) concatenation of the tile predictions
- def transverseConcatenate(savePredict):
- img1 = savePredict[0]
- for j in range(1, len(savePredict)):
- img1 = np.concatenate((img1, savePredict[j]), axis=1)
- return img1
-
-
- # Vertical (row-wise) concatenation of the tile predictions
- def longitudinalConcatenate(concatenateList, img):
- if len(concatenateList) == 0:
- concatenateList.append(img)
- else:
- concatenateList = [np.concatenate((concatenateList[0], img), axis=0)]
- return concatenateList
-
-
- # Handle the stitching in four cases
- def concatenateImage(h_num, w_num, self, size, img, net, label_info, terrainClass, h, w, save_path, tifFile, x1, x2, x3,
- x4, y1, y2, y3, y4):
- # Case 1: H <= terrainClass['h'], W <= terrainClass['w']
- if h_num == 1 and w_num == 1:
- img1 = img
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- finalResult = img1[0:h, 0:w]
- finalResult = cv2.cvtColor(finalResult, cv2.COLOR_BGR2GRAY)
-
- # Case 2: H <= terrainClass['h'], W > terrainClass['w']
- elif h_num == 1 and w_num > 1:
- savePredict = []
- for i in range(w_num):
- if i == 0:
- img1 = img[0:terrainClass['h'], 0:terrainClass['w'], :]
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1:
- img1 = img1[0:terrainClass['h'], 0:x3 + 1]
- else:
- img1 = img1[0:terrainClass['h'], 0:x4 + 1]
- savePredict.append(img1)
- elif i > 0 and i < w_num - 1:
- img1 = Cropping1(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1:
- img1 = img1[0:terrainClass['h'], x2:x3 + 1]
- else:
- img1 = img1[0:terrainClass['h'], x1:x4 + 1]
- savePredict.append(img1)
- else:
- img1 = Cropping1(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1:
- img1 = img1[0:terrainClass['h'], x2:]
- else:
- img1 = img1[0:terrainClass['h'], x1:]
- savePredict.append(img1)
- img1 = transverseConcatenate(savePredict)
- finalResult = img1[0:h, 0:w]
- finalResult = cv2.cvtColor(finalResult, cv2.COLOR_BGR2GRAY)
-
- # Case 3: H > terrainClass['h'], W <= terrainClass['w']
- elif h_num > 1 and w_num == 1:
- for i in range(h_num):
- if i == 0:
- img1 = img[0:terrainClass['h'], 0:terrainClass['w'], :]
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapY'] % 2 == 1:
- img1 = img1[0:y3 + 1, 0:terrainClass['w']]
- else:
- img1 = img1[0:y4 + 1, 0:terrainClass['w']]
- concatenateList = [img1]  # holds intermediate results of the stitching
- elif i > 0 and i < h_num - 1:
- img1 = Cropping2(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapY'] % 2 == 1:
- img1 = img1[y2:y3 + 1, 0:terrainClass['w']]
- else:
- img1 = img1[y1:y4 + 1, 0:terrainClass['w']]
- concatenateList = [np.concatenate((concatenateList[0], img1), axis=0)]
- else:
- img1 = Cropping2(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapY'] % 2 == 1:
- img1 = img1[y2:, 0:terrainClass['w']]
- else:
- img1 = img1[y1:, 0:terrainClass['w']]
- concatenateList = [np.concatenate((concatenateList[0], img1), axis=0)]
- finalResult = concatenateList[0][0:h, 0:w]
- finalResult = cv2.cvtColor(finalResult, cv2.COLOR_BGR2GRAY)
-
- # Case 4: H > terrainClass['h'], W > terrainClass['w']
- elif h_num > 1 and w_num > 1:
- concatenateList = []  # holds intermediate results of the stitching
- for j in range(h_num):
- if j == 0:
- savePredict = []
- for i in range(w_num):
- if i == 0:
- img1 = img[0:terrainClass['h'], 0:terrainClass['w'], :]
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, 0:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, 0:x4 + 1])
- elif i > 0 and i < w_num - 1:
- img1 = Cropping1(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, x1:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, x1:x4 + 1])
- else:
- img1 = Cropping1(img, terrainClass, i)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, x2:])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, x2:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[0:y4 + 1, x1:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[0:y3 + 1, x1:])
- img1 = transverseConcatenate(savePredict)
- concatenateList = longitudinalConcatenate(concatenateList, img1)
- elif j > 0 and j < h_num - 1:
- savePredict = []
- for i in range(w_num):
- if i == 0:
- img1 = Cropping3(img, terrainClass, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, 0:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, 0:x4 + 1])
- elif i > 0 and i < w_num - 1:
- img1 = Cropping4(img, terrainClass, i, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, x1:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, x1:x4 + 1])
- else:
- img1 = Cropping4(img, terrainClass, i, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, x2:])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, x2:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:y4 + 1, x1:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:y3 + 1, x1:])
- img1 = transverseConcatenate(savePredict)
- concatenateList = longitudinalConcatenate(concatenateList, img1)
- else:
- savePredict = []
- for i in range(w_num):
- if i == 0:
- img1 = Cropping3(img, terrainClass, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, 0:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, 0:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, 0:x4 + 1])
- elif i > 0 and i < w_num - 1:
- img1 = Cropping4(img, terrainClass, i, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, x2:x3 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, x1:x4 + 1])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, x1:x4 + 1])
- else:
- img1 = Cropping4(img, terrainClass, i, j)
- img1 = predict(img1, self, size, net, label_info, terrainClass)
- if terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, x2:])
- elif terrainClass['overlapX'] % 2 == 1 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, x2:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 0:
- savePredict.append(img1[y1:, x1:])
- elif terrainClass['overlapX'] % 2 == 0 and terrainClass['overlapY'] % 2 == 1:
- savePredict.append(img1[y2:, x1:])
- img1 = transverseConcatenate(savePredict)
- concatenateList = longitudinalConcatenate(concatenateList, img1)
- finalResult = concatenateList[0][0:h, 0:w]
- finalResult = cv2.cvtColor(finalResult, cv2.COLOR_BGR2GRAY)
- return finalResult
-
-
- # Read an image file with GDAL
- def read_img(filename):
- dataset = gdal.Open(filename)  # open the file
- im_width = dataset.RasterXSize  # number of raster columns
- im_height = dataset.RasterYSize  # number of raster rows
- im_geotrans = dataset.GetGeoTransform()  # geotransform (affine transform)
- im_proj = dataset.GetProjection()  # map projection information
- im_data = dataset.ReadAsArray(0, 0, im_width, im_height)  # read the raster into an array
- del dataset
- return im_proj, im_geotrans, im_data
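-
- # For reference, the geotransform returned above is a 6-tuple in GDAL's convention:
- #     (origin_x, pixel_width, row_rotation, origin_y, column_rotation, pixel_height)
- # with pixel_height negative for north-up images, so pixel (col, row) maps to
- #     x = origin_x + col * pixel_width,  y = origin_y + row * pixel_height
- # (ignoring the rotation terms, which are 0 for axis-aligned rasters).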
-
-
- # Write a raster file, taking GeoTIFF as the example
- def write_img(filename, im_proj, im_geotrans, im_data):  # file name, projection info, geotransform, raster array
- # Determine the GDAL data type of the raster
- if 'int8' in im_data.dtype.name:  # note: matches both int8 and uint8, mapping them to GDT_Byte
- datatype = gdal.GDT_Byte
- elif 'int16' in im_data.dtype.name:
- datatype = gdal.GDT_UInt16
- else:
- datatype = gdal.GDT_Float32
-
- # Determine the number of array dimensions
- if len(im_data.shape) == 3:
- im_bands, im_height, im_width = im_data.shape
- else:
- im_bands, (im_height, im_width) = 1, im_data.shape
-
- # Create the output file
- driver = gdal.GetDriverByName("GTiff")  # the data type is required so GDAL can size the allocation
- dataset = driver.Create(filename, im_width, im_height, im_bands, datatype)
- dataset.SetGeoTransform(im_geotrans)  # write the geotransform
- dataset.SetProjection(im_proj)  # write the projection
- if im_bands == 1:
- dataset.GetRasterBand(1).WriteArray(im_data)  # write the array data
- else:
- for i in range(im_bands):
- dataset.GetRasterBand(i + 1).WriteArray(im_data[i])
- del dataset
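-
- # A minimal round-trip sketch of the two helpers above (hypothetical file names):
- #     proj, geotrans, data = read_img('input.tif')
- #     write_img('copy.tif', proj, geotrans, data)
- # which copies the raster while preserving its projection and georeferencing.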
-
-
- # Regularize building outlines
- def boundary_regularization(img, epsilon=6):
- h, w = img.shape[0:2]
- # Locate the contours
- contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # retrieve all contours
- contours = np.squeeze(contours[0])  # [[x1, y1], [x2, y2], ...]
- # Simplify the contour with Douglas-Peucker (RDP)
- contours = rdp(contours, epsilon=epsilon)
- contours[:, 1] = h - contours[:, 1]
-
- # Regularize the contour
- dists = []
- azis = []
- azis_index = []
-
- # Compute the length and azimuth of each edge
- for i in range(contours.shape[0]):
- cur_index = i
- next_index = i + 1 if i < contours.shape[0] - 1 else 0
- prev_index = i - 1
- cur_point = contours[cur_index]
- next_point = contours[next_index]
- prev_point = contours[prev_index]
- dist = cal_dist(cur_point, next_point)  # distance from the current point to the next point
- azi = azimuthAngle(cur_point, next_point)  # azimuth of the edge: the angle between the edge's counter-clockwise direction and the horizontal
- dists.append(dist)
- azis.append(azi)
- azis_index.append([cur_index, next_index])
-
- # Take the direction of the longest edge as the main direction
- longest_edge_idex = np.argmax(dists)
- main_direction = azis[longest_edge_idex]  # angle between the main direction and the horizontal, measured counter-clockwise
-
- # Direction correction: rotate each edge about its midpoint until it is parallel or perpendicular to the main direction
- correct_points = []
- para_vetr_idxs = []  # 0 = parallel, 1 = perpendicular
- for i, (azi, (point_0_index, point_1_index)) in enumerate(zip(azis, azis_index)):
- if i == longest_edge_idex:
- correct_points.append([contours[point_0_index], contours[point_1_index]])
- para_vetr_idxs.append(0)
- else:
- # Determine the rotation angle
- rotate_ang = main_direction - azi
- if np.abs(rotate_ang) < 180 / 4:
- para_vetr_idxs.append(0)  # treat as parallel to the main direction
- elif np.abs(rotate_ang) >= 90 - 180 / 4:
- rotate_ang = rotate_ang + 90
- para_vetr_idxs.append(1)  # treat as perpendicular to the main direction
- # Perform the rotation
- point_0 = contours[point_0_index]  # current point
- point_1 = contours[point_1_index]  # the next point
- point_middle = (point_0 + point_1) / 2
- if rotate_ang > 0:
- rotate_point_0 = Srotation_angle_get_coor_coordinates(point_0, point_middle, np.abs(rotate_ang))
- rotate_point_1 = Srotation_angle_get_coor_coordinates(point_1, point_middle, np.abs(rotate_ang))
- elif rotate_ang < 0:
- rotate_point_0 = Nrotation_angle_get_coor_coordinates(point_0, point_middle, np.abs(rotate_ang))
- rotate_point_1 = Nrotation_angle_get_coor_coordinates(point_1, point_middle, np.abs(rotate_ang))
- else:
- rotate_point_0 = point_0
- rotate_point_1 = point_1
- correct_points.append([rotate_point_0, rotate_point_1])
- correct_points = np.array(correct_points)
-
- # Adjacent-edge correction: perpendicular edges are intersected; parallel edges are either translated (the short edge) or joined with an added segment
- final_points = []
- final_points.append(correct_points[0][0])
- for i in range(correct_points.shape[0] - 1):
- cur_index = i
- next_index = i + 1 if i < correct_points.shape[0] - 1 else 0
- cur_edge_point_0 = correct_points[cur_index][0]
- cur_edge_point_1 = correct_points[cur_index][1]
- next_edge_point_0 = correct_points[next_index][0]
- next_edge_point_1 = correct_points[next_index][1]
- cur_para_vetr_idx = para_vetr_idxs[cur_index]
- next_para_vetr_idx = para_vetr_idxs[next_index]
- if cur_para_vetr_idx != next_para_vetr_idx:
- # Perpendicular: take the intersection point
- L1 = line(cur_edge_point_0, cur_edge_point_1)
- L2 = line(next_edge_point_0, next_edge_point_1)
- point_intersection = intersection(L1, L2)  # intersection point
- final_points.append(point_intersection)
- elif cur_para_vetr_idx == next_para_vetr_idx:
- # Parallel: two sub-cases decided by a distance threshold, either translate or add a connecting segment
- L1 = line(cur_edge_point_0, cur_edge_point_1)
- L2 = line(next_edge_point_0, next_edge_point_1)
- marg = par_line_dist(L1, L2)  # distance between the two parallel lines
- if marg < 3:
- # Translate
- point_move = point_in_line(next_edge_point_0[0], next_edge_point_0[1], cur_edge_point_0[0],
- cur_edge_point_0[1], cur_edge_point_1[0], cur_edge_point_1[1])
- final_points.append(point_move)
- # Update the next edge after the translation
- correct_points[next_index][0] = point_move
- correct_points[next_index][1] = point_in_line(next_edge_point_1[0], next_edge_point_1[1],
- cur_edge_point_0[0], cur_edge_point_0[1],
- cur_edge_point_1[0], cur_edge_point_1[1])
- else:
- # Add a connecting segment
- add_mid_point = (cur_edge_point_1 + next_edge_point_0) / 2
- add_point_1 = point_in_line(add_mid_point[0], add_mid_point[1], cur_edge_point_0[0],
- cur_edge_point_0[1], cur_edge_point_1[0], cur_edge_point_1[1])
- add_point_2 = point_in_line(add_mid_point[0], add_mid_point[1], next_edge_point_0[0],
- next_edge_point_0[1], next_edge_point_1[0], next_edge_point_1[1])
- final_points.append(add_point_1)
- final_points.append(add_point_2)
-
- final_points.append(final_points[0])
- final_points = np.array(final_points)
- final_points[:, 1] = h - final_points[:, 1]
- return final_points
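-
- # A minimal usage sketch, assuming `binary` is a uint8 mask holding a single connected
- # building region (as prepared per component in optimizeBuilding() below):
- #     polygon = boundary_regularization(binary, epsilon=6).astype(np.int32)
- #     cv2.fillPoly(canvas, [polygon.reshape(-1, 1, 2)], color=255)
- # epsilon is the Douglas-Peucker tolerance: larger values give coarser outlines.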
-
-
- # Stitch the tile predictions as follows so they join seamlessly, whether the number of overlapping pixels between tiles is odd or even.
- def concatenateCOOR(terrainClass):
- y1 = int(terrainClass['overlapY'] / 2)  # even overlap: half the overlap
- y2 = int((terrainClass['overlapY'] - 1) / 2 + 1)  # odd overlap: up to and including the middle pixel
- y3 = int(terrainClass['h'] - 1 - (terrainClass['overlapY'] - 1) / 2)  # odd overlap: keep up to the middle overlap pixel
- y4 = int(terrainClass['h'] - 1 - terrainClass['overlapY'] / 2)  # even overlap: e.g. overlap 6 keeps up to the third overlap pixel
- x1 = int(terrainClass['overlapX'] / 2)
- x2 = int((terrainClass['overlapX'] - 1) / 2 + 1)
- x3 = int(terrainClass['w'] - 1 - (terrainClass['overlapX'] - 1) / 2)  # odd overlap: keep up to the middle overlap pixel
- x4 = int(terrainClass['w'] - 1 - terrainClass['overlapX'] / 2)  # even overlap
- return y1, y2, y3, y4, x1, x2, x3, x4
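-
- # A worked example with the defaults (h = 1024, overlapY = 500, i.e. an even overlap):
- #     y1 = 250, y4 = 773, so an interior tile keeps rows 250..773, exactly
- #     h - overlapY = 524 rows, while the first tile keeps rows 0..773 and the last one
- #     keeps rows 250..end; adjoining tiles therefore meet with no gap and no double coverage.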
-
-
- # Based on the raw segmentation result, regularize the building boundaries and return the optimized segmentation image
- def optimizeBuilding(img, labelsDict):
- sourceSegImg = img.copy()
- sourceSegImg[sourceSegImg == labelsDict['building']] = 0  # remove the buildings
- img[img != labelsDict['building']] = 0  # keep only the buildings
- contours, hierarch = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
- # Noise points may exist near the borders; filter them out by area
- for i in range(len(contours)):
- buildingCnt = contours[i]
- buildingCntArea = cv2.contourArea(buildingCnt)
- if buildingCntArea < 2000:
- cv2.drawContours(img, [buildingCnt], 0, 0, -1)  # fill this contour region with 0
-
- ori_img1 = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
- h, w = ori_img1.shape[0], ori_img1.shape[1]
- # Median filtering to remove noise
- ori_img = cv2.medianBlur(ori_img1, 5)  # kernel size 5
- ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
- ret, ori_img = cv2.threshold(ori_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
- # Connected-component analysis (connectivity=8 means 8-connected). Returns the number of
- # components, a per-pixel label map, per-label statistics and each component's centroid.
- num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(ori_img, connectivity=8)
- # Iterate over the connected components
- allCnt = []
- for i in range(1, num_labels):
- img = np.zeros_like(labels)
- index = np.where(labels == i)
- img[index] = 255
- img = np.array(img, dtype=np.uint8)
- regularization_contour = boundary_regularization(img).astype(np.int32)
- rows = regularization_contour.shape[0]
- regularization_contour = regularization_contour.reshape(rows, 1, 2)
- regularization_contour = regularization_contour.astype(int)
- allCnt.append(regularization_contour)
- buildingMask = np.zeros((h, w), dtype='uint8')
- cv2.fillPoly(buildingMask, allCnt, color=labelsDict['building'])
- buildingNew = buildingMask.copy()
- buildingMask[buildingMask == 0] = 255
- buildingMask[buildingMask == labelsDict['building']] = 0  # invert the mask: regularized buildings -> 0, background -> 255
- imgElse = cv2.bitwise_and(sourceSegImg, sourceSegImg, mask=buildingMask)  # from the image without buildings, also clear everything inside the regularized building outlines
- img = cv2.bitwise_or(buildingNew, imgElse)
- return img
-
-
- # Convert the original image to a greyscale mask
- def sourceImg2gray(sourceImg, finalResult):
- imgGray = cv2.cvtColor(sourceImg, cv2.COLOR_BGR2GRAY)
- cv2.imwrite("/DATA/zyy/output/1-gray.tif", imgGray)
- maskNew = np.zeros((sourceImg.shape[0], sourceImg.shape[1]), dtype='uint8')
- contours, hierarch = cv2.findContours(imgGray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
- cntList = []
- for i in range(len(contours)):
- # The edges of the original tif may contain noise pixels; filter them out by area, keeping only contours with area >= 50000
- if cv2.contourArea(contours[i]) >= 50000:
- cntList.append(contours[i])
- cv2.fillPoly(maskNew, cntList, color=255)
- cv2.imwrite("/DATA/zyy/output/1-binary.tif", maskNew)
- finalResult = cv2.bitwise_and(finalResult, finalResult, mask=maskNew)
- cv2.imwrite("/DATA/zyy/output/1-fill.tif", maskNew)
- return maskNew, finalResult
-
-
- # Generate a binary image for a single class
- def generateBinaryImg(gdal_array, singleImgPath, outputImg):
- # Load the image into numpy via gdal
- srcArr = gdal_array.LoadFile(singleImgPath)
- # Split the histogram into 2 bins so the class can be separated from the background
- classes = gdal_array.numpy.histogram(srcArr, bins=2)[1]
- # The colour look-up table must have len(classes) + 1 records
- lut = [[255, 0, 0], [0, 0, 0], [255, 255, 255]]
- # Starting value for classification
- start = 1
- # Create the output image
- rgb = gdal_array.numpy.zeros((3, srcArr.shape[0], srcArr.shape[1]), gdal_array.numpy.float32)
- # Process every bin and assign its colour
- for i in range(len(classes)):
- mask = gdal_array.numpy.logical_and(start <= srcArr, srcArr <= classes[i])
- for j in range(len(lut[i])):
- rgb[j] = gdal_array.numpy.choose(mask, (rgb[j], lut[i][j]))  # apply the mask layer to the image
- start = classes[i] + 1
- # Save the image
- output = gdal_array.SaveArray(rgb.astype(gdal_array.numpy.uint8), outputImg, format="GTIFF",
- prototype=singleImgPath)
- output = None
-
-
- # Implementation details of shapefile generation
- def generateShp(binaryImgPath, shp, shpLayer):
- # Open the input raster
- # srcDS = gdal.Open(binaryImgPath)  # original version
- srcDS = gdal.Open(binaryImgPath, gdal.GA_ReadOnly)
- # Get the first band
- band = srcDS.GetRasterBand(1)
- # Let gdal use this band as the mask layer
- mask = band
- # Create the output shapefile
- driver = ogr.GetDriverByName("ESRI Shapefile")
- shp = driver.CreateDataSource(shp)
- # Copy the spatial reference
- srs = osr.SpatialReference()
- srs.ImportFromWkt(srcDS.GetProjectionRef())
- layer = shp.CreateLayer(shpLayer, srs=srs)
- # Create the dbf attribute field
- fd = ogr.FieldDefn("DN", ogr.OFTInteger)
- layer.CreateField(fd)
- dst_field = 0
- # Automatically extract polygon features from the raster
- extract = gdal.Polygonize(band, mask, layer, dst_field, [], None)
- extract = None
- shp = None  # be sure to close the datasource; otherwise the shp appears empty when opened
-
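- # For reference, gdal.Polygonize(band, mask, layer, field_index, options, callback) traces
- # connected regions of equal pixel value into polygons; passing the band itself as the mask
- # skips zero-valued pixels, and field_index 0 writes each region's pixel value into the
- # "DN" field created above.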
-
- # Generate a shp file for each class except "other" and save them under the shpfile directory
- def generateShpFile(labelsDict, finalResult, tifFile, self, imgGray):
- # path2 = "./output/{}/addCOOR".format(tifFile[:-4])
- path2 = "/DATA/zyy/output/{}/addCOOR".format(tifFile[:-4])
- if not os.path.exists(path2):
- os.makedirs(path2)
- path3 = "/DATA/zyy/output/{}/binaryImage".format(tifFile[:-4])
- if not os.path.exists(path3):
- os.makedirs(path3)
- for k, v in labelsDict.items():
- singleClassImg = finalResult.copy()
- singleClassImg[singleClassImg != v] = 0
- path1 = "/DATA/zyy/output/{}/labelClass".format(tifFile[:-4])
- if not os.path.exists(path1):
- os.makedirs(path1)
- singleClassCnt, hierarch = cv2.findContours(singleClassImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
-
- # Noise points may exist near the borders; filter them out by area
- for i in range(len(singleClassCnt)):
- cnt1 = singleClassCnt[i]
- cntArea = cv2.contourArea(cnt1)
- if cntArea < 2000:
- cv2.drawContours(singleClassImg, [cnt1], 0, 0, -1)  # fill this contour region with 0
- cv2.imwrite(path1 + os.sep + str(k) + '.tif', singleClassImg)
-
- # Accumulate the non-"other" class's mask onto the mask of the original image
- addImg = singleClassImg.copy()
- addImg[addImg == 0] = 255
- addImg[addImg != 255] = 0
- imgGray = cv2.bitwise_and(imgGray, imgGray, mask=addImg)
-
- # path4 = "./output/{}/shpfile".format(tifFile[:-4]) + os.sep + str(k) + '_shpfile'
- path4 = "/DATA/zyy/output/{}/shpfile".format(tifFile[:-4]) + os.sep + str(k) + '_shpfile'
- if not os.path.exists(path4):
- os.makedirs(path4)
-
- # Write the original tif's spatial information into the greyscale image
- proj, geotrans, data = read_img(self.testImagePath + tifFile)  # read the original image: returns projection info, geotransform and raster array
- proj_single, geotrans_single, data_single = read_img(
- path1 + os.sep + str(k) + '.tif')  # read the single-class greyscale image
- write_img(path2 + os.sep + str(k) + '.tif', proj, geotrans, data_single)  # write the original image's projection info and geotransform into the single-class greyscale image
-
- # Step 1: classify the image into two classes
- # the classified single-class image
- singleImgPath = path2 + os.sep + str(k) + '.tif'
- # output file name
- outputImg = path3 + os.sep + str(k) + '_binary.tif'
- generateBinaryImg(gdal_array, singleImgPath, outputImg)
-
- # Step 2: polygonize the binary raster
- # the thresholded output raster file name
- binaryImgPath = path3 + os.sep + str(k) + '_binary.tif'
- # the output shapefile name
- shp = path4 + os.sep + str(k) + '.shp'
- # layer name
- shpLayer = str(k)
- generateShp(binaryImgPath, shp, shpLayer)
- return imgGray
-
-
- # Generate the shp file for the "other" class
- def generateOtherShpFile(tifFile, self):
- path5 = "/DATA/zyy/output/{}/shpfile/other_shpfile".format(tifFile[:-4])
- if not os.path.exists(path5):
- os.makedirs(path5)
-
- # Write the original tif's spatial information into the greyscale image
- proj, geotrans, data = read_img(self.testImagePath + tifFile)  # read the original image: returns projection info, geotransform and raster array
- proj_single, geotrans_single, data_single = read_img(
- "/DATA/zyy/output/{}/labelClass/other.tif".format(tifFile[:-4]))  # read the single-class greyscale image
- write_img("/DATA/zyy/output/{}/addCOOR".format(tifFile[:-4]) + os.sep + 'other.tif', proj, geotrans,
- data_single)  # write the original image's projection info and geotransform into the single-class greyscale image
- # Step 1: classify the image into two classes
- # the classified single-class image
- singleImgPath = "/DATA/zyy/output/{}/addCOOR".format(tifFile[:-4]) + os.sep + 'other.tif'
- # output file name
- outputImg = "/DATA/zyy/output/{}/binaryImage".format(tifFile[:-4]) + os.sep + 'other_binary.tif'
- generateBinaryImg(gdal_array, singleImgPath, outputImg)
-
- # Step 2: polygonize the binary raster
- # the thresholded output raster file name
- binaryImgPath = "/DATA/zyy/output/{}/binaryImage".format(tifFile[:-4]) + os.sep + 'other_binary.tif'
- # the output shapefile name
- shp = "/DATA/zyy/output/{}/shpfile/other_shpfile".format(tifFile[:-4]) + os.sep + 'other.shp'
- # layer name
- shpLayer = "other"
- generateShp(binaryImgPath, shp, shpLayer)
-
-
- class MscEvalV0(object):
- def __init__(self, scaleH=1 / 3, scaleW=1 / 3, ignore_label=255, testImagePath=''):
- self.ignore_label = ignore_label
- self.scaleH = scaleH
- self.scaleW = scaleW
- self.testImagePath = testImagePath
- self.to_tensor = transforms.Compose([
- transforms.ToTensor(),
- transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
- ])
-
- def __call__(self, net, dl, n_classes):
- label_info = get_label_info('./class_dict.csv')
- tifList = os.listdir(self.testImagePath)
- # terrainClass holds the tile size and the overlap between tiles; labelsDict holds the grey value of each class's pixels in the predicted segmentation image
- terrainClass = {'overlapX': 500, 'overlapY': 500, 'h': 1024, 'w': 1024}
- labelsDict = {'building': 206, 'road': 245, 'water': 193, 'farmland': 235, 'grass': 159, 'woodland': 130,
- 'bareSoil': 211}
-
- size = [640, 360]  # run inference at 640x360 (F.interpolate takes (H, W))
- path0 = "/DATA/zyy/output"
- if not os.path.exists(path0):
- os.makedirs(path0)
-
- for tifFile in tifList:
- sourceImg = cv2.imread(self.testImagePath + tifFile)
- h, w = sourceImg.shape[0], sourceImg.shape[1]
- print("line991", sourceImg.shape)
- paddingImg, h_num, w_num = img_sup(sourceImg, terrainClass) # 返回填充后的图像及h和w方向上可被切分的个数
- save_path = './demo/'
- if not os.path.exists(save_path):
- os.makedirs(save_path)
-
- # Stitch the tile predictions as follows so they join seamlessly, whether the overlap between tiles is an odd or an even number of pixels
- y1, y2, y3, y4, x1, x2, x3, x4 = concatenateCOOR(terrainClass)
-
- # Stitch the tile predictions together and return the stitched image
- finalResult = concatenateImage(h_num, w_num, self, size, paddingImg, net, label_info, terrainClass, h, w,
- save_path, tifFile, x1,
- x2, x3, x4, y1, y2, y3, y4)
-
- cv2.imwrite(save_path + tifFile[:-4] + "_1024x1024-500-04-18.tif", finalResult)
-
- # Based on the raw segmentation result, regularize the building boundaries and return the optimized result
- finalResult = optimizeBuilding(finalResult, labelsDict)
- cv2.imwrite("./demo/finalResult.tif", finalResult)
-
- # Convert the original image to a greyscale mask
- imgGray, finalResult = sourceImg2gray(sourceImg, finalResult)
- # Generate a shp file for each class except "other" and save them under the shpfile directory
- imgGray = generateShpFile(labelsDict, finalResult, tifFile, self, imgGray)
- cv2.imwrite("/DATA/zyy/output/{}/labelClass/other.tif".format(tifFile[:-4]), imgGray)
- # Generate the shp file for the "other" class
- generateOtherShpFile(tifFile, self)
-
- # Delete the intermediate files generated along the way
- shutil.rmtree("/DATA/zyy/output/{}/addCOOR".format(tifFile[:-4]))
- shutil.rmtree("/DATA/zyy/output/{}/binaryImage".format(tifFile[:-4]))
- shutil.rmtree("/DATA/zyy/output/{}/labelClass".format(tifFile[:-4]))
-
-
- def colour_code_segmentation(image, label_values):  # map each class id in image to its RGB colour
- label_values = [label_values[key] for key in label_values]  # e.g. [[0, 0, 0], [128, 0, 0], [0, 128, 0]] (list)
- colour_codes = np.array(label_values)  # the same colours as an ndarray
- image = colour_codes[image]
- return image
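-
- # A minimal sketch of the fancy-indexing step above (hypothetical colours):
- #     colour_codes = np.array([[0, 0, 0], [206, 206, 206]])
- #     preds = np.array([[0, 1], [1, 0]])
- #     colour_codes[preds]  # -> shape (2, 2, 3): each class id replaced by its RGB triple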
-
-
- def get_label_info(csv_path):
- ann = pd.read_csv(csv_path)
- label = {}
- for _, row in ann.iterrows():
- label_name = row['name']
- r = row['r']
- g = row['g']
- b = row['b']
- label[label_name] = [int(r), int(g), int(b)]
- return label
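-
- # get_label_info() assumes class_dict.csv has one row per class with columns name,r,g,b,
- # for example (hypothetical values):
- #     name,r,g,b
- #     building,206,206,206
- #     road,245,245,245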
-
-
- def evaluatev0(respth='', dspth='', backbone='', testImagePath='', scaleH=1 / 3, scaleW=1 / 3, use_boundary_2=False,
- use_boundary_4=False, use_boundary_8=False, use_boundary_16=False, use_conv_last=False):
- # dataset
- batchsize = 1
- n_workers = 0
- dsval = Heliushuju(dspth, mode='test')
- dl = DataLoader(dsval,
- batch_size=batchsize,
- shuffle=False,
- num_workers=n_workers,
- drop_last=False)
-
- n_classes = 8
- # print("backbone:", backbone)
- net = BiSeNet(backbone=backbone, n_classes=n_classes,
- use_boundary_2=use_boundary_2, use_boundary_4=use_boundary_4,
- use_boundary_8=use_boundary_8, use_boundary_16=use_boundary_16,
- use_conv_last=use_conv_last)
- net.load_state_dict(torch.load(respth))
- net.cuda()
- net.eval()
-
- with torch.no_grad():
- single_scale = MscEvalV0(scaleH=scaleH, scaleW=scaleW, testImagePath=testImagePath)  # use the testImagePath argument instead of a hard-coded path
- single_scale(net, dl, n_classes)
-
-
- if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', nargs='+', type=str, default='./model_save/pths/best.pt', help='model.pt path(s)')
- parser.add_argument('--source', type=str, default='./data/test/images', help='source') # file/folder, 0 for webcam
- parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
- parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
- parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--view-img', action='store_true', help='display results')
- parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
- parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
- parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
- parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
- parser.add_argument('--augment', action='store_true', help='augmented inference')
- parser.add_argument('--update', action='store_true', help='update all models')
- opt = parser.parse_args()
-
- t1 = time.time()
- evaluatev0(respth='./model_save/pths/model_final.pth',
- dspth='../trafficDetectionTestData/trafficAccidentTest/masks', backbone='STDCNet813', scaleH=1 / 3,
- testImagePath='./data/test/images/',
- scaleW=1 / 3, use_boundary_2=False, use_boundary_4=False, use_boundary_8=False,
- use_boundary_16=False, use_conv_last=False)
- t2 = time.time()
- print("line532", t2 - t1)
-