# Semantic-Postprocess/main.py

import copy
import math
import cv2
import numpy as np
from matplotlib import pyplot
from numpy import where, mean
from scipy import optimize
from skimage import morphology
from sklearn.cluster import OPTICS
from config import opt
# Image preprocessing
def process_map(img, opt):
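# Pipeline overview (see __main__): pad and remap the input grey levels with get_map, extract the
# wall components, straighten them along the dominant Hough-line directions, close the door gaps
# with connect_door, and return (result, result_, map_ori): result has only the doors connected,
# result_ additionally treats dense semantic blobs as wall, and map_ori is the re-inverted,
# otherwise unprocessed map.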
def get_wall_area(map, area):
'''
. map image from which the wall regions are extracted
. stats connected-component statistics (computed inside)
. area area threshold: larger components are treated as wall
'''
map_ = copy.copy(map)
map_[map != 255] = 0
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(map_, connectivity=8)
temp = []
for i in range(1, num_labels):
if stats[i, 4] > area:
temp.append(i)
wall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
wall_area[:, :][mask] = 255
return wall_area
def get_NONwall_area(map, stats, area, num_labels, labels):
'''
. map image from which the wall regions were extracted
. stats connected-component statistics
. area area threshold: smaller components are treated as non-wall
'''
temp = []
for i in range(1, num_labels):
mask = labels == i
if stats[i, 4] < area:
temp.append(i)
elif np.max(map[:, :][mask]) in Intensity_value:
temp.append(i)
NONwall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
NONwall_area[:, :][mask] = 255
return NONwall_area
def get_rotate_angle(wall_area, length, opt):
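# Detect the dominant wall orientations with Canny + HoughLines; for each new orientation
# (at least 0.15*pi away from the ones already handled) rotate the wall image so the walls are
# axis-aligned, close the door gaps with connect_door, rotate back, and merge the closing
# segments into wall_area.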
temp = copy.copy(wall_area)
# temp = cv2.pyrUp(temp)
# temp = cv2.pyrUp(temp)
edges = cv2.Canny(temp, 50, 150, apertureSize=5)
lines = cv2.HoughLines(edges, 1, np.pi / 180, length)
# if (lines == None):
# return wall_area
temp = np.zeros((wall_area.shape[0], wall_area.shape[1]))
np.zeros((wall_area.shape[0], wall_area.shape[1]))
alfa = 0
theta_temp = [0]
for line in lines:
n = 0
rho = line[0][0]
theta = line[0][1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
# alfa = alfa + np.mod(theta, 90 / 360 * np.pi)
# alfa = theta
theta_array = np.array(theta_temp)
n = 0
for i in range(theta_array.shape[0]):
x = np.abs(theta - theta_array[i])
if (0 < x < 0.15 * np.pi):
n = 1
break
if (n == 1):
continue
theta_temp.append(theta)
wall_area = rotate_bound(wall_area, -np.mod(theta, 180 / 360 * np.pi) / np.pi * 180) # straighten the image
temp = rotate_bound(temp, -np.mod(theta, 180 / 360 * np.pi) / np.pi * 180) # straighten the image
# wall_area_ = cv2.dilate(wall_area_, kernel) # erode/dilate to remove semantic points on the walls
temp_ = connect_door(wall_area, opt.k, opt.m) # close off the regions
temp[temp_ > 0] = 0
temp = temp_ + temp
wall_area = rotate_bound(wall_area, np.mod(theta, 180 / 360 * np.pi) / np.pi * 180)
temp = rotate_bound(temp, np.mod(theta, 180 / 360 * np.pi) / np.pi * 180)
wall_area[temp > 0] = 255
return wall_area
def rotate_bound(image, angle):
'''
. Rotate an image about its centre (the output keeps the original canvas size)
. @param image image loaded with OpenCV
. @param angle (counter-clockwise) rotation angle in degrees
'''
# img = cv2.imread("img/1.jpg")
(h, w) = image.shape[:2] # shape is (height, width, channels); take the first two
# Build the rotation matrix (the negated angle gives a clockwise rotation). Arg 1 is the rotation
# centre, arg 2 the angle (positive = counter-clockwise), arg 3 the isotropic scale factor.
M = cv2.getRotationMatrix2D((w / 2, h / 2), -angle, 1.0)
# compute the new bounding dimensions of the image
# adjust the rotation matrix to account for translation
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (image.shape[1], image.shape[0])) # borderValue omitted, defaults to black
# return M
def get_map(img, ratio):
"""
:param img: raw grayscale occupancy map
:param ratio: expansion factor for the map; 2 means twice the effective area
:return:
"""
ratio = (ratio - 1) / 2
# b = np.nonzero(img)
# x_min = np.min(b[0])
# y_min = np.min(b[1])
# x_max = np.max(b[0])
# y_max = np.max(b[1])
# x_l = x_max - x_min
# y_l = y_max - y_min
# if x_l < 150:
# ratio = ratio + 0.5
# map = np.zeros((x_max - x_min, y_max - y_min))
# temp = img[int(max(x_min - ratio * x_l, 0)):int(min(x_max + x_l * ratio, img.shape[1])),
# int(max(0, y_min - ratio * y_l)):int(min(img.shape[0], ratio * y_l + y_max))]
'''
Pad the map with zero borders (the commented-out block above instead cropped away the unoccupied area)
'''
temp_x = np.zeros((img.shape[0], 50), np.uint8)
map = np.hstack((temp_x, img))
temp_y = np.zeros((50, map.shape[1]), np.uint8)
map = np.vstack((temp_y, map))
# map = np.zeros((img.shape[0], 50), np.uint8)
temp_x = np.zeros((map.shape[0], 40), np.uint8)
map = np.hstack((map, temp_x))
temp_y = np.zeros((40, map.shape[1]), np.uint8)
map = np.vstack((map, temp_y))
# map = copy.copy(img)
map[map < 70] = 255
map[map <= 100] = 0
map[map == 0] = 1
map[map == 255] = 0
map[map == 1] = 255
"墙壁灰度值为1"
return map
def draw_rgb(map):
def rgb_semantics(number):
# Intensity value -> RGB colour of each semantic object class (objects 0-23); unknown values fall back to (5, 5, 5)
color_table = {
115: (232, 221, 203), # object 0
116: (205, 179, 128), # object 1
117: (3, 101, 100), # object 2
118: (3, 54, 73), # object 3
119: (3, 22, 52), # object 4
120: (237, 222, 139), # object 5
121: (251, 178, 23), # object 6
150: (96, 143, 159), # object 7
151: (1, 77, 103), # object 8
152: (254, 67, 101), # object 9
153: (252, 157, 154), # object 10
154: (249, 205, 173), # object 11
155: (200, 200, 169), # object 12
156: (131, 175, 155), # object 13
200: (229, 187, 129), # object 14
201: (161, 23, 21), # object 15
202: (118, 77, 57), # object 16
203: (17, 63, 61), # object 17
204: (60, 79, 57), # object 18
205: (95, 92, 51), # object 19
206: (179, 214, 110), # object 20
207: (227, 160, 93), # object 21
208: (178, 190, 126), # object 22
209: (56, 13, 49), # object 23
}
output = np.array(color_table.get(number, (5, 5, 5)), np.uint8)
return output
output = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
output_b = copy.copy(map)
output_g = copy.copy(map)
output_r = copy.copy(map)
for i in range(np.array(Intensity_value).shape[0]):
intensity = Intensity_value[i]
output_r[map == intensity] = rgb_semantics(intensity)[0]
output_g[map == intensity] = rgb_semantics(intensity)[1]
output_b[map == intensity] = rgb_semantics(intensity)[2]
output[:, :, 0] = output_b
output[:, :, 1] = output_g
output[:, :, 2] = output_r
return output
# Row scan: gaps of up to k pixels between wall pixels are filled in
# Connect the doorway gaps
def connect_door(wall_area, k, m):
"""
:param wall_area: binary wall image
:param k: door width (maximum gap to fill)
:param m: minimum wall length
:return:
"""
def edge_connection(img, size, k, m):
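# For each scan line: if the line contains at least m wall pixels, fill every gap between
# consecutive wall pixels that is at most k pixels wide (assumed to be a doorway) with 255.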
for i in range(size):
Yi = np.where(img[i, :] > 220)
if len(Yi[0]) >= m: # adjustable (wall length threshold)
for j in range(0, len(Yi[0]) - 1):
if Yi[0][j + 1] - Yi[0][j] <= k:
# img[min(i - 1, img.shape[0] - 1), Yi[0][j]:Yi[0][j + 1]] = 255
img[min(i, img.shape[0] - 1), Yi[0][j]:Yi[0][j + 1]] = 255
# img[i, Yi[0][j]:Yi[0][j + 1]] = 255
# img[min(i + 1, img.shape[0]-1), Yi[0][j]:Yi[0][j + 1]] = 255
return img
img = copy.copy(wall_area)
g = edge_connection(img, img.shape[0], k, m) # k is the door width, m is the minimum run length treated as wall
g = cv2.rotate(g, 0)
g = edge_connection(g, img.shape[1], k, m)
g = cv2.rotate(g, 2)
g = g.astype(np.uint8)
return g
'''
Drawing of the map and of the object shapes
'''
'Define the semantic intensity values'
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
# Intensity_value = [153]
map = get_map(img, 4)
'Interpolate to increase the resolution'
map = cv2.resize(map, (map.shape[1] * 2, map.shape[0] * 2), interpolation=cv2.INTER_NEAREST)
map_ori = copy.copy(map)
wall_area = get_wall_area(map, opt.acreage) # extract the wall regions
NON_wall_area = map - wall_area
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
wall_area = cv2.dilate(wall_area, kernel)
connect_line = get_rotate_angle(wall_area, opt.length, opt) # connect the wall segments
connect_line = cv2.erode(connect_line, kernel, iterations=3)
connect_line[connect_line < 150] = 0
connect_line[connect_line > 0] = 255
temp1 = copy.copy(connect_line)
temp2 = copy.copy(NON_wall_area)
temp1[temp1 > 0] = 1
temp = temp1 * temp2
NON_wall_area[temp != 0] = 0
result = connect_line + NON_wall_area
result[result > 255] = 255
result = result.astype(np.uint8)
result[result == 0] = 254
result[result == 255] = 0
result[result == 254] = 255
# cv2.imwrite("result.png", result)
'''
Region extraction
'''
map_ = copy.copy(map) # used for closing off the regions
map_[map_ > 0] = 255
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(map_, connectivity=8)
wall_area_ = get_wall_area(map_, opt.acreage) # extract the wall regions
NON_wall_area_ = map_ - wall_area_
wall_area_ = cv2.dilate(wall_area_, kernel)
connect_line_ = get_rotate_angle(wall_area_, opt.length, opt) # connect the wall segments
connect_line_[connect_line_ < 150] = 0
connect_line_[connect_line_ > 0] = 255
temp1_ = copy.copy(connect_line_)
temp2_ = copy.copy(NON_wall_area_)
temp1_[temp1_ > 0] = 1
temp_ = temp1_ * temp2_
NON_wall_area_[temp_ != 0] = 0
result_ = connect_line_ + NON_wall_area_
result_[result_ > 255] = 255
result_ = result_.astype(np.uint8)
result_[result_ == 0] = 254
result_[result_ == 255] = 0
result_[result_ == 254] = 255
# wall_area_ = rotate_bound(wall_area, -alfa / np.pi * 180) # straighten the image
# # wall_area_ = cv2.dilate(wall_area_, kernel) # erode/dilate to remove semantic points on the walls
#
# connect_line = connect_door(wall_area_, opt.k, opt.m) # close off the regions
# connect_line_ = rotate_bound(connect_line, alfa / np.pi * 180)
map_ori[map_ori == 0] = 254
map_ori[map_ori == 255] = 0
map_ori[map_ori == 254] = 255
return result, result_, map_ori
# Draw the object shapes
def obj_outline(map, opt, map_ori):
'''
. Draw the object shapes
. @param map semantic map
. @param opt parameters
. @param map_ori unprocessed original map
'''
def rotate_bound(image, angle):
'''
. Rotate an image about its centre (the output keeps the original canvas size)
. @param image image loaded with OpenCV
. @param angle (counter-clockwise) rotation angle in degrees
'''
# img = cv2.imread("img/1.jpg")
(h, w) = image.shape[:2] # shape is (height, width, channels); take the first two
# Build the rotation matrix (the negated angle gives a clockwise rotation). Arg 1 is the rotation
# centre, arg 2 the angle (positive = counter-clockwise), arg 3 the isotropic scale factor.
M = cv2.getRotationMatrix2D((w / 2, h / 2), -angle, 1.0)
# compute the new bounding dimensions of the image
# adjust the rotation matrix to account for translation
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (image.shape[1], image.shape[0])) # borderValue omitted, defaults to black
# return M
# Per-class parameters for each semantic object
def get_parameter(item, opt):
'''
. Get the parameters of one object class
. @param item intensity value of the semantic object
. @param opt parameter configuration (config.opt)
'''
# RGB colour of each object class
color_table = {
115: [232, 221, 203], 116: [205, 179, 128], 117: [3, 101, 100], 118: [3, 54, 73],
119: [3, 22, 52], 120: [237, 22, 139], 121: [251, 178, 23], 150: [96, 143, 159],
151: [1, 77, 103], 152: [254, 67, 101], 153: [252, 157, 154], 154: [249, 205, 173],
155: [200, 200, 169], 156: [131, 175, 155], 200: [229, 187, 129], 201: [161, 23, 21],
202: [118, 77, 57], 203: [17, 63, 61], 204: [60, 79, 57], 205: [95, 92, 51],
209: [56, 13, 49],
}
# clustering and drawing parameters are stored in opt as min_samples_<item>, max_eps_<item>, ...
min_samples = getattr(opt, 'min_samples_%d' % item)
max_eps = getattr(opt, 'max_eps_%d' % item)
shape = getattr(opt, 'shape_%d' % item) # 0 = circle, 1 = rectangle
size = getattr(opt, 'size_%d' % item) # 0 = follow the contour, 1 = draw an upright rectangle
color = color_table[item]
filter_point_num = 0
return min_samples, max_eps, shape, size, color, filter_point_num
def filter_point(X, filter_point_num):
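# Remove small connected components of semantic points (the removal is only applied for
# item 204, the sofa class in draw_legend) and return the surviving points as row/column pairs.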
obj_collect = np.zeros((map.shape[0], map.shape[1]), np.uint8)
row = X[:, 0].astype(int)
col = X[:, 1].astype(int)
obj_collect[row, col] = 1
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(obj_collect, connectivity=8)
if (item == 204):
for i in range(num_labels):
if (stats[i, 4] <= filter_point_num):
mask = labels == i
obj_collect[:, :][mask] = 0
y, x = np.where(obj_collect == 1) # y is the row index, x is the column index
X = np.zeros((x.shape[0], 2))
X[:, 0] = y
X[:, 1] = x
return X
# Draw the object outlines
def draw_outline(X, item, min_samples, max_eps, shape, color, size, filter_point_num, output):
'''
. Draw the outline of one semantic object class
. @param X row/column coordinates of the object's semantic points
. @param item intensity value of the semantic object
. @param min_samples minimum number of samples for clustering
. @param max_eps maximum clustering radius
. @param shape drawing shape: 0 = circle, 1 = rectangle
. @param color colour of the semantic object
. @param size outline mode: 0 = follow the contour, 1 = draw an upright rectangle
. @param filter_point_num minimum component size kept by filter_point
. @param output map image on which previous object shapes have been drawn
'''
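# For items 118, 119, 153 and 202 (washing machine, refrigerator, carpet and wardrobe in
# draw_legend) each cluster is skeletonised, straightened by the mean Hough-line angle, boxed
# by its detected corners and rotated back; the other items use the rectangle or circle fits below.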
# OPTICS clustering (DBSCAN-style cluster extraction)
# X = X.astype(np.uint8)
yhat = OPTICS(min_samples=min_samples, max_eps=max_eps, cluster_method='dbscan').fit_predict(X)
# retrieve the unique cluster labels
clusters = np.unique(yhat)
'''
Scatter plot
Create a scatter plot for the samples of every cluster
'''
# for cluster in clusters: # get the row indices of this cluster's samples
# row_ix = where(yhat == cluster) # create the scatter of these samples
# pyplot.scatter(X[row_ix, 0], X[row_ix, 1]) # draw the scatter plot
# pyplot.show()
'''
Find the minimum bounding rectangle (draw the shapes)
'''
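# colours are specified as RGB; reverse to BGR for the OpenCV drawing calls below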
color = color[::-1]
for cluster in clusters:
n = 0
# get the row indices of this cluster's samples and extract the object from the map
if (cluster == -1):
n = 1
continue
row_ix = where(yhat == cluster)
if row_ix[0].shape[0] <= 1:
continue
obj = np.zeros((map.shape[0], map.shape[1]), np.uint8)
row = X[row_ix, 0].astype(int)
col = X[row_ix, 1].astype(int)
obj[row, col] = 1
if (item == 118 or item == 119 or item == 153 or item == 202):
# if (item == 899):
skeleton0 = morphology.skeletonize(obj)
skeleton = skeleton0.astype(np.uint8) * 255
temps = copy.copy(skeleton)
temp_outline = np.zeros((output.shape[0], output.shape[1], 3), np.uint8)
for i in range(60, 0,-1):
lines = cv2.HoughLines(skeleton, 1, np.pi / 180, i)
if (lines is not None):
if lines.shape[0] >0:
break
if (lines is None):
n = 1
continue
alfa = 0
for line in lines:
rho = line[0][0]
theta = line[0][1]
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
theta = np.mod(theta, 90 / 180 * np.pi)
if (theta > 45 / 180 * np.pi):
theta = abs(theta - 90 / 180 * np.pi)
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * (a))
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * (a))
# cv2.line(temps, (x1, y1), (x2, y2), 255, 2)
alfa = theta + alfa
alfa = alfa / lines.shape[0]
skeleton = rotate_bound(skeleton, np.mod(alfa, 90 / 180 * np.pi) / np.pi * 180) # straighten the image; a positive angle rotates clockwise here
skeleton_ = copy.copy(skeleton)
# skeleton_[skeleton_ != 0] = 9
skeleton_ = (skeleton_ / 255 * 9).astype(np.uint8)
# output = rotate_bound(output, -np.mod(alfa, 90 / 360 * np.pi) / np.pi * 180)
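# Endpoint detection: a 3x3 box blur of the skeleton scaled to 9 yields exactly 2 where only the
# pixel itself and one neighbour lie on the skeleton; the goodFeaturesToTrack call further below
# replaces this estimate with Shi-Tomasi corners.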
img_blur = cv2.blur(copy.copy(skeleton_), (3, 3))
# img_blur = img_blur * 9
img_blur[img_blur != 2] = 0
img_blur[img_blur == 2] = 255
# img_blur[img_blur == 2] = 255
# img_blur[img_blur != 255] = 0
img_blur[skeleton == 0] = 0
corners = np.where(img_blur != 0)
corners = np.array(corners)
#
corners = cv2.goodFeaturesToTrack(skeleton, 50, 0.1, 10)
corners = np.int0(corners)
temp_x = []
temp_y = []
for i in corners:
x, y = i.ravel() # (x, y) corner coordinates from goodFeaturesToTrack
temp_x.append(x)
temp_y.append(y)
temp_X = np.array(temp_x)
temp_Y = np.array(temp_y)
a = np.min(temp_X)
b = np.min(temp_Y)
c = np.max(temp_X)
d = np.max(temp_Y)
cv2.rectangle(temp_outline, (a, b), (c, d), color, -1)
temp_outline = rotate_bound(temp_outline, -np.mod(alfa, 90 / 180 * np.pi) / np.pi * 180)
temp = cv2.cvtColor(temp_outline, cv2.COLOR_BGR2GRAY) # convert to grayscale so it can be used as a mask
temp[temp > 0] = 255
output[temp_outline > 0] = 0
mask = 255 == temp
output[:, :, 0][mask] = color[0]
output[:, :, 1][mask] = color[1]
output[:, :, 2][mask] = color[2]
continue
if (shape == 1):
# compute the bounding rectangle
box = outer_rectangle(obj, size)
# color = color[::-1]
# draw
if (size == 0):
# shrink the bounding rectangle (disabled)
# vertices = shrink_rectangle(box)
vertices = box
output = cv2.drawContours(output, [vertices], 0, color, -1, lineType=cv2.LINE_4)
elif (size == 1):
# shrink the bounding rectangle (disabled)
# vertices = shrink_rectangle(box)
vertices = box
cv2.rectangle(output, (vertices[0, 0], vertices[0, 1]), (vertices[2, 0], vertices[2, 1]), color, -1)
elif (shape == 0):
# fit the minimum circle
circle_x = col
circle_y = row
center, radius = fit_circle(circle_x, circle_y)
# binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# center, radius = outer_circle(obj)
cv2.circle(output, center, radius, color, -1)
# else:
# ellipse = outer_ellipse(obj)
# if (len(ellipse) != 0):
# cv2.ellipse(output, ellipse, color, -1)
return output, n
# Compute the minimum-area bounding rectangle
def outer_rectangle(obj, size):
'''
. Compute the minimum-area bounding rectangle
. @param obj binary mask of the semantic object
. @param size outline mode: 0 = minimum-area (possibly rotated) rectangle, 1 = upright bounding rectangle
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
if (size == 0):
min_rect = cv2.minAreaRect(np.array(contour))
box = cv2.boxPoints(min_rect)
# box = np.round(box)
box = np.int0(box)
# order the vertices
temp = np.where(box == np.min(box[:, 0])) # column 0 of box is the image column, column 1 is the image row
if temp[0].shape[0] > 1:
left = np.min(box[:, 0])
right = np.max(box[:, 0])
up = np.min(box[:, 1])
down = np.max(box[:, 1])
top_point_x = left
top_point_y = up
right_point_x = right
right_point_y = up
bottom_point_x = right
bottom_point_y = down
left_point_x = left
left_point_y = down
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
elif (size == 1):
x, y, w, h = cv2.boundingRect(np.array(contour))
vertices = np.array([[x, y], [x + w, y], [x + w, y + h],
[x, y + h]])
return vertices
# Fit the minimum ellipse
def outer_ellipse(obj):
'''
. Fit the minimum ellipse
. @param obj binary mask of the semantic object
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
if (len(contour) < 6):
ellipse = ()
else:
ellipse = cv2.fitEllipse(np.array(contour))
return ellipse
# Compute the minimum enclosing circle
def outer_circle(obj):
'''
. Compute the minimum enclosing circle
. @param obj binary mask of the semantic object
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
(x, y), radius = cv2.minEnclosingCircle(np.array(contour))
center = (int(round(x)), int(round(y)))
radius = int(round(radius))
return center, radius
# Least-squares circle fit
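# calc_R gives each point's distance to a candidate centre (xc, yc); f_2 returns the residuals
# Ri - mean(Ri), which scipy.optimize.leastsq drives towards zero to locate the best-fit centre;
# the fitted radius is the mean of the resulting distances.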
def calc_R(xc, yc, x, y):
return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
def f_2(c, x, y):
Ri = calc_R(*c, x, y)
return Ri - Ri.mean()
def fit_circle(x, y):
'''
. Least-squares circle fit
. @param x columns occupied by the object's points
. @param y rows occupied by the object's points
'''
x_m = mean(x)
y_m = mean(y)
x = x.ravel()
y = y.ravel()
center_estimate = x_m, y_m
center_2, _ = optimize.leastsq(f_2, center_estimate, args=(x, y))
xc_2, yc_2 = center_2
Ri_2 = calc_R(xc_2, yc_2, x, y)
# radius of the fitted circle
R_2 = Ri_2.mean()
center = (int(round(xc_2)), int(round(yc_2)))
radius = int(round(R_2))
return center, radius
# Shrink the bounding rectangle inward
def shrink_rectangle(box):
'''
. Shrink the bounding rectangle inward by one pixel on each side
. @param box vertex coordinates of the object's bounding rectangle
'''
temp = np.where(box == np.min(box[:, 0])) # column 0 of box is the image column, column 1 is the image row
if temp[0].shape[0] > 1:
left = np.min(box[:, 0])
right = np.max(box[:, 0])
up = np.min(box[:, 1])
down = np.max(box[:, 1])
top_point_x = left + 1
top_point_y = up + 1
right_point_x = right - 1
right_point_y = up + 1
bottom_point_x = right - 1
bottom_point_y = down - 1
left_point_x = left + 1
left_point_y = down - 1
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
if top_point_x > bottom_point_x:
vertices[0, 0] = vertices[0, 0] - 1
vertices[0, 1] = vertices[0, 1] + 1
vertices[2, 0] = vertices[2, 0] + 1
vertices[2, 1] = vertices[2, 1] - 1
if top_point_x < bottom_point_x:
vertices[0, 0] = vertices[0, 0] + 1
vertices[0, 1] = vertices[0, 1] + 1
vertices[2, 0] = vertices[2, 0] - 1
vertices[2, 1] = vertices[2, 1] - 1
if right_point_y > left_point_y:
vertices[1, 0] = vertices[1, 0] - 1
vertices[1, 1] = vertices[1, 1] - 1
vertices[3, 0] = vertices[3, 0] + 1
vertices[3, 1] = vertices[3, 1] + 1
if right_point_y < left_point_y:
vertices[1, 0] = vertices[1, 0] - 1
vertices[1, 1] = vertices[1, 1] + 1
vertices[3, 0] = vertices[3, 0] + 1
vertices[3, 1] = vertices[3, 1] - 1
return vertices
# Add the legend
def draw_legend(img, item, color, k):
obj_name = {'115': 'Dog basin', '116': 'Bar chair base', '117': 'Fan base', '118': 'Washing machine',
'119': 'Refrigerator', '120': 'Toilet', '121': 'Weighing scale', '150': 'Wire', '151': 'TV',
'152': 'Desk',
'153': 'Carpet', '154': 'Rag', '155': 'Tea table', '156': 'TV cabinet', '200': 'Slippers',
'201': 'Sock', '202': 'Wardrobe', '203': 'Bed', '204': 'Sofa', '205': 'Chair', '209': 'Pet feces'}
color = color[::-1]
text = obj_name[str(item)]
font = cv2.FONT_HERSHEY_TRIPLEX
# cv2.rectangle(img, (output.shape[1] - 140, 10 + k * 17), (output.shape[1] - 120, 20 + k * 17), color, -1)
# cv2.putText(img, text, (output.shape[1] - 100, 20 + k * 17), font, 0.4, color, 1, cv2.LINE_AA)
cv2.rectangle(img, (10, 10 + k * 17), (30, 20 + k * 17), color, -1)
cv2.putText(img, text, (40, 20 + k * 17), font, 0.5, color, 1, cv2.LINE_8)
return img
# Intensity values of the semantic objects
# obj_value = [152, 121, 200, 115, 205, 117, 203]
obj_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 209]
# obj_value = [153]
obj_legend = []
# output is the final composite image with all object shapes drawn on it
# output = Semantic_map
# output = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
# output[:, :, 0] = map
# output[:, :, 1] = map
# output[:, :, 2] = map
k = 0
output = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
legend_color = np.zeros((map.shape[0], map.shape[1], 3), np.uint8)
# output[:, :, 0] = map
# output[:, :, 1] = map
# output[:, :, 2] = map
# Draw a shape for every object class
for item in iter(obj_value):
if ((map == item).any()):
# min_samples, max_eps, shape, size, color = get_parameter(item)
min_samples, max_eps, shape, size, color, filter_point_num = get_parameter(item, opt)
# num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(map, connectivity=8)
y, x = np.where(map == item) # y is the row index, x is the column index
X = np.zeros((x.shape[0], 2))
X[:, 0] = y
X[:, 1] = x
X = filter_point(X, filter_point_num)
if ((X.shape[0] == 0) or (min_samples > X.shape[0])):
continue
else:
n = 0
output, n = draw_outline(X, item, min_samples, max_eps, shape, color, size, filter_point_num, output)
if n != 1:
if item not in obj_legend:
obj_legend.append(item)
legend_color = draw_legend(legend_color, item, color, k)
k = k + 1
output = output + legend_color
# legend_color[legend_color == 0] = 255
# img1 = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY) # convert to gray for size statistics
return output
# Scene recognition
def get_labels(img, semantic, opt, img_, map_ori):
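# get_labels: segment the closed floor plan into rooms, infer each room's scene type from the
# semantic objects found inside it (Scene), colour the rooms accordingly, and composite the
# result with the wall mask, the drawn object shapes and the original map.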
def get_wall_area(map, num_labels, labels, stats, area):
'''
. map image from which the wall regions are extracted
. stats connected-component statistics
. area area threshold
'''
temp = []
for i in range(1, num_labels):
if stats[i, 4] > area: # components whose area exceeds the threshold are treated as wall
temp.append(i)
wall_area = np.zeros((map.shape[0], map.shape[1]), np.uint8)
for i in range(0, len(temp)):
mask = labels == temp[i]
wall_area[:, :][mask] = 255
return wall_area
# def draw_legend(img,scene,color,k):
# obj_name = {'0':'Dog basin','1':'Fan base','2':'Weighing scale','3':'Desk','4':'Slippers'}
# color = color[::-1]
# text = obj_name[str(scene)]
# font = cv2.FONT_HERSHEY_DUPLEX
# cv2.rectangle(img, (output.shape[1] - 140, 50+k*17), (output.shape[1] - 120, 60+k*17), color, -1)
# cv2.putText(img, text, (output.shape[1] - 100, 60+k*17), font, 0.4, color, 1, cv2.LINE_AA)
# return img
def Scene(obj_label):
'''
:param obj_label: indices (into Intensity_value) of the objects found in the room
:return: scene index into ["bedroom", "livingroom", "bathroom", "kitchen", "unknown"]
'''
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
p_sce = np.ones((1, 5)) * 0.25
p_sce[0][4] = 0
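# p_obj_sce[k, i] is the prior that object k (indexed in Intensity_value order) belongs to
# scene i, with the scenes ordered [bedroom, livingroom, bathroom, kitchen]; the loop below
# combines the detected objects as independent evidence:
# pre_scene[0][i] = 1 - prod_j(1 - p_obj * p_obj_sce[k_j, i] / sum(p_obj_sce[k_j])).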
p_obj_sce = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0.01, 0.04, 0.8, 0],
[0.02, 0.2, 0, 0.8],
[0, 0, 0.99, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0.4, 0.4, 0, 0],
[0.2, 0.4, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0.1, 0.4, 0, 0],
[0.4, 0.6, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0.8, 0, 0, 0],
[0.99, 0, 0, 0],
[0.05, 0.5, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
p_temp = 1.0
p_obj = 0.3 # confidence of the object detection
pre_scene = np.zeros((1, 5))
if (len(obj_label) == 0):
pre_scene[0][4] = 1
else:
for i in range(4):
for j in range(len(obj_label)):
k = obj_label[j]
sum_temp = np.sum(p_obj_sce[k])
if (sum_temp == 0):
pre_scene[0][4] = 1
continue
m = p_obj_sce[k, i]
p_temp = (1.0 - (m / sum_temp * p_obj)) * p_temp
pre_scene[0][i] = 1 - p_temp
p_temp = 1.0
if np.max(pre_scene[0, 0:4]) > 0.1:
pre_scene[0][4] = 0
sce_index = np.argmax(pre_scene)
return sce_index
def connect(pic, opt):
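# connect: skeletonise the wall mask, find the skeleton endpoints (pixels whose 3x3
# neighbourhood contains exactly two skeleton pixels), and draw a line between every pair of
# endpoints closer than opt.distance so that the rooms become closed regions.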
hahaha = copy.copy(pic)
# hahaha[hahaha == 0] = 254
# hahaha[hahaha == 255] = 0
hahaha[hahaha == 255] = 1
skeleton0 = morphology.skeletonize(hahaha)
skeleton_ = skeleton0.astype(np.uint8) * 9
skeleton = skeleton0.astype(np.uint8) * 255
img_blur = cv2.blur(copy.copy(skeleton_), (3, 3))
# img_blur = img_blur * 9
img_blur[img_blur != 2] = 0
img_blur[img_blur == 2] = 255
img_blur[skeleton_ == 0] = 0
corners = np.where(img_blur != 0)
corners = np.array(corners)
# corners = cv2.goodFeaturesToTrack(skeleton, 200, 0.01, 3)
# corners = np.int0(corners)
# skeleton = skeleton / 255
# kernel = np.ones((1, 9))
temp_x = []
temp_y = []
for i in range(corners.shape[1]):
x = corners[0, i]
y = corners[1, i]
# cov_corner = np.array([[skeleton[y - 1, x - 1], skeleton[y, x - 1], skeleton[y + 1, x - 1]],
# [skeleton[y - 1, x], skeleton[y, x], skeleton[y + 1, x]],
# [skeleton[y - 1, x + 1], skeleton[y, x + 1], skeleton[y + 1, x + 1]]])
# cov_corner[cov_corner > 0] = 1
# cov_corner = cov_corner.reshape(9, 1)
# a3 = np.dot(kernel, cov_corner)
# if a3 == 2:
temp_x.append(x)
temp_y.append(y)
cv2.circle(skeleton, (y, x), 3, 255, -1)
temp_X = np.array(temp_x)
temp_Y = np.array(temp_y)
temp = np.vstack((temp_X, temp_Y)).transpose()
distance = 10000 * np.ones((temp.shape[0], temp.shape[0]))
for i in range(temp.shape[0]):
for j in range(i, temp.shape[0]):
distance[i, j] = np.sqrt((temp[i][0] - temp[j][0]) ** 2 + (temp[i][1] - temp[j][1]) ** 2)
distance[distance == 0] = 10000
if distance[i, j] < opt.distance:
# z = np.argmin(distance[i, :])
cv2.line(pic, (temp[i, 1], temp[i, 0]), (temp[j, 1], temp[j, 0]), 255, 3)
return pic
# Compute the minimum-area bounding rectangle
def outer_rectangle(obj):
'''
. Compute the minimum-area bounding rectangle
. @param obj binary mask of the region
'''
binary, contours, hierarchy = cv2.findContours(obj, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contour = []
for cont in contours:
contour.extend(cont)
min_rect = cv2.minAreaRect(np.array(contour))
box = cv2.boxPoints(min_rect)
# box = np.round(box)
box = np.int0(box)
# order the vertices
temp = np.where(box == np.min(box[:, 0])) # column 0 of box is the image column, column 1 is the image row
if temp[0].shape[0] > 1:
left = np.min(box[:, 0])
right = np.max(box[:, 0])
up = np.min(box[:, 1])
down = np.max(box[:, 1])
top_point_x = left
top_point_y = up
right_point_x = right
right_point_y = up
bottom_point_x = right
bottom_point_y = down
left_point_x = left
left_point_y = down
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
left_point_x = np.min(box[:, 0])
right_point_x = np.max(box[:, 0])
top_point_y = np.min(box[:, 1])
bottom_point_y = np.max(box[:, 1])
left_point_y = box[:, 1][np.where(box[:, 0] == left_point_x)][0]
right_point_y = box[:, 1][np.where(box[:, 0] == right_point_x)][0]
top_point_x = box[:, 0][np.where(box[:, 1] == top_point_y)][0]
bottom_point_x = box[:, 0][np.where(box[:, 1] == bottom_point_y)][0]
vertices = np.array(
[[top_point_x, top_point_y], [right_point_x, right_point_y], [bottom_point_x, bottom_point_y],
[left_point_x, left_point_y]])
return vertices
img1 = cv2.cvtColor(semantic, cv2.COLOR_BGR2GRAY) # convert to gray for size statistics
img1[img1 > 0] = 1 # extract the semantic mask
img2 = img * img1 # keep only the semantic pixels
Intensity_value = [115, 116, 117, 118, 119, 120, 121, 150, 151, 152, 153,
154, 155, 156, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209]
'Keep only the wall space'
temp = copy.copy(img)
temp[temp == 0] = 254
temp[temp == 255] = 0
temp[temp == 254] = 255
temp[temp < 254] = 0
img3 = copy.copy(temp) # extract the non-semantic, non-wall region
img3[img3 > 0] = 1
img3[img1 > 0] = 0
Semantic_map = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
temp = temp.astype(np.uint8)
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(temp, connectivity=8)
wall_area = get_wall_area(img, num_labels, labels, stats, opt.length)
img_[img_ == 0] = 254
img_[img_ == 255] = 0
img_[img_ == 254] = 255
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img_, connectivity=8)
wall_area_ = get_wall_area(img_, num_labels, labels, stats, opt.length)
"pic 为分割区域的结果作为一张区域分割后的地图将分割结果传给后续场景识别场景识别结果绘制在图Semantic_map"
pic = copy.copy(wall_area_)
pic = connect(pic, opt)
img3[wall_area > 0] = 0
img3 = img3 * 255
img3[img3 == 0] = 254
img3[img3 == 255] = 0
img3[img3 == 254] = 255
wall_area[wall_area == 0] = 254
wall_area[wall_area == 255] = 0
wall_area[wall_area == 254] = 255
pic[pic == 0] = 254
pic[pic == 255] = 0
pic[pic == 254] = 255
wall_area_color = np.zeros((wall_area.shape[0], wall_area.shape[1], 3), np.uint8)
wall_area_color[:, :, 0] = wall_area
wall_area_color[:, :, 1] = wall_area
wall_area_color[:, :, 2] = wall_area
'Extract the rooms'
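# Connected components of pic with area of at least 200 px are treated as rooms: each is dilated,
# replaced by its minimum-area bounding rectangle, and the semantic intensities falling inside
# that rectangle are collected as the room's object labels.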
room_num_labels, room_labels, room_stats, room_centroids = cv2.connectedComponentsWithStats(pic,
connectivity=8)
k = 0
scene_legend = []
for i in range(2, room_num_labels):
if room_stats[i, 4] < 200:
continue
room = np.zeros((wall_area.shape[0], wall_area.shape[1]))
obj_label = []
mask = room_labels == i
room[:, :][mask] = 1
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
room = cv2.dilate(room, kernel, iterations=1)
room_temp = copy.copy(room)
room_temp = room_temp.astype(np.uint8)
room_box = outer_rectangle(room_temp)
room_outline_RGB = np.zeros((room_temp.shape[0], room_temp.shape[1], 3), np.uint8)
room_outline_RGB = cv2.drawContours(room_outline_RGB, [room_box], 0, (0, 255, 0), -1, lineType=cv2.LINE_4)
room_outline = cv2.cvtColor(room_outline_RGB, cv2.COLOR_BGR2GRAY)
room_outline[room_outline > 0] = 1
room = room_outline
# Semantic_area = room_outline * img2
Semantic_area = room * img2
Semantic_area = list(Semantic_area.flatten())
Semantic_area = list(set(Semantic_area))
Semantic_area.sort()
for j in range(len(Semantic_area)):
if Semantic_area[j] in Intensity_value:
obj_label.append(Intensity_value.index(Semantic_area[j]))
# obj_label_test = [] # for testing
# cv2.imshow("room",room*255)
# cv2.waitKey(0)
scene = Scene(obj_label)
if scene == 0:
Semantic_map[:, :, 0][room > 0] = 70
Semantic_map[:, :, 1][room > 0] = 80
Semantic_map[:, :, 2][room > 0] = 90
color = [70, 80, 90]
if scene == 1:
Semantic_map[:, :, 0][room > 0] = 100
Semantic_map[:, :, 1][room > 0] = 180
Semantic_map[:, :, 2][room > 0] = 120
color = [100, 180, 120]
if scene == 2:
Semantic_map[:, :, 0][room > 0] = 210
Semantic_map[:, :, 1][room > 0] = 67
Semantic_map[:, :, 2][room > 0] = 170
color = [210, 67, 170]
if scene == 3:
Semantic_map[:, :, 0][room > 0] = 150
Semantic_map[:, :, 1][room > 0] = 48
Semantic_map[:, :, 2][room > 0] = 88
color = [150, 48, 88]
# if scene == 4:
# Semantic_map[:, :, 0][mask] = 134
# Semantic_map[:, :, 1][mask] = 231
# Semantic_map[:, :, 2][mask] = 143
# color = [134, 231, 143]
# if scene not in scene_legend:
# scene_legend.append(scene)
# Semantic_map = draw_legend(Semantic_map, scene, color, k)
# k = k + 1
# img3[img3 > 0] = 255
Semantic_map = Semantic_map + wall_area_color
Semantic_map[img1 > 0] = 0
# semantic[img1 < 1] = 0
Semantic_map = Semantic_map + semantic
Semantic_map[img3 == 0] = 0
p = cv2.cvtColor(copy.copy(Semantic_map), cv2.COLOR_BGR2GRAY) # convert to gray for size statistics
map_ori_RGB = np.zeros((map_ori.shape[0], map_ori.shape[1], 3), np.uint8)
map_ori_RGB[:, :, 0][map_ori > 0] = 255
map_ori_RGB[:, :, 1][map_ori > 0] = 255
map_ori_RGB[:, :, 2][map_ori > 0] = 255
map_ori[p > 0] = 0
map_ori_RGB = map_ori_RGB + Semantic_map
map_ori_RGB[img1 > 0] = 0
map_ori_RGB = map_ori_RGB + semantic
map_ori_RGB[img3 == 0] = 0
return map_ori_RGB
if __name__ == "__main__":
img = cv2.imread("path/map/map6.png", 0)
cv2.flip(img, 0, img) # flip the image vertically
map, map_, map_ori = process_map(img, opt) # map: only the doors connected; map_: doors connected and dense semantic blobs treated as wall; map_ori: unprocessed original
# map_test = cv2.imread("mappp.jpg", 0)
output = obj_outline(map_ori, opt, map_ori) # draw onto map_ori (map serves as the reference)
Semantic_map = get_labels(map, output, opt, map_, map_ori)
cv2.imwrite("/path/result/Semantic_map.png", Semantic_map)
cv2.imshow("Semantic_map", Semantic_map)
cv2.waitKey(0)