Compare commits

...

12 Commits

Author SHA1 Message Date
weixin_46229132
973d3835c8 修改main的引用odm_preprocess 2025-02-14 19:36:49 +08:00
weixin_46229132
3df39f76d2 修改metadata.xml,添加boundingbox 2025-02-14 17:31:39 +08:00
weixin_46229132
9b151d0768 调整odm参数 2025-02-13 09:49:33 +08:00
weixin_46229132
55808e434b 调整odm参数 2025-02-12 22:35:34 +08:00
weixin_46229132
0ebb362bb4 main odm_preprocess_fast改回去 2025-02-11 16:08:57 +08:00
weixin_46229132
8999b49de5 更新merge_obj算法 2025-02-11 16:05:23 +08:00
weixin_46229132
817fadd8cd 修改单个grid处理obj的bug 2025-02-10 13:13:23 +08:00
weixin_46229132
34f19c430b 修改三维模式参数 2025-02-09 21:21:58 +08:00
weixin_46229132
954e87121f 先合并obj再转换osgb 2025-02-09 21:05:33 +08:00
weixin_46229132
ad6f4fb1ae 修改合并obj的算法 2025-02-09 19:23:12 +08:00
weixin_46229132
82fdcdca87 修bug 2025-02-09 15:42:31 +08:00
weixin_46229132
59d6ef44a8 删除进度条 2025-02-08 15:28:41 +08:00
6 changed files with 389 additions and 247 deletions

View File

@ -19,6 +19,7 @@ from utils.logger import setup_logger
from utils.visualizer import FilterVisualizer from utils.visualizer import FilterVisualizer
from post_pro.merge_tif import MergeTif from post_pro.merge_tif import MergeTif
from post_pro.merge_obj import MergeObj from post_pro.merge_obj import MergeObj
from post_pro.obj_post_pro import ObjPostProcessor
from post_pro.merge_laz import MergePly from post_pro.merge_laz import MergePly
@ -235,7 +236,7 @@ class ImagePreprocessor:
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
for point in tqdm(points, desc=f"复制网格 ({grid_id[0]},{grid_id[1]}) 的图像"): for point in points:
src = os.path.join(self.config.image_dir, point["file"]) src = os.path.join(self.config.image_dir, point["file"])
dst = os.path.join(output_dir, point["file"]) dst = os.path.join(output_dir, point["file"])
shutil.copy(src, dst) shutil.copy(src, dst)
@ -255,10 +256,16 @@ class ImagePreprocessor:
merger.merge_grid_laz(grid_points) merger.merge_grid_laz(grid_points)
def merge_obj(self, grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]): def merge_obj(self, grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]):
"""合并所有网格的OBJ模型""" """合并所有网格的OBJ模型并转换为OSGB格式"""
self.logger.info("开始合并OBJ模型") self.logger.info("开始合并OBJ模型")
merger = MergeObj(self.config.output_dir) merger = MergeObj(self.config.output_dir)
merger.merge_grid_obj(grid_points, translations) center_lon, center_lat, bounding_box = merger.merge_grid_obj(grid_points)
# 转换为OSGB格式
self.logger.info("开始转换为OSGB格式")
processor = ObjPostProcessor(self.config.output_dir)
if not processor.convert_to_osgb(center_lon, center_lat, bounding_box):
self.logger.error("OSGB转换失败")
def post_process(self, successful_grid_points: Dict[tuple, pd.DataFrame], grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]): def post_process(self, successful_grid_points: Dict[tuple, pd.DataFrame], grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]):
"""后处理:合并或复制处理结果""" """后处理:合并或复制处理结果"""
@ -271,11 +278,11 @@ class ImagePreprocessor:
if self.config.mode == "快拼模式": if self.config.mode == "快拼模式":
self.merge_tif(successful_grid_points, self.config.produce_dem) self.merge_tif(successful_grid_points, self.config.produce_dem)
elif self.config.mode == "三维模式": elif self.config.mode == "三维模式":
self.merge_ply(successful_grid_points) # self.merge_ply(successful_grid_points)
self.merge_obj(successful_grid_points, translations) self.merge_obj(successful_grid_points, translations)
else: else:
self.merge_tif(successful_grid_points, self.config.produce_dem) self.merge_tif(successful_grid_points, self.config.produce_dem)
self.merge_ply(successful_grid_points) # self.merge_ply(successful_grid_points)
self.merge_obj(successful_grid_points, translations) self.merge_obj(successful_grid_points, translations)
def process(self): def process(self):
@ -284,7 +291,7 @@ class ImagePreprocessor:
self.extract_gps() self.extract_gps()
self.cluster() self.cluster()
self.filter_isolated_points() self.filter_isolated_points()
self.filter_time_group_overlap() # self.filter_time_group_overlap()
# self.filter_alternate_images() # self.filter_alternate_images()
grid_points, translations = self.divide_grids() grid_points, translations = self.divide_grids()
self.copy_images(grid_points) self.copy_images(grid_points)

View File

@ -19,6 +19,7 @@ from utils.logger import setup_logger
from utils.visualizer import FilterVisualizer from utils.visualizer import FilterVisualizer
from post_pro.merge_tif import MergeTif from post_pro.merge_tif import MergeTif
from post_pro.merge_obj import MergeObj from post_pro.merge_obj import MergeObj
from post_pro.obj_post_pro import ObjPostProcessor
from post_pro.merge_laz import MergePly from post_pro.merge_laz import MergePly
@ -187,6 +188,21 @@ class ImagePreprocessor:
self.visualizer.visualize_filter_step( self.visualizer.visualize_filter_step(
self.gps_points, previous_points, "3-Time Group Overlap") self.gps_points, previous_points, "3-Time Group Overlap")
def filter_alternate_images(self):
    """Thin the image set by keeping every other photo in time order.

    Sorts the GPS points by timestamp, keeps the even-indexed rows
    (i.e. drops every second image), then records the step with the
    filter visualizer and logs how many points remain.
    """
    previous_points = self.gps_points.copy()
    # Order by capture timestamp so "every other image" is meaningful.
    self.gps_points = self.gps_points.sort_values('date')
    # Keep even-indexed rows (drop every second image) and renumber.
    self.gps_points = self.gps_points.iloc[::2].reset_index(drop=True)
    self.visualizer.visualize_filter_step(
        self.gps_points, previous_points, "4-Alternate Images")
    self.logger.info(f"交替过滤后剩余 {len(self.gps_points)} 个点")
def divide_grids(self) -> Tuple[Dict[tuple, pd.DataFrame], Dict[tuple, tuple]]: def divide_grids(self) -> Tuple[Dict[tuple, pd.DataFrame], Dict[tuple, tuple]]:
"""划分网格 """划分网格
Returns: Returns:
@ -220,7 +236,7 @@ class ImagePreprocessor:
os.makedirs(output_dir, exist_ok=True) os.makedirs(output_dir, exist_ok=True)
for point in tqdm(points, desc=f"复制网格 ({grid_id[0]},{grid_id[1]}) 的图像"): for point in points:
src = os.path.join(self.config.image_dir, point["file"]) src = os.path.join(self.config.image_dir, point["file"])
dst = os.path.join(output_dir, point["file"]) dst = os.path.join(output_dir, point["file"])
shutil.copy(src, dst) shutil.copy(src, dst)
@ -240,10 +256,16 @@ class ImagePreprocessor:
merger.merge_grid_laz(grid_points) merger.merge_grid_laz(grid_points)
def merge_obj(self, grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]): def merge_obj(self, grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]):
"""合并所有网格的OBJ模型""" """合并所有网格的OBJ模型并转换为OSGB格式"""
self.logger.info("开始合并OBJ模型") self.logger.info("开始合并OBJ模型")
merger = MergeObj(self.config.output_dir) merger = MergeObj(self.config.output_dir)
merger.merge_grid_obj(grid_points, translations) center_lon, center_lat, bounding_box = merger.merge_grid_obj(grid_points)
# 转换为OSGB格式
self.logger.info("开始转换为OSGB格式")
processor = ObjPostProcessor(self.config.output_dir)
if not processor.convert_to_osgb(center_lon, center_lat, bounding_box):
self.logger.error("OSGB转换失败")
def post_process(self, successful_grid_points: Dict[tuple, pd.DataFrame], grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]): def post_process(self, successful_grid_points: Dict[tuple, pd.DataFrame], grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]):
"""后处理:合并或复制处理结果""" """后处理:合并或复制处理结果"""
@ -256,11 +278,11 @@ class ImagePreprocessor:
if self.config.mode == "快拼模式": if self.config.mode == "快拼模式":
self.merge_tif(successful_grid_points, self.config.produce_dem) self.merge_tif(successful_grid_points, self.config.produce_dem)
elif self.config.mode == "三维模式": elif self.config.mode == "三维模式":
self.merge_ply(successful_grid_points) # self.merge_ply(successful_grid_points)
self.merge_obj(successful_grid_points, translations) self.merge_obj(successful_grid_points, translations)
else: else:
self.merge_tif(successful_grid_points, self.config.produce_dem) self.merge_tif(successful_grid_points, self.config.produce_dem)
self.merge_ply(successful_grid_points) # self.merge_ply(successful_grid_points)
self.merge_obj(successful_grid_points, translations) self.merge_obj(successful_grid_points, translations)
def process(self): def process(self):
@ -269,7 +291,8 @@ class ImagePreprocessor:
self.extract_gps() self.extract_gps()
self.cluster() self.cluster()
self.filter_isolated_points() self.filter_isolated_points()
self.filter_time_group_overlap() # self.filter_time_group_overlap()
# self.filter_alternate_images()
grid_points, translations = self.divide_grids() grid_points, translations = self.divide_grids()
# self.copy_images(grid_points) # self.copy_images(grid_points)
self.logger.info("预处理任务完成") self.logger.info("预处理任务完成")

View File

@ -1,18 +1,27 @@
import os import os
import logging import logging
import numpy as np
from typing import Dict
import pandas as pd import pandas as pd
from typing import Dict, List, Tuple
import numpy as np
import shutil import shutil
import time import time
import cv2 import cv2
import subprocess import subprocess
from pyproj import Transformer
class MergeObj: class MergeObj:
def __init__(self, output_dir: str): def __init__(self, output_dir: str):
self.output_dir = output_dir self.output_dir = output_dir
self.logger = logging.getLogger('UAV_Preprocess.MergeObj') self.logger = logging.getLogger('UAV_Preprocess.MergeObj')
# 用于存储所有grid的UTM范围
self.min_east = float('inf')
self.min_north = float('inf')
self.max_east = float('-inf')
self.max_north = float('-inf')
# 初始化UTM到WGS84的转换器
self.transformer = Transformer.from_crs(
"EPSG:32649", "EPSG:4326", always_xy=True)
def read_obj(self, file_path): def read_obj(self, file_path):
"""读取.obj文件返回顶点、纹理坐标、法线、面的列表和MTL文件名""" """读取.obj文件返回顶点、纹理坐标、法线、面的列表和MTL文件名"""
@ -103,89 +112,204 @@ class MergeObj:
face_str += "/" face_str += "/"
file.write(face_str + "\n") file.write(face_str + "\n")
def translate_vertices(self, vertices, translation): def merge_grid_obj(self, grid_points: Dict[tuple, pd.DataFrame]) -> Tuple[float, float]:
"""平移顶点""" """合并所有网格的OBJ模型
return [[v[0] + translation[0], v[1] + translation[1], v[2] + translation[2]] for v in vertices] Args:
grid_points: 网格点数据字典
def merge_two_objs(self, obj1_path: str, obj2_path: str, output_path: str, translation, grid_id1: tuple, grid_id2: tuple): Returns:
"""合并两个OBJ文件""" Tuple[float, float]: (longitude, latitude)中心点经纬度坐标
"""
try: try:
self.logger.info(f"开始合并OBJ模型:\n输入1: {obj1_path}\n输入2: {obj2_path}") # 创建输出目录
output_model_dir = os.path.join(self.output_dir, "texturing")
os.makedirs(output_model_dir, exist_ok=True)
# 读取两个obj文件 # 初始化全局边界框坐标
vertices1, tex_coords1, normals1, faces1, face_materials1, mtl1 = self.read_obj( global_min_lon = float('inf')
obj1_path) global_min_lat = float('inf')
vertices2, tex_coords2, normals2, faces2, face_materials2, mtl2 = self.read_obj( global_max_lon = float('-inf')
obj2_path) global_max_lat = float('-inf')
# 读取MTL文件内容以获取正确的材质名称 # 第一次遍历获取所有grid的UTM范围
src_dir1 = os.path.dirname(obj1_path) for grid_id, points in grid_points.items():
src_dir2 = os.path.dirname(obj2_path) base_dir = os.path.join(
mtl1_path = os.path.join(src_dir1, mtl1) self.output_dir,
mtl2_path = os.path.join(src_dir2, mtl2) f"grid_{grid_id[0]}_{grid_id[1]}",
"project"
)
log_file = os.path.join(
base_dir, "odm_orthophoto", "odm_orthophoto_log.txt")
east_offset, north_offset = self.read_utm_offset(log_file)
# 读取并更新材质内容 # 更新UTM范围
materials1 = self.read_mtl(mtl1_path) self.min_east = min(self.min_east, east_offset)
materials2 = self.read_mtl(mtl2_path) self.min_north = min(self.min_north, north_offset)
self.max_east = max(self.max_east, east_offset)
self.max_north = max(self.max_north, north_offset)
# 创建材质名称映射使用与MTL文件相同的命名格式 # 收集所有grid的数据
material_map1 = {} all_vertices = [] # 所有顶点
material_map2 = {} all_tex_coords = [] # 所有纹理坐标
all_normals = [] # 所有法线
all_faces = [] # 所有面
all_face_materials = [] # 所有面的材质
all_materials = {} # 所有材质信息
grid_centers = [] # 所有grid的中心点
# 处理第一个模型的材质映射 # 处理每个grid
for old_name in materials1.keys(): for grid_id, points in grid_points.items():
if "grid_0_0" in obj1_path: base_dir = os.path.join(
material_map1[old_name] = f"material_{grid_id1[0]}_{grid_id1[1]}_{old_name}" self.output_dir,
else: f"grid_{grid_id[0]}_{grid_id[1]}",
# 更新完一次后,之后就不用再更新了 "project",
material_map1[old_name] = old_name "odm_texturing"
)
obj_path = os.path.join(base_dir, "odm_textured_model_geo.obj")
mtl_path = os.path.join(base_dir, "odm_textured_model_geo.mtl")
# 处理第二个模型的材质映射 if not os.path.exists(obj_path) or not os.path.exists(mtl_path):
for old_name in materials2.keys(): self.logger.warning(
material_map2[old_name] = f"material_{grid_id2[0]}_{grid_id2[1]}_{old_name}" f"网格 ({grid_id[0]},{grid_id[1]}) 的文件不存在")
continue
# 平移第二个模型的顶点 # 读取UTM偏移量并修改obj文件的顶点坐标
vertices2_translated = self.translate_vertices( log_file = os.path.join(
vertices2, translation) base_dir, "..", "odm_orthophoto", "odm_orthophoto_log.txt")
utm_offset = self.read_utm_offset(log_file)
modified_obj = self.modify_obj_coordinates(
obj_path, utm_offset)
# 计算偏移量 # 读取obj文件内容
v_offset = len(vertices1) vertices, tex_coords, normals, faces, face_materials, _ = self.read_obj(
vt_offset = len(tex_coords1) modified_obj)
vn_offset = len(normals1)
# 合并顶点、纹理坐标和法线 # 计算当前grid的中心点
all_vertices = vertices1 + vertices2_translated grid_center_lon, grid_center_lat, grid_bounding_box = self.get_center_coordinates(
all_tex_coords = tex_coords1 + tex_coords2 vertices)
all_normals = normals1 + normals2 grid_centers.append((grid_center_lon, grid_center_lat))
self.logger.info(
f"网格 ({grid_id[0]},{grid_id[1]}) 中心点经纬度: ({grid_center_lon}, {grid_center_lat})")
# 更新全局边界框坐标
global_min_lon = min(
global_min_lon, grid_bounding_box['LB_lon'])
global_min_lat = min(
global_min_lat, grid_bounding_box['LB_lat'])
global_max_lon = max(
global_max_lon, grid_bounding_box['RU_lon'])
global_max_lat = max(
global_max_lat, grid_bounding_box['RU_lat'])
# 调整第二个模型的面索引和材质名称 # 复制并重命名纹理文件
all_faces = faces1.copy() texture_map = self.copy_and_rename_texture(
all_face_materials = [] base_dir,
output_model_dir,
grid_id
)
# 更新第一个模型的材质名称 # 读取并更新材质内容
for material in face_materials1: materials = self.read_mtl(mtl_path)
all_face_materials.append(material_map1.get(material)) updated_materials = self.update_mtl_content(
materials,
texture_map,
grid_id
)
all_materials.update(updated_materials)
# 更新第二个模型的面索引和材质名称 # 计算顶点偏移量
for face, material in zip(faces2, face_materials2): v_offset = len(all_vertices)
new_face_v = [f + v_offset for f in face[0]] vt_offset = len(all_tex_coords)
new_face_vt = [ vn_offset = len(all_normals)
f + vt_offset for f in face[1]] if face[1] else []
new_face_vn = [
f + vn_offset for f in face[2]] if face[2] else []
all_faces.append((new_face_v, new_face_vt, new_face_vn))
all_face_materials.append(material_map2.get(material))
# 写入合并后的obj文件使用与MTL文件相同的名称 # 添加顶点、纹理坐标和法线
mtl_filename = "textured_model.mtl" # 使用固定的MTL文件名 all_vertices.extend(vertices)
self.write_obj(output_path, all_vertices, all_tex_coords, all_normals, all_tex_coords.extend(tex_coords)
all_faces, all_face_materials, mtl_filename) all_normals.extend(normals)
self.logger.info(f"模型合并成功,已保存至: {output_path}")
# 添加面和材质
for face, material in zip(faces, face_materials):
# 调整面的索引
new_face_v = [f + v_offset for f in face[0]]
new_face_vt = [
f + vt_offset for f in face[1]] if face[1] else []
new_face_vn = [
f + vn_offset for f in face[2]] if face[2] else []
all_faces.append((new_face_v, new_face_vt, new_face_vn))
# 添加材质前缀
if material:
all_face_materials.append(
f"material_{grid_id[0]}_{grid_id[1]}_{material}")
else:
all_face_materials.append(material)
if not all_vertices:
self.logger.error("没有找到有效的文件")
return
# 写入合并后的MTL文件
final_mtl = os.path.join(output_model_dir, "textured_model.mtl")
with open(final_mtl, 'w') as f:
for mat_name, content in all_materials.items():
f.write(f"newmtl {mat_name}\n")
for line in content:
f.write(f"{line}\n")
f.write("\n")
# 写入合并后的OBJ文件
final_obj = os.path.join(output_model_dir, "textured_model.obj")
self.write_obj(final_obj, all_vertices, all_tex_coords, all_normals,
all_faces, all_face_materials, "textured_model.mtl")
# 计算整体中心点
center_lon = sum(center[0]
for center in grid_centers) / len(grid_centers)
center_lat = sum(center[1]
for center in grid_centers) / len(grid_centers)
self.logger.info(f"模型整体中心点经纬度: ({center_lon}, {center_lat})")
# 计算整个区域的边界框
bounding_box = [global_min_lon, global_min_lat, global_max_lon, global_max_lat]
self.logger.info(
f"模型整体边界框: ({bounding_box[0]}, {bounding_box[1]}) - ({bounding_box[2]}, {bounding_box[3]})")
return center_lon, center_lat, bounding_box
except Exception as e: except Exception as e:
self.logger.error(f"合并OBJ模型时发生错误: {str(e)}", exc_info=True) self.logger.error(f"合并过程中发生错误: {str(e)}", exc_info=True)
raise raise
def get_center_coordinates(self, vertices: List[List[float]]) -> Tuple[float, float, Dict[str, float]]:
"""计算顶点的中心点UTM坐标并转换为WGS84经纬度。
注意顶点坐标是相对于整体最小UTM坐标的偏移值需要加回最小UTM坐标
Args:
vertices: 顶点列表每个顶点是[x, y, z]格式x和y是相对于最小UTM坐标的偏移
Returns:
Tuple[float, float, Dict[str, float]]: (longitude, latitude, bounding_box)
"""
# 计算相对坐标的边界框
x_coords = [v[0] for v in vertices]
y_coords = [v[1] for v in vertices]
# 计算中心点相对坐标
center_x_relative = (min(x_coords) + max(x_coords)) / 2
center_y_relative = (min(y_coords) + max(y_coords)) / 2
# 加回最小UTM坐标得到实际的UTM坐标
center_x_utm = center_x_relative + self.min_east
center_y_utm = center_y_relative + self.min_north
# 转换为WGS84经纬度
lon, lat = self.transformer.transform(center_x_utm, center_y_utm)
# 计算边界框并转换为经纬度
bounding_box = {
'LB_lon': self.transformer.transform(min(x_coords) + self.min_east, min(y_coords) + self.min_north)[0],
'LB_lat': self.transformer.transform(min(x_coords) + self.min_east, min(y_coords) + self.min_north)[1],
'RU_lon': self.transformer.transform(max(x_coords) + self.min_east, max(y_coords) + self.min_north)[0],
'RU_lat': self.transformer.transform(max(x_coords) + self.min_east, max(y_coords) + self.min_north)[1]
}
self.logger.info(f"模型UTM中心点: ({center_x_utm}, {center_y_utm})")
return lon, lat, bounding_box
def read_mtl(self, mtl_path: str) -> dict: def read_mtl(self, mtl_path: str) -> dict:
"""读取MTL文件内容 """读取MTL文件内容
Returns: Returns:
@ -291,153 +415,49 @@ class MergeObj:
return updated_materials return updated_materials
def merge_grid_obj(self, grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]): def read_utm_offset(self, log_file: str) -> tuple:
"""合并所有网格的OBJ模型""" """读取UTM偏移量"""
if len(grid_points) == 1: try:
grid_id = list(grid_points.keys())[0] east_offset = None
shutil.copytree(os.path.join(self.output_dir, north_offset = None
f"grid_{grid_id[0]}_{grid_id[1]}",
"project", with open(log_file, 'r') as f:
"odm_texturing"), lines = f.readlines()
os.path.join(self.output_dir, "texturing")) for i, line in enumerate(lines):
os.rename(os.path.join(self.output_dir, "texturing", "odm_textured_model_geo.obj"), if 'utm_north_offset' in line and i + 1 < len(lines):
os.path.join(self.output_dir, "texturing", "textured_model.obj")) north_offset = float(lines[i + 1].strip())
self.logger.info(f"开始执行格式转换") elif 'utm_east_offset' in line and i + 1 < len(lines):
docker_command = ( east_offset = float(lines[i + 1].strip())
f"docker run --rm -it "
f"-v {self.output_dir}/texturing:/data " if east_offset is None or north_offset is None:
f"-e LD_LIBRARY_PATH=/opt/osg/build/lib:$LD_LIBRARY_PATH " raise ValueError("未找到UTM偏移量")
f"osg-ubuntu2004 osgconv /data/textured_model.obj /data/textured_model.osgb"
) return east_offset, north_offset
self.logger.info(docker_command) except Exception as e:
subprocess.run( self.logger.error(f"读取UTM偏移量时发生错误: {str(e)}")
docker_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) raise
self.logger.info(f"格式转换完成")
return def modify_obj_coordinates(self, obj_file: str, utm_offset: tuple) -> str:
"""修改obj文件中的顶点坐标使用相对坐标系"""
east_offset, north_offset = utm_offset
output_obj = obj_file.replace('.obj', '_utm.obj')
try: try:
# 创建输出目录 with open(obj_file, 'r') as f_in, open(output_obj, 'w') as f_out:
output_model_dir = os.path.join(self.output_dir, "texturing") for line in f_in:
os.makedirs(output_model_dir, exist_ok=True) if line.startswith('v '):
# 处理顶点坐标行
# 获取所有有效的网格文件 parts = line.strip().split()
grid_files = {} # 使用相对于整体最小UTM坐标的偏移
for grid_id, points in grid_points.items(): x = float(parts[1]) + (east_offset - self.min_east)
base_dir = os.path.join( y = float(parts[2]) + (north_offset - self.min_north)
self.output_dir, z = float(parts[3])
f"grid_{grid_id[0]}_{grid_id[1]}", f_out.write(f'v {x:.6f} {y:.6f} {z:.6f}\n')
"project", else:
"odm_texturing" # 其他行直接写入
) f_out.write(line)
obj_path = os.path.join(base_dir, "odm_textured_model_geo.obj")
mtl_path = os.path.join(base_dir, "odm_textured_model_geo.mtl")
if not os.path.exists(obj_path) or not os.path.exists(mtl_path):
self.logger.warning(
f"网格 ({grid_id[0]},{grid_id[1]}) 的文件不存在")
continue
grid_files[grid_id] = {
'obj': obj_path,
'mtl': mtl_path,
'dir': base_dir
}
if not grid_files:
self.logger.error("没有找到有效的文件")
return
# 收集所有材质和纹理信息
all_materials = {}
for grid_id, files in grid_files.items():
# 复制并重命名纹理文件
texture_map = self.copy_and_rename_texture(
files['dir'],
output_model_dir,
grid_id
)
# 读取并更新MTL内容
materials = self.read_mtl(files['mtl'])
updated_materials = self.update_mtl_content(
materials,
texture_map,
grid_id
)
all_materials.update(updated_materials)
# 写入合并后的MTL文件
final_mtl = os.path.join(output_model_dir, "textured_model.mtl")
with open(final_mtl, 'w') as f:
for mat_name, content in all_materials.items():
f.write(f"newmtl {mat_name}\n")
for line in content:
f.write(f"{line}\n")
f.write("\n")
# 合并OBJ文件
reference_id = list(grid_files.keys())[0]
merged_obj = grid_files[reference_id]['obj']
temp_files = [] # 记录所有中间文件
for grid_id, files in list(grid_files.items())[1:]:
translation = translations[grid_id]
translation = (translation[0], translation[1], 0)
# 生成临时输出文件名
temp_output = os.path.join(
output_model_dir,
f"temp_merged_{int(time.time())}.obj"
)
temp_files.append(temp_output) # 添加到临时文件列表
self.merge_two_objs(
merged_obj, files['obj'], temp_output, translation, reference_id, grid_id)
merged_obj = temp_output
# 最终结果
final_obj = os.path.join(output_model_dir, "textured_model.obj")
try:
if os.path.exists(final_obj):
os.remove(final_obj)
os.rename(merged_obj, final_obj)
except Exception as e:
self.logger.warning(f"重命名最终文件失败: {str(e)}")
shutil.copy2(merged_obj, final_obj)
try:
os.remove(merged_obj)
except:
pass
# 清理所有临时文件
for temp_file in temp_files:
if os.path.exists(temp_file):
try:
os.remove(temp_file)
except Exception as e:
self.logger.warning(
f"删除临时文件失败: {temp_file}, 错误: {str(e)}")
self.logger.info(
f"模型合并完成,输出目录: {output_model_dir}\n"
f"- OBJ文件: textured_model.obj\n"
f"- MTL文件: textured_model.mtl\n"
f"- 纹理文件: {len(os.listdir(output_model_dir)) - 2}"
)
self.logger.info(f"开始执行格式转换")
docker_command = (
f"docker run --rm -it "
f"-v {self.output_model_dir}:/data "
f"-e LD_LIBRARY_PATH=/opt/osg/build/lib:$LD_LIBRARY_PATH "
f"osg-ubuntu2004 osgconv /data/textured_model.obj /data/textured_model.osgb"
)
self.logger.info(docker_command)
subprocess.run(
docker_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.logger.info(f"格式转换完成")
return output_obj
except Exception as e: except Exception as e:
self.logger.error(f"合并过程中发生错误: {str(e)}", exc_info=True) self.logger.error(f"修改obj坐标时发生错误: {str(e)}")
raise raise

102
post_pro/obj_post_pro.py Normal file
View File

@ -0,0 +1,102 @@
import os
import logging
import subprocess
from typing import Tuple, Dict
class ObjPostProcessor:
    """Post-processing for the merged OBJ model.

    Converts the merged ``texturing/textured_model.obj`` to OSGB format
    (via the external ``osgconv`` tool) and writes a ``metadata.xml``
    carrying the geo-reference (center point and bounding box, WGS84).
    """

    def __init__(self, output_dir: str):
        # Root output directory; expected to contain a "texturing" subfolder
        # with the merged OBJ produced by MergeObj.
        self.output_dir = output_dir
        self.logger = logging.getLogger('UAV_Preprocess.ObjPostProcessor')

    def create_metadata_xml(self, osgb_dir: str, lon: float, lat: float, bounding_box):
        """Create a metadata.xml file containing geo-reference information.

        Args:
            osgb_dir: OSGB output directory; metadata.xml is written at its root.
            lon: Longitude of the model center point (WGS84).
            lat: Latitude of the model center point (WGS84).
            bounding_box: ``[min_lon, min_lat, max_lon, max_lat]`` of the model.

        Raises:
            OSError: If the file cannot be written.
        """
        try:
            metadata_content = f'''<?xml version="1.0" encoding="utf-8"?>
<ModelMetadata version="1">
    <!-- Spatial Reference System -->
    <SRS>EPSG:4326</SRS>
    <!-- Center point in Spatial Reference System (in Longitude, Latitude, Height) -->
    <SRSOrigin>{lon},{lat},0.000000</SRSOrigin>
    <!-- Bounding Box with Two Points -->
    <BoundingBox>
        <!-- Left-Bottom Corner (Longitude, Latitude) -->
        <LB_lon>{bounding_box[0]}</LB_lon>
        <LB_lat>{bounding_box[1]}</LB_lat>
        <!-- Right-Top Corner (Longitude, Latitude) -->
        <RU_lon>{bounding_box[2]}</RU_lon>
        <RU_lat>{bounding_box[3]}</RU_lat>
    </BoundingBox>
    <Texture>
        <ColorSource>Visible</ColorSource>
    </Texture>
</ModelMetadata>
'''
            # metadata.xml lives at the root of the OSGB directory, next to Data/.
            metadata_path = os.path.join(osgb_dir, 'metadata.xml')
            with open(metadata_path, 'w', encoding='utf-8') as f:
                f.write(metadata_content)
            self.logger.info(f"已创建metadata.xml: {metadata_path}")
        except Exception as e:
            self.logger.error(f"创建metadata.xml时发生错误: {str(e)}")
            raise

    def convert_to_osgb(self, center_lon, center_lat, bounding_box):
        """Convert the merged OBJ to OSGB and create metadata.xml.

        Args:
            center_lon: Longitude of the model center point (WGS84).
            center_lat: Latitude of the model center point (WGS84).
            bounding_box: ``[min_lon, min_lat, max_lon, max_lat]`` of the model.

        Returns:
            bool: True on success, False if any step failed (the error is logged).
        """
        try:
            # Locate the merged OBJ produced by the merge step.
            obj_dir = os.path.join(self.output_dir, 'texturing')
            obj_file = os.path.join(obj_dir, 'textured_model.obj')
            if not os.path.exists(obj_file):
                raise FileNotFoundError(f"未找到obj文件: {obj_file}")

            # Create the osgb directory layout: <out>/osgb/Data/textured_model/.
            osgb_dir = os.path.join(self.output_dir, 'osgb')
            osgb_data_dir = os.path.join(osgb_dir, 'Data', 'textured_model')
            os.makedirs(osgb_data_dir, exist_ok=True)

            # Output file path.
            output_osgb = os.path.join(osgb_data_dir, 'textured_model.osgb')

            # Build the osgconv command; '-o 0,1,0-0,0,-1' rotates the model
            # axes (presumably Y-up -> Z-up — TODO confirm against osgconv docs).
            cmd = [
                'osgconv',
                '--compressed',
                '--smooth',
                '--fix-transparency',
                '-o', '0,1,0-0,0,-1',
                obj_file,
                output_osgb
            ]

            # Run the conversion; list form avoids shell injection.
            self.logger.info(f"执行osgconv命令{' '.join(cmd)}")
            result = subprocess.run(cmd, capture_output=True, text=True)

            if result.returncode != 0:
                raise RuntimeError(f"osgb格式转换失败: {result.stderr}")

            self.logger.info(f"转换完成: {output_osgb}")

            # Write the geo-reference alongside the converted model.
            self.create_metadata_xml(osgb_dir, center_lon, center_lat, bounding_box)

            return True
        except Exception as e:
            self.logger.error(f"转换osgb时发生错误: {str(e)}")
            return False

View File

@ -1,12 +0,0 @@
import subprocess
def run_docker_command(command):
result = subprocess.run(command, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return result.stdout.decode('utf-8'), result.stderr.decode('utf-8')
if __name__ == "__main__":
command = "docker run -ti --rm -v g:/ODM_output/20241024100834/grid_1:/datasets opendronemap/odm --project-path /datasets project --max-concurrency 10 --force-gps --feature-quality lowest --orthophoto-resolution 10 --fast-orthophoto --skip-3dmodel --rerun-all"
stdout, stderr = run_docker_command(command)
print(stdout)

View File

@ -125,16 +125,14 @@ class ODMProcessMonitor:
def run_odm_with_monitor(self, grid_dir: str, grid_id: tuple, produce_dem: bool = False) -> Tuple[bool, str]: def run_odm_with_monitor(self, grid_dir: str, grid_id: tuple, produce_dem: bool = False) -> Tuple[bool, str]:
"""运行ODM命令""" """运行ODM命令"""
# if produce_dem and self.mode == "快拼模式":
# self.logger.error("快拼模式下无法生成DEM请调整生产参数")
# return False, "快拼模式下无法生成DEM请调整生产参数"
self.logger.info(f"开始处理网格 ({grid_id[0]},{grid_id[1]})") self.logger.info(f"开始处理网格 ({grid_id[0]},{grid_id[1]})")
success = False success = False
error_msg = "" error_msg = ""
max_retries = 3 max_retries = 3
current_try = 0 current_try = 0
use_lowest_quality = True # 初始使用lowest quality
# 根据模式设置是否使用lowest quality
use_lowest_quality = self.mode == "快拼模式"
while current_try < max_retries: while current_try < max_retries:
current_try += 1 current_try += 1
@ -151,6 +149,10 @@ class ODMProcessMonitor:
f"--project-path /datasets project " f"--project-path /datasets project "
f"--max-concurrency 15 " f"--max-concurrency 15 "
f"--force-gps " f"--force-gps "
f"--use-exif "
f"--use-hybrid-bundle-adjustment "
f"--optimize-disk-space "
# f"--feature-quality ultra "
) )
# 根据是否使用lowest quality添加参数 # 根据是否使用lowest quality添加参数
@ -170,10 +172,10 @@ class ODMProcessMonitor:
#f"--fast-orthophoto " #f"--fast-orthophoto "
f"--skip-3dmodel " f"--skip-3dmodel "
) )
elif self.mode == "三维模式": # elif self.mode == "三维模式":
docker_command += ( # docker_command += (
f"--skip-orthophoto " # f"--skip-orthophoto "
) # )
docker_command += "--rerun-all" docker_command += "--rerun-all"
self.logger.info(docker_command) self.logger.info(docker_command)
@ -223,7 +225,7 @@ class ODMProcessMonitor:
raise NotOverlapError raise NotOverlapError
except NotOverlapError: except NotOverlapError:
if use_lowest_quality: if use_lowest_quality and self.mode == "快拼模式":
self.logger.warning( self.logger.warning(
"检测到not overlap错误移除lowest quality参数后重试") "检测到not overlap错误移除lowest quality参数后重试")
use_lowest_quality = False use_lowest_quality = False
@ -231,7 +233,7 @@ class ODMProcessMonitor:
continue continue
else: else:
self.logger.error( self.logger.error(
"即使移除lowest quality参数后仍然出现错误") "出现错误,需要人工检查数据集")
error_msg = "图像重叠度不足,需要人工检查数据集的采样间隔情况" error_msg = "图像重叠度不足,需要人工检查数据集的采样间隔情况"
break break