Fix 2D grid coordinate bug

龙澳 2024-12-31 21:37:44 +08:00
parent 466bc06b47
commit b3d7c37399
7 changed files with 105 additions and 163 deletions

View File

@@ -197,7 +197,7 @@ class ImagePreprocessor:
         return self.gps_points
 
-    def divide_grids(self) -> Dict[int, pd.DataFrame]:
+    def divide_grids(self) -> Dict[tuple, pd.DataFrame]:
         """划分网格"""
         self.logger.info(f"开始划分网格 (重叠率: {self.config.grid_overlap})")
         grid_divider = GridDivider(
@@ -208,50 +208,29 @@ class ImagePreprocessor:
             self.gps_points, grid_size=self.config.grid_size
         )
         grid_points = grid_divider.assign_to_grids(self.gps_points, grids)
-        # 将grid_divider添加到grid_points中
-        grid_points['grid_divider'] = grid_divider
-        self.logger.info(f"成功划分为 {len(grid_points)-1} 个网格")  # -1是因为包含了grid_divider
-        # 生成image_groups.txt文件
-        try:
-            groups_file = os.path.join(self.config.output_dir, "image_groups.txt")
-            self.logger.info(f"开始生成分组文件: {groups_file}")
-            with open(groups_file, 'w') as f:
-                for grid_idx, points_lt in grid_points.items():
-                    # 使用ASCII字母作为组标识A, B, C...
-                    group_letter = chr(65 + grid_idx)  # 65是ASCII中'A'的编码
-                    # 为每个网格中的图像写入分组信息
-                    for point in points_lt:
-                        f.write(f"{point['file']} {group_letter}\n")
-            self.logger.info(f"分组文件生成成功: {groups_file}")
-        except Exception as e:
-            self.logger.error(f"生成分组文件时发生错误: {str(e)}", exc_info=True)
-            raise
+        self.logger.info(f"成功划分为 {len(grid_points)} 个网格")
         return grid_points
 
-    def copy_images(self, grid_points: Dict[int, pd.DataFrame]):
+    def copy_images(self, grid_points: Dict[tuple, pd.DataFrame]):
         """复制图像到目标文件夹"""
         self.logger.info("开始复制图像文件")
-        self.logger.info("开始复制图像文件")
-        for grid_idx, points in grid_points.items():
+        for grid_id, points in grid_points.items():
             output_dir = os.path.join(
-                self.config.output_dir, f"grid_{grid_idx + 1}", "project", "images"
+                self.config.output_dir, f"grid_{grid_id[0]}_{grid_id[1]}", "project", "images"
             )
             os.makedirs(output_dir, exist_ok=True)
-            for point in tqdm(points, desc=f"复制网格 {grid_idx + 1} 的图像"):
+            for point in tqdm(points, desc=f"复制网格 {grid_id} 的图像"):
                 src = os.path.join(self.config.image_dir, point["file"])
                 dst = os.path.join(output_dir, point["file"])
                 shutil.copy(src, dst)
-            self.logger.info(f"网格 {grid_idx + 1} 包含 {len(points)} 张图像")
+            self.logger.info(f"网格 {grid_id} 包含 {len(points)} 张图像")
 
     def merge_tif(self, grid_points: Dict[int, pd.DataFrame]):
         """合并所有网格的影像产品"""
@@ -269,7 +248,7 @@ class ImagePreprocessor:
         """合并所有网格的PLY点云"""
         self.logger.info("开始合并PLY点云")
         merger = MergePly(self.config.output_dir)
-        merger.merge_grid_ply(grid_points)
+        merger.merge_grid_laz(grid_points)
 
     def process(self):
         """执行完整的预处理流程"""
@@ -283,9 +262,9 @@ class ImagePreprocessor:
             self.logger.info("预处理任务完成")
             self.odm_monitor.process_all_grids(grid_points)
-            self.merge_tif(grid_points)
-            self.merge_obj(grid_points)
-            self.merge_ply(grid_points)
+            # self.merge_tif(grid_points)
+            # self.merge_obj(grid_points)
+            # self.merge_ply(grid_points)
         except Exception as e:
             self.logger.error(f"处理过程中发生错误: {str(e)}", exc_info=True)
             raise
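The change above replaces the integer grid index with a (width_idx, height_idx) tuple as the key of grid_points, and derives each per-grid folder name from that tuple instead of from idx + 1. A minimal sketch of the new keying, using hypothetical image records and an assumed output root (neither taken from the repo):

import os

# Hypothetical data: keys are (width_idx, height_idx) tuples, values are image records.
grid_points = {
    (0, 0): [{"file": "IMG_0001.JPG"}],
    (1, 0): [{"file": "IMG_0002.JPG"}],
}

output_dir = "output"  # stands in for self.config.output_dir
for grid_id, points in grid_points.items():
    # grid_<col>_<row> replaces the old grid_<idx + 1> naming
    grid_dir = os.path.join(output_dir, f"grid_{grid_id[0]}_{grid_id[1]}", "project", "images")
    print(grid_dir, len(points))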

View File

@@ -197,7 +197,7 @@ class ImagePreprocessor:
         return self.gps_points
 
-    def divide_grids(self) -> Dict[int, pd.DataFrame]:
+    def divide_grids(self) -> Dict[tuple, pd.DataFrame]:
         """划分网格"""
         self.logger.info(f"开始划分网格 (重叠率: {self.config.grid_overlap})")
         grid_divider = GridDivider(
@@ -209,63 +209,43 @@ class ImagePreprocessor:
         )
         grid_points = grid_divider.assign_to_grids(self.gps_points, grids)
         self.logger.info(f"成功划分为 {len(grid_points)} 个网格")
 
-        # 生成image_groups.txt文件
-        try:
-            groups_file = os.path.join(self.config.output_dir, "image_groups.txt")
-            self.logger.info(f"开始生成分组文件: {groups_file}")
-            with open(groups_file, 'w') as f:
-                for grid_idx, points_lt in grid_points.items():
-                    # 使用ASCII字母作为组标识A, B, C...
-                    group_letter = chr(65 + grid_idx)  # 65是ASCII中'A'的编码
-                    # 为每个网格中的图像写入分组信息
-                    for point in points_lt:
-                        f.write(f"{point['file']} {group_letter}\n")
-            self.logger.info(f"分组文件生成成功: {groups_file}")
-        except Exception as e:
-            self.logger.error(f"生成分组文件时发生错误: {str(e)}", exc_info=True)
-            raise
         return grid_points
 
-    def copy_images(self, grid_points: Dict[int, pd.DataFrame]):
+    def copy_images(self, grid_points: Dict[tuple, pd.DataFrame]):
         """复制图像到目标文件夹"""
         self.logger.info("开始复制图像文件")
-        self.logger.info("开始复制图像文件")
-        for grid_idx, points in grid_points.items():
+        for grid_id, points in grid_points.items():
             output_dir = os.path.join(
-                self.config.output_dir, f"grid_{grid_idx + 1}", "project", "images"
+                self.config.output_dir, f"grid_{grid_id[0]}_{grid_id[1]}", "project", "images"
             )
             os.makedirs(output_dir, exist_ok=True)
-            for point in tqdm(points, desc=f"复制网格 {grid_idx + 1} 的图像"):
+            for point in tqdm(points, desc=f"复制网格 ({grid_id[0]},{grid_id[1]}) 的图像"):
                 src = os.path.join(self.config.image_dir, point["file"])
                 dst = os.path.join(output_dir, point["file"])
                 shutil.copy(src, dst)
-            self.logger.info(f"网格 {grid_idx + 1} 包含 {len(points)} 张图像")
+            self.logger.info(f"网格 ({grid_id[0]},{grid_id[1]}) 包含 {len(points)} 张图像")
 
-    def merge_tif(self, grid_points: Dict[int, pd.DataFrame]):
+    def merge_tif(self, grid_points: Dict[tuple, pd.DataFrame]):
         """合并所有网格的影像产品"""
         self.logger.info("开始合并所有影像产品")
         merger = MergeTif(self.config.output_dir)
         merger.merge_all_tifs(grid_points)
 
-    def merge_obj(self, grid_points: Dict[int, pd.DataFrame]):
+    def merge_obj(self, grid_points: Dict[tuple, pd.DataFrame]):
         """合并所有网格的OBJ模型"""
         self.logger.info("开始合并OBJ模型")
         merger = MergeObj(self.config.output_dir)
-        merger.merge_grid_obj(grid_points)
+        merger.merge_grid_obj(grid_points, self.config.grid_size)
 
-    def merge_ply(self, grid_points: Dict[int, pd.DataFrame]):
+    def merge_ply(self, grid_points: Dict[tuple, pd.DataFrame]):
         """合并所有网格的PLY点云"""
         self.logger.info("开始合并PLY点云")
         merger = MergePly(self.config.output_dir)
-        merger.merge_grid_ply(grid_points)
+        merger.merge_grid_laz(grid_points)
 
     def process(self):
         """执行完整的预处理流程"""
@@ -280,7 +260,7 @@ class ImagePreprocessor:
             # self.odm_monitor.process_all_grids(grid_points)
             # self.merge_tif(grid_points)
-            self.merge_ply(grid_points)
+            # self.merge_ply(grid_points)
             self.merge_obj(grid_points)
         except Exception as e:
             self.logger.error(f"处理过程中发生错误: {str(e)}", exc_info=True)

View File

@@ -11,7 +11,7 @@ class MergePly:
         self.output_dir = output_dir
         self.logger = logging.getLogger('UAV_Preprocess.MergePly')
 
-    def merge_grid_laz(self, grid_points: Dict[int, list]):
+    def merge_grid_laz(self, grid_points: Dict[tuple, list]):
         """合并所有网格的点云"""
         self.logger.info("开始合并所有网格的laz点云")
 
@@ -21,26 +21,26 @@ class MergePly:
         try:
             laz_lt = []
-            for grid_idx, points in grid_points.items():
+            for grid_id, points in grid_points.items():
                 grid_laz = os.path.join(
                     self.output_dir,
-                    f"grid_{grid_idx + 1}",
+                    f"grid_{grid_id[0]}_{grid_id[1]}",
                     "project",
                     "odm_georeferencing",
                     "odm_georeferenced_model.laz"
                 )
 
                 if not os.path.exists(grid_laz):
-                    self.logger.warning(f"参考网格的laz文件不存在: {grid_laz}")
+                    self.logger.warning(f"网格 ({grid_id[0]},{grid_id[1]}) 的laz文件不存在: {grid_laz}")
                     continue
                 laz_lt.append(grid_laz)
 
             kwargs = {
                 'all_inputs': " ".join(laz_lt),
                 'output': os.path.join(self.output_dir, 'merged_pointcloud.laz')
             }
-            subprocess.run('D:\\software\\LAStools\\bin\\lasmerge.exe -i {all_inputs} -o "{output}"'.format(**kwargs))
+            subprocess.run('D:\\software\\LAStools\\bin\\lasmerge64.exe -i {all_inputs} -o "{output}"'.format(**kwargs))
 
         except Exception as e:
             self.logger.error(f"PLY点云合并过程中发生错误: {str(e)}", exc_info=True)
@@ -56,10 +56,10 @@ if __name__ == "__main__":
     # 构造测试用的grid_points字典
     grid_points = {
-        0: [],  # 不再需要GPS点信息
-        1: []
+        (0, 0): [],  # 不再需要GPS点信息
+        (0, 1): []
     }
 
     # 创建MergePly实例并执行合并
     merge_ply = MergePly(output_dir)
-    merge_ply.merge_grid_ply(grid_points)
+    merge_ply.merge_grid_laz(grid_points)
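The point-cloud merge now collects one odm_georeferenced_model.laz per tuple-keyed grid and shells out to LAStools' lasmerge64.exe. A minimal sketch of how that command string is put together, assuming the same Windows LAStools path and output layout shown in the diff; the grid keys and output root below are made up for illustration:

import os

output_dir = "output"                   # assumed output root
grid_points = {(0, 0): [], (1, 0): []}  # hypothetical tuple-keyed grids

laz_files = [
    os.path.join(output_dir, f"grid_{gid[0]}_{gid[1]}", "project",
                 "odm_georeferencing", "odm_georeferenced_model.laz")
    for gid in grid_points
]
cmd = 'D:\\software\\LAStools\\bin\\lasmerge64.exe -i {inputs} -o "{out}"'.format(
    inputs=" ".join(laz_files),
    out=os.path.join(output_dir, "merged_pointcloud.laz"),
)
print(cmd)  # the diff passes a string like this to subprocess.run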

View File

@@ -69,30 +69,22 @@ class MergeObj:
             self.logger.error(f"合并OBJ模型时发生错误: {str(e)}", exc_info=True)
             raise
 
-    def calculate_translation(self, grid_idx: int, grid_points: Dict[int, pd.DataFrame], grid_size: float) -> tuple:
-        """根据网格索引和大小计算平移量"""
-        # 从grid_points中获取网格划分器
-        grid_divider = grid_points.get('grid_divider', None)
-        if grid_divider is None:
-            # 如果没有grid_divider使用默认的计算方式
-            row = grid_idx // 2
-            col = grid_idx % 2
-        else:
-            # 使用grid_divider获取正确的网格坐标
-            row, col = grid_divider.get_grid_coordinates(grid_idx)
+    def calculate_translation(self, grid_id: tuple, grid_size: float) -> tuple:
+        """根据网格坐标和大小计算平移量"""
+        # 直接使用网格的二维坐标计算平移量
+        col, row = grid_id  # grid_id是(width_idx, height_idx)格式
 
         # 计算平移量,考虑到重叠
-        overlap_factor = 0.9  # 重叠因子与grid_divider中的overlap对应
-        x_translation = col * grid_size * overlap_factor
-        y_translation = row * grid_size * overlap_factor
+        x_translation = col * grid_size
+        y_translation = row * grid_size
 
         self.logger.info(
-            f"网格 {grid_idx} 的位置: 行={row}, 列={col}"
+            f"网格 ({col},{row}) 的平移量: x={x_translation}, y={y_translation}"
         )
 
         return (x_translation, y_translation, 0)  # z轴不需要平移
 
-    def merge_grid_obj(self, grid_points: Dict[int, pd.DataFrame], grid_size: float = 500):
+    def merge_grid_obj(self, grid_points: Dict[tuple, pd.DataFrame], grid_size: float = 500):
         """合并所有网格的OBJ模型"""
         self.logger.info("开始合并所有网格的OBJ模型")
@@ -104,20 +96,17 @@ class MergeObj:
         merge_count = 0
         try:
-            for grid_idx, points in grid_points.items():
-                if grid_idx == 'grid_divider':  # 跳过grid_divider对象
-                    continue
+            for grid_id, points in grid_points.items():
                 grid_obj = os.path.join(
                     self.output_dir,
-                    f"grid_{grid_idx + 1}",
+                    f"grid_{grid_id[0]}_{grid_id[1]}",
                     "project",
                     "odm_texturing",
                     "odm_textured_model_geo.obj"
                 )
 
                 if not os.path.exists(grid_obj):
-                    self.logger.warning(f"网格 {grid_idx + 1} 的OBJ文件不存在: {grid_obj}")
+                    self.logger.warning(f"网格 ({grid_id[0]},{grid_id[1]}) 的OBJ文件不存在: {grid_obj}")
                     continue
 
                 if input_obj1 is None:
@@ -128,7 +117,7 @@ class MergeObj:
                 output_obj = os.path.join(self.output_dir, f"merged_model_{merge_count}.obj")
 
                 # 计算当前网格的平移量
-                translation = self.calculate_translation(grid_idx, grid_points, grid_size)
+                translation = self.calculate_translation(grid_id, grid_size)
 
                 self.logger.info(
                     f"开始合并第 {merge_count + 1} 次:\n"
@@ -169,14 +158,13 @@ if __name__ == "__main__":
     setup_logger(output_dir)
 
     # 构造测试用的grid_points字典
-    # 假设我们有两个网格每个网格包含一些GPS点的DataFrame
     grid_points = {
-        0: pd.DataFrame({
+        (0, 0): pd.DataFrame({
            'latitude': [39.9, 39.91],
            'longitude': [116.3, 116.31],
            'altitude': [100, 101]
        }),
-        1: pd.DataFrame({
+        (0, 1): pd.DataFrame({
            'latitude': [39.92, 39.93],
            'longitude': [116.32, 116.33],
            'altitude': [102, 103]
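With the grid_divider lookup removed, the translation for each textured model is computed directly from the grid's 2D coordinate, and the old 0.9 overlap factor is dropped. A minimal standalone sketch of that rule (not the repo code), with a worked example:

def calculate_translation(grid_id: tuple, grid_size: float) -> tuple:
    col, row = grid_id                            # grid_id is (width_idx, height_idx)
    return (col * grid_size, row * grid_size, 0)  # no translation along z

# Worked example: with a 500-unit grid, grid (2, 1) is offset by (1000, 500, 0).
print(calculate_translation((2, 1), 500))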

View File

@@ -73,7 +73,7 @@ class MergeTif:
             self.logger.error(f"影像拼接过程中发生错误: {str(e)}", exc_info=True)
             raise
 
-    def merge_grid_tif(self, grid_points: Dict[int, pd.DataFrame], product_info: dict):
+    def merge_grid_tif(self, grid_points: Dict[tuple, pd.DataFrame], product_info: dict):
         """合并指定产品的所有网格"""
         product_name = product_info['name']
         product_path = product_info['path']
@@ -89,10 +89,10 @@ class MergeTif:
         merge_count = 0
         try:
-            for grid_idx, points in grid_points.items():
+            for grid_id, points in grid_points.items():
                 grid_tif = os.path.join(
                     self.output_dir,
-                    f"grid_{grid_idx + 1}",
+                    f"grid_{grid_id[0]}_{grid_id[1]}",
                     "project",
                     product_path,
                     filename
@@ -100,7 +100,7 @@ class MergeTif:
                 if not os.path.exists(grid_tif):
                     self.logger.warning(
-                        f"网格 {grid_idx + 1}{product_name}不存在: {grid_tif}")
+                        f"网格 ({grid_id[0]},{grid_id[1]}){product_name}不存在: {grid_tif}")
                     continue
 
                 if input_tif1 is None:
@@ -134,7 +134,7 @@ class MergeTif:
                 f"{product_name}合并过程中发生错误: {str(e)}", exc_info=True)
             raise
 
-    def merge_all_tifs(self, grid_points: Dict[int, pd.DataFrame]):
+    def merge_all_tifs(self, grid_points: Dict[tuple, pd.DataFrame]):
         """合并所有产品正射影像、DSM和DTM"""
         try:
             products = [
@@ -181,12 +181,12 @@ if __name__ == "__main__":
     # 构造测试用的grid_points字典
     # 假设我们有两个网格每个网格包含一些GPS点的DataFrame
     grid_points = {
-        0: pd.DataFrame({
+        (0, 0): pd.DataFrame({
            'latitude': [39.9, 39.91],
            'longitude': [116.3, 116.31],
            'altitude': [100, 101]
        }),
-        1: pd.DataFrame({
+        (0, 1): pd.DataFrame({
            'latitude': [39.92, 39.93],
            'longitude': [116.32, 116.33],
            'altitude': [102, 103]

View File

@@ -37,8 +37,6 @@ class GridDivider:
         lon_step = (max_lon - min_lon) / self.num_grids_width
 
         grids = []
-        grid_indices = {}  # 存储网格的二维索引
-        grid_idx = 0
 
         for i in range(self.num_grids_height):
             for j in range(self.num_grids_width):
@@ -47,61 +45,54 @@
                 grid_min_lon = min_lon + j * lon_step - self.overlap * lon_step
                 grid_max_lon = min_lon + (j + 1) * lon_step + self.overlap * lon_step
 
+                grid_id = (j, i)  # 使用(width_idx, height_idx)元组作为网格标识
                 grids.append((grid_min_lat, grid_max_lat, grid_min_lon, grid_max_lon))
-                grid_indices[grid_idx] = (i, j)  # 存储每个网格的行列索引
                 self.logger.debug(
-                    f"网格[{i},{j}] (索引{grid_idx}): 纬度[{grid_min_lat:.6f}, {grid_max_lat:.6f}], "
+                    f"网格[{j},{i}]: 纬度[{grid_min_lat:.6f}, {grid_max_lat:.6f}], "
                     f"经度[{grid_min_lon:.6f}, {grid_max_lon:.6f}]"
                 )
-                grid_idx += 1
 
         self.logger.info(
             f"成功划分为 {len(grids)} 个网格 ({self.num_grids_width}x{self.num_grids_height})")
 
-        # 保存网格索引信息
-        self.grid_indices = grid_indices
-
         # 添加可视化调用
         self.visualize_grids(points_df, grids)
 
         return grids
 
-    def get_grid_coordinates(self, grid_idx):
-        """获取网格的二维坐标"""
-        return self.grid_indices.get(grid_idx, (0, 0))
-
-    def get_grid_dimensions(self):
-        """获取网格的维度"""
-        return self.num_grids_width, self.num_grids_height
-
     def assign_to_grids(self, points_df, grids):
         """将点分配到对应网格"""
         self.logger.info(f"开始将 {len(points_df)} 个点分配到网格中")
 
-        grid_points = {i: [] for i in range(len(grids))}
+        grid_points = {}  # 使用字典存储每个网格的点
         points_assigned = 0
         multiple_grid_points = 0
 
+        for i in range(self.num_grids_height):
+            for j in range(self.num_grids_width):
+                grid_points[(j, i)] = []  # 使用(width_idx, height_idx)元组
+
         for _, point in points_df.iterrows():
             point_assigned = False
-            for i, (min_lat, max_lat, min_lon, max_lon) in enumerate(grids):
-                if min_lat <= point['lat'] <= max_lat and min_lon <= point['lon'] <= max_lon:
-                    grid_points[i].append(point.to_dict())
-                    if point_assigned:
-                        multiple_grid_points += 1
-                    else:
-                        points_assigned += 1
-                        point_assigned = True
-                    self.logger.debug(
-                        f"{point['file']} (纬度: {point['lat']:.6f}, 经度: {point['lon']:.6f}) "
-                        f"被分配到网格"
-                    )
+            for i in range(self.num_grids_height):
+                for j in range(self.num_grids_width):
+                    grid_idx = i * self.num_grids_width + j
+                    min_lat, max_lat, min_lon, max_lon = grids[grid_idx]
+                    if min_lat <= point['lat'] <= max_lat and min_lon <= point['lon'] <= max_lon:
+                        grid_points[(j, i)].append(point.to_dict())
+                        if point_assigned:
+                            multiple_grid_points += 1
+                        else:
+                            points_assigned += 1
+                            point_assigned = True
 
         # 记录每个网格的点数
-        for grid_idx, points in grid_points.items():
-            self.logger.info(f"网格 {grid_idx} 包含 {len(points)} 个点")
+        for grid_id, points in grid_points.items():
+            self.logger.info(f"网格 {grid_id} 包含 {len(points)} 个点")
 
         self.logger.info(
             f"点分配完成: 总点数 {len(points_df)}, "
@@ -122,15 +113,19 @@ class GridDivider:
                     c='blue', s=10, alpha=0.6, label='GPS点')
 
         # 绘制网格
-        for i, (min_lat, max_lat, min_lon, max_lon) in enumerate(grids):
-            plt.plot([min_lon, max_lon, max_lon, min_lon, min_lon],
-                     [min_lat, min_lat, max_lat, max_lat, min_lat],
-                     'r-', alpha=0.5)
-            # 在网格中心添加网格编号
-            center_lon = (min_lon + max_lon) / 2
-            center_lat = (min_lat + max_lat) / 2
-            plt.text(center_lon, center_lat, str(i),
-                     horizontalalignment='center', verticalalignment='center')
+        for i in range(self.num_grids_height):
+            for j in range(self.num_grids_width):
+                grid_idx = i * self.num_grids_width + j
+                min_lat, max_lat, min_lon, max_lon = grids[grid_idx]
+
+                plt.plot([min_lon, max_lon, max_lon, min_lon, min_lon],
+                         [min_lat, min_lat, max_lat, max_lat, min_lat],
+                         'r-', alpha=0.5)
+                # 在网格中心添加网格编号
+                center_lon = (min_lon + max_lon) / 2
+                center_lat = (min_lat + max_lat) / 2
+                plt.text(center_lon, center_lat, f"({j},{i})",  # 显示(width_idx, height_idx)
+                         horizontalalignment='center', verticalalignment='center')
 
         plt.title('网格划分与GPS点分布图')
         plt.xlabel('经度')
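The assignment step now pre-creates one bucket per (width_idx, height_idx) key and maps the nested (i, j) loop back to the flat, row-major grids list via i * num_grids_width + j. A minimal sketch with hypothetical bounds and a single point; because grids overlap, a point can legitimately land in more than one bucket:

num_grids_width, num_grids_height = 2, 2
# (min_lat, max_lat, min_lon, max_lon) per grid, stored in row-major order
grids = [
    (39.90, 39.92, 116.30, 116.32), (39.90, 39.92, 116.32, 116.34),
    (39.92, 39.94, 116.30, 116.32), (39.92, 39.94, 116.32, 116.34),
]
points = [{"file": "IMG_0001.JPG", "lat": 39.91, "lon": 116.33}]

grid_points = {(j, i): [] for i in range(num_grids_height) for j in range(num_grids_width)}
for p in points:
    for i in range(num_grids_height):
        for j in range(num_grids_width):
            min_lat, max_lat, min_lon, max_lon = grids[i * num_grids_width + j]
            if min_lat <= p["lat"] <= max_lat and min_lon <= p["lon"] <= max_lon:
                grid_points[(j, i)].append(p)
print(grid_points)  # IMG_0001.JPG ends up in grid (1, 0)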

View File

@@ -20,9 +20,9 @@ class ODMProcessMonitor:
             success_markers.append('odm_texturing')
         return all(os.path.exists(os.path.join(grid_dir, 'project', marker)) for marker in success_markers)
 
-    def run_odm_with_monitor(self, grid_dir: str, grid_idx: int, fast_mode: bool = True) -> Tuple[bool, str]:
+    def run_odm_with_monitor(self, grid_dir: str, grid_id: tuple, fast_mode: bool = True) -> Tuple[bool, str]:
         """运行ODM命令"""
-        self.logger.info(f"开始处理网格 {grid_idx + 1}")
+        self.logger.info(f"开始处理网格 ({grid_id[0]},{grid_id[1]})")
 
         # 构建Docker命令
         grid_dir = grid_dir[0].lower()+grid_dir[1:].replace('\\', '/')
@@ -54,25 +54,25 @@ class ODMProcessMonitor:
             self.logger.error(f"==========stderr==========: {stderr}")
 
         # 检查执行结果
         if self._check_success(grid_dir):
-            self.logger.info(f"网格 {grid_idx + 1} 处理成功")
+            self.logger.info(f"网格 ({grid_id[0]},{grid_id[1]}) 处理成功")
             return True, ""
         else:
-            self.logger.error(f"网格 {grid_idx + 1} 处理失败")
-            return False, f"网格 {grid_idx + 1} 处理失败"
+            self.logger.error(f"网格 ({grid_id[0]},{grid_id[1]}) 处理失败")
+            return False, f"网格 ({grid_id[0]},{grid_id[1]}) 处理失败"
 
-    def process_all_grids(self, grid_points: Dict[int, pd.DataFrame]):
+    def process_all_grids(self, grid_points: Dict[tuple, pd.DataFrame]):
         """处理所有网格"""
         self.logger.info("开始执行网格处理")
-        for grid_idx in grid_points.keys():
+        for grid_id in grid_points.keys():
             grid_dir = os.path.join(
-                self.output_dir, f'grid_{grid_idx + 1}'
+                self.output_dir, f'grid_{grid_id[0]}_{grid_id[1]}'
             )
             success, error_msg = self.run_odm_with_monitor(
                 grid_dir=grid_dir,
-                grid_idx=grid_idx,
+                grid_id=grid_id,
                 fast_mode=(self.mode == "快拼模式")
             )
             if not success:
-                raise Exception(f"网格 {grid_idx + 1} 处理失败: {error_msg}")
+                raise Exception(f"网格 ({grid_id[0]},{grid_id[1]}) 处理失败: {error_msg}")