diff --git a/app_plugin.py b/app_plugin.py
index 9518b15..c7b91a0 100644
--- a/app_plugin.py
+++ b/app_plugin.py
@@ -1,12 +1,12 @@
 import os
 import shutil
-from datetime import timedelta
 from dataclasses import dataclass
 from typing import Dict, Tuple
-import psutil
 import pandas as pd
 
 from filter.cluster_filter import GPSCluster
+from utils.directory_manager import DirectoryManager
 from utils.odm_monitor import ODMProcessMonitor
 from utils.gps_extractor import GPSExtractor
 from utils.grid_divider import GridDivider
@@ -19,115 +19,38 @@ from post_pro.conv_obj import ConvertOBJ
 @dataclass
 class ProcessConfig:
     """Preprocessing configuration"""
     image_dir: str
     output_dir: str
     # Cluster-filter parameters
     cluster_eps: float = 0.01
     cluster_min_samples: int = 5
-    # Time-group overlap filter parameters
-    time_group_overlap_threshold: float = 0.7
-    time_group_interval: timedelta = timedelta(minutes=5)
-    # Isolated-point filter parameters
-    filter_distance_threshold: float = 0.001  # lat/lon distance
-    filter_min_neighbors: int = 6
-    # Dense-point filter parameters
-    filter_grid_size: float = 0.001
-    filter_dense_distance_threshold: float = 10  # ground distance in meters
-    filter_time_threshold: timedelta = timedelta(minutes=5)
+    # Grid-division parameters
     grid_overlap: float = 0.05
     grid_size: float = 500
-    # Switches for the pipeline stages
-    mode: str = "快拼模式"
-    accuracy: str = "medium"
-    produce_dem: bool = False
+
+    mode: str = "三维模式"
 
 
 class ODM_Plugin:
     def __init__(self, config):
         self.config = config
 
-        # Check disk space
-        self._check_disk_space()
-
+        # Initialize the directory manager
+        self.dir_manager = DirectoryManager(config)
         # Clean and recreate the output directory
-        if os.path.exists(config.output_dir):
-            self._clean_output_dir()
-        self._setup_output_dirs()
+        self.dir_manager.clean_output_dir()
+        self.dir_manager.setup_output_dirs()
+        # Check disk space
+        self.dir_manager.check_disk_space()
 
         # Initialize the remaining components
         self.logger = setup_logger(config.output_dir)
-        self.gps_points = None
+        self.gps_points = pd.DataFrame(columns=["file", "lat", "lon"])
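+        # Placeholder with the expected columns so downstream steps can rely
+        # on the frame's layout instead of checking for None.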
         self.odm_monitor = ODMProcessMonitor(
             config.output_dir, mode=config.mode)
         self.visualizer = FilterVisualizer(config.output_dir)
 
-    def _clean_output_dir(self):
-        """Clean the output directory"""
-        try:
-            shutil.rmtree(self.config.output_dir)
-            print(f"Cleaned output directory: {self.config.output_dir}")
-        except Exception as e:
-            print(f"Error while cleaning the output directory: {str(e)}")
-            raise
-
-    def _setup_output_dirs(self):
-        """Create the required output directory structure"""
-        try:
-            # Main output directory
-            os.makedirs(self.config.output_dir)
-
-            # Directory for the filter visualizations
-            os.makedirs(os.path.join(self.config.output_dir, 'filter_imgs'))
-
-            # Log directory
-            os.makedirs(os.path.join(self.config.output_dir, 'logs'))
-
-            print(f"Created output directory structure: {self.config.output_dir}")
-        except Exception as e:
-            print(f"Error while creating the output directories: {str(e)}")
-            raise
-
-    def _get_directory_size(self, path):
-        """Return the total size of a directory in bytes"""
-        total_size = 0
-        for dirpath, dirnames, filenames in os.walk(path):
-            for filename in filenames:
-                file_path = os.path.join(dirpath, filename)
-                try:
-                    total_size += os.path.getsize(file_path)
-                except (OSError, FileNotFoundError):
-                    continue
-        return total_size
-
-    def _check_disk_space(self):
-        """Check whether there is enough free disk space"""
-        # Size of the input directory
-        input_size = self._get_directory_size(self.config.image_dir)
-
-        # Free space on the drive that holds the output directory
-        output_drive = os.path.splitdrive(
-            os.path.abspath(self.config.output_dir))[0]
-        if not output_drive:  # Linux/Unix paths
-            output_drive = '/home'
-
-        disk_usage = psutil.disk_usage(output_drive)
-        free_space = disk_usage.free
-
-        # Required space (12x the input size)
-        required_space = input_size * 12
-
-        if free_space < required_space:
-            error_msg = (
-                f"Not enough disk space!\n"
-                f"Input directory size: {input_size / (1024**3):.2f} GB\n"
-                f"Required space: {required_space / (1024**3):.2f} GB\n"
-                f"Free space: {free_space / (1024**3):.2f} GB\n"
-                f"on drive {output_drive}"
-            )
-            raise RuntimeError(error_msg)
-
     def extract_gps(self) -> pd.DataFrame:
         """Extract the GPS data"""
         self.logger.info("Starting GPS extraction")
@@ -150,10 +73,9 @@ class ODM_Plugin:
             self.visualizer.visualize_filter_step(
                 self.gps_points, previous_points, "1-Clustering")
 
-    def divide_grids(self) -> Tuple[Dict[tuple, pd.DataFrame], Dict[tuple, tuple]]:
+    def divide_grids(self) -> Dict[tuple, pd.DataFrame]:
         """Divide the survey area into grids
         Returns:
-            tuple: (grid_points, translations)
-            grid_points: dict of per-grid GPS points
-            translations: dict of per-grid translation offsets
+            Dict[tuple, pd.DataFrame]: GPS points keyed by grid id
         """
@@ -162,14 +84,14 @@ class ODM_Plugin:
             grid_size=self.config.grid_size,
             output_dir=self.config.output_dir
         )
-        grids, translations, grid_points = grid_divider.adjust_grid_size_and_overlap(
+        grids, grid_points = grid_divider.adjust_grid_size_and_overlap(
            self.gps_points
         )
         grid_divider.visualize_grids(self.gps_points, grids)
 
         if len(grids) >= 20:
             self.logger.warning("More than 20 grids; the partitioning needs manual adjustment")
 
-        return grid_points, translations
+        return grid_points
 
     def copy_images(self, grid_points: Dict[tuple, pd.DataFrame]):
         """Copy the images into the per-grid folders"""
@@ -192,46 +114,45 @@ class ODM_Plugin:
             self.logger.info(
                 f"Grid ({grid_id[0]},{grid_id[1]}) contains {len(points)} images")
 
-    def merge_tif(self, grid_points: Dict[tuple, pd.DataFrame], mode: str, produce_dem: bool):
+    def merge_tif(self, grid_lt):
         """Merge the imagery products of all grids"""
         self.logger.info("Merging the imagery products of all grids")
         merger = MergeTif(self.config.output_dir)
-        merger.merge_orthophoto(grid_points)
+        merger.merge_orthophoto(grid_lt)
 
-    def convert_obj(self, grid_points: Dict[tuple, pd.DataFrame]):
+    def convert_obj(self, grid_lt):
         """Convert the OBJ models"""
         self.logger.info("Converting the OBJ models")
         converter = ConvertOBJ(self.config.output_dir)
-        converter.convert_grid_obj(grid_points)
+        converter.convert_grid_obj(grid_lt)
 
-    def post_process(self, successful_grid_points: Dict[tuple, pd.DataFrame], grid_points: Dict[tuple, pd.DataFrame], translations: Dict[tuple, tuple]):
+    def post_process(self, successful_grid_lt: list, grid_points: Dict[tuple, pd.DataFrame]):
         """Post-processing: merge or convert the per-grid results"""
-        if len(successful_grid_points) < len(grid_points):
+        if len(successful_grid_lt) < len(grid_points):
             self.logger.warning(
-                f"{len(grid_points) - len(successful_grid_points)} grids failed; "
-                f"only the {len(successful_grid_points)} successfully processed grids will be merged"
+                f"{len(grid_points) - len(successful_grid_lt)} grids failed; "
+                f"only the {len(successful_grid_lt)} successfully processed grids will be merged"
             )
-        self.merge_tif(successful_grid_points, self.config.mode,
-                       self.config.produce_dem)
+        self.merge_tif(successful_grid_lt)
         if self.config.mode == "三维模式":
-
-            self.convert_obj(successful_grid_points)
+            self.convert_obj(successful_grid_lt)
 
     def process(self):
         """Run the complete preprocessing pipeline"""
         try:
             self.extract_gps()
             self.cluster()
-            grid_points, translations = self.divide_grids()
+            grid_points = self.divide_grids()
             self.copy_images(grid_points)
             self.logger.info("Preprocessing finished")
 
-            successful_grid_points = self.odm_monitor.process_all_grids(
-                grid_points, self.config.produce_dem, self.config.accuracy)
+            successful_grid_lt = self.odm_monitor.process_all_grids(
+                grid_points)
 
-            self.post_process(successful_grid_points,
-                              grid_points, translations)
+            self.post_process(successful_grid_lt, grid_points)
             self.logger.info("Reconstruction finished")
         except Exception as e:
diff --git a/main.py b/main.py
index b29b856..a2cea32 100644
--- a/main.py
+++ b/main.py
@@ -1,5 +1,4 @@
 import argparse
-from datetime import timedelta
 
 from app_plugin import ProcessConfig, ODM_Plugin
 
@@ -12,13 +11,10 @@ def parse_args():
 
     # Optional arguments
     parser.add_argument('--mode', default='三维模式',
-                        choices=['快拼模式', '三维模式', '重建模式'], help='processing mode')
-    parser.add_argument('--accuracy', default='medium',
-                        choices=['high', 'medium', 'low'], help='accuracy')
+                        choices=['快拼模式', '三维模式'], help='processing mode')
     parser.add_argument('--grid_size', type=float, default=800, help='grid size (meters)')
     parser.add_argument('--grid_overlap', type=float, default=0.05, help='grid overlap ratio')
-    # parser.add_argument('--produce_dem', action='store_true', help='whether to generate a DEM')
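+    # Example invocation (assuming the required --image_dir/--output_dir
+    # arguments are defined above):
+    #   python main.py --image_dir ./images --output_dir ./out --mode 三维模式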
 
     args = parser.parse_args()
     return args
@@ -32,21 +28,12 @@ def main():
         image_dir=args.image_dir,
         output_dir=args.output_dir,
         mode=args.mode,
-        accuracy=args.accuracy,
         grid_size=args.grid_size,
         grid_overlap=args.grid_overlap,
-        produce_dem=True,  # remaining parameters use their defaults
+        # Remaining parameters use their defaults
         cluster_eps=0.01,
         cluster_min_samples=5,
-        time_group_overlap_threshold=0.7,
-        time_group_interval=timedelta(minutes=5),
-        filter_distance_threshold=0.001,
-        filter_min_neighbors=6,
-        filter_grid_size=0.001,
-        filter_dense_distance_threshold=10,
-        filter_time_threshold=timedelta(minutes=5),
     )
 
     # Create the processor and run it
diff --git a/post_pro/conv_obj.py b/post_pro/conv_obj.py
index 97a84a6..0112790 100644
--- a/post_pro/conv_obj.py
+++ b/post_pro/conv_obj.py
@@ -18,13 +18,13 @@ class ConvertOBJ:
             "EPSG:32649", "EPSG:4326", always_xy=True)
         self.logger = logging.getLogger('UAV_Preprocess.ConvertOBJ')
 
-    def convert_grid_obj(self, grid_points):
+    def convert_grid_obj(self, grid_lt):
         """Convert each grid's OBJ model to OSGB format"""
         os.makedirs(os.path.join(self.output_dir, "osgb", "Data"), exist_ok=True)
 
         # Use the first grid's UTM coordinates as the reference frame
-        first_grid_id = list(grid_points.keys())[0]
+        first_grid_id = grid_lt[0]
         first_grid_dir = os.path.join(
             self.output_dir,
             f"grid_{first_grid_id[0]}_{first_grid_id[1]}",
@@ -34,15 +34,15 @@ class ConvertOBJ:
             first_grid_dir, "odm_orthophoto", "odm_orthophoto_log.txt")
         self.ref_east, self.ref_north = self.read_utm_offset(log_file)
 
-        for grid_id in grid_points.keys():
+        for grid_id in grid_lt:
             try:
-                self._convert_single_grid(grid_id, grid_points)
+                self._convert_single_grid(grid_id)
             except Exception as e:
                 self.logger.error(f"Conversion failed for grid {grid_id}: {str(e)}")
 
         self._create_merged_metadata()
 
-    def _convert_single_grid(self, grid_id, grid_points):
+    def _convert_single_grid(self, grid_id):
         """Convert a single grid's OBJ model"""
         # Build the relevant paths
         grid_name = f"grid_{grid_id[0]}_{grid_id[1]}"
         project_dir = os.path.join(self.output_dir, grid_name, "project")
         texturing_dir = os.path.join(project_dir, "odm_texturing")
         texturing_dst_dir = os.path.join(project_dir, "odm_texturing_dst")
         split_obj_dir = os.path.join(texturing_dst_dir, "split_obj")
-        opensfm_dir = os.path.join(project_dir, "opensfm")
         log_file = os.path.join(
             project_dir, "odm_orthophoto", "odm_orthophoto_log.txt")
         os.makedirs(texturing_dst_dir, exist_ok=True)
diff --git a/post_pro/merge_tif.py b/post_pro/merge_tif.py
index 6e004ef..6e563ad 100644
--- a/post_pro/merge_tif.py
+++ b/post_pro/merge_tif.py
@@ -19,11 +19,11 @@ class MergeTif:
         self.output_dir = output_dir
         self.logger = logging.getLogger('UAV_Preprocess.MergeTif')
 
-    def merge_orthophoto(self, grid_points: Dict[tuple, pd.DataFrame]):
+    def merge_orthophoto(self, grid_lt: list):
         """Merge the orthophotos of the grids"""
         try:
             all_orthos_and_ortho_cuts = []
-            for grid_id, points in grid_points.items():
+            for grid_id in grid_lt:
                 grid_ortho_dir = os.path.join(
                     self.output_dir,
                     f"grid_{grid_id[0]}_{grid_id[1]}",
diff --git a/utils/directory_manager.py b/utils/directory_manager.py
new file mode 100644
index 0000000..bd831e7
--- /dev/null
+++ b/utils/directory_manager.py
@@ -0,0 +1,78 @@
+import os
+import shutil
+import psutil
+
+
+class DirectoryManager:
+    def __init__(self, config):
+        """
+        Initialize the directory manager
+        Args:
+            config: configuration object holding the input and output directories
+        """
+        self.config = config
+
+    def clean_output_dir(self):
+        """Clean the output directory"""
+        try:
+            if os.path.exists(self.config.output_dir):
+                shutil.rmtree(self.config.output_dir)
+                print(f"Cleaned output directory: {self.config.output_dir}")
+        except Exception as e:
+            print(f"Error while cleaning the output directory: {str(e)}")
+            raise
+
+    def setup_output_dirs(self):
+        """Create the required output directory structure"""
+        try:
+            # Main output directory
+            os.makedirs(self.config.output_dir)
+
+            # Directory for the filter visualizations
+            os.makedirs(os.path.join(self.config.output_dir, 'filter_imgs'))
+
+            # Log directory
+            os.makedirs(os.path.join(self.config.output_dir, 'logs'))
+
+            print(f"Created output directory structure: {self.config.output_dir}")
+        except Exception as e:
+            print(f"Error while creating the output directories: {str(e)}")
+            raise
+
+    def _get_directory_size(self, path):
+        """Return the total size of a directory in bytes"""
+        total_size = 0
+        for dirpath, dirnames, filenames in os.walk(path):
+            for filename in filenames:
+                file_path = os.path.join(dirpath, filename)
+                try:
+                    total_size += os.path.getsize(file_path)
+                except (OSError, FileNotFoundError):
+                    continue
+        return total_size
+
+    def check_disk_space(self):
+        """Check whether there is enough free disk space"""
+        # Size of the input directory
+        input_size = self._get_directory_size(self.config.image_dir)
+
+        # Free space on the drive that holds the output directory
+        output_drive = os.path.splitdrive(
+            os.path.abspath(self.config.output_dir))[0]
+        if not output_drive:  # Linux/Unix paths
+            output_drive = '/home'
+
+        disk_usage = psutil.disk_usage(output_drive)
+        free_space = disk_usage.free
+
+        # Required space (10x the input size)
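+        # NOTE: 10x is a rough heuristic; ODM's intermediate products
+        # (undistorted images, point clouds, textures) can exceed the
+        # input size several times over.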
+        required_space = input_size * 10
+
+        if free_space < required_space:
+            error_msg = (
+                f"Not enough disk space!\n"
+                f"Input directory size: {input_size / (1024**3):.2f} GB\n"
+                f"Required space: {required_space / (1024**3):.2f} GB\n"
+                f"Free space: {free_space / (1024**3):.2f} GB\n"
+                f"on drive {output_drive}"
+            )
+            raise RuntimeError(error_msg)
diff --git a/utils/gps_extractor.py b/utils/gps_extractor.py
index e5b6859..3e4090a 100644
--- a/utils/gps_extractor.py
+++ b/utils/gps_extractor.py
@@ -3,11 +3,10 @@ from PIL import Image
 import piexif
 import logging
 import pandas as pd
-from datetime import datetime
 
 
 class GPSExtractor:
-    """Extract GPS coordinates and capture dates from image files"""
+    """Extract GPS coordinates from image files"""
 
     def __init__(self, image_dir):
         self.image_dir = image_dir
@@ -18,75 +17,49 @@ class GPSExtractor:
         """Convert DMS format to decimal degrees"""
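+        # piexif returns each DMS component as a (numerator, denominator)
+        # rational, e.g. ((39, 1), (54, 1), (3012, 100)) ->
+        # 39 + 54/60 + 30.12/3600 degrees (illustrative values).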
self.logger.error(f"提取图片信息时发生错误: {image_path} - {str(e)}") return None, None, None def extract_all_gps(self): - """提取所有图片的GPS坐标和拍摄日期""" - self.logger.info(f"开始从目录提取GPS坐标和拍摄日期: {self.image_dir}") + """提取所有图片的GPS坐标""" + self.logger.info(f"开始从目录提取GPS坐标: {self.image_dir}") gps_data = [] total_images = 0 successful_extractions = 0 - + for image_file in os.listdir(self.image_dir): total_images += 1 image_path = os.path.join(self.image_dir, image_file) - lat, lon, date = self.get_gps_and_date(image_path) + lat, lon = self.get_gps(image_path) if lat and lon: # 仍然以GPS信息作为主要判断依据 successful_extractions += 1 gps_data.append({ 'file': image_file, 'lat': lat, 'lon': lon, - 'date': date }) - - self.logger.info(f"GPS坐标和拍摄日期提取完成 - 总图片数: {total_images}, 成功提取: {successful_extractions}, 失败: {total_images - successful_extractions}") + + self.logger.info( + f"GPS坐标提取完成 - 总图片数: {total_images}, 成功提取: {successful_extractions}, 失败: {total_images - successful_extractions}") return pd.DataFrame(gps_data) diff --git a/utils/grid_divider.py b/utils/grid_divider.py index 84d2c99..60e42e3 100644 --- a/utils/grid_divider.py +++ b/utils/grid_divider.py @@ -15,42 +15,6 @@ class GridDivider: self.logger.info(f"初始化网格划分器,重叠率: {overlap}") self.num_grids_width = 0 # 添加网格数量属性 self.num_grids_height = 0 - - def adjust_grid_size(self, points_df): - """动态调整网格大小 - - Args: - points_df: 包含GPS点的DataFrame - - Returns: - tuple: (grids, translations, grid_points, final_grid_size) - """ - self.logger.info(f"开始动态调整网格大小,初始大小: {self.grid_size}米") - - while True: - # 使用当前grid_size划分网格 - grids, translations = self.divide_grids(points_df) - grid_points, multiple_grid_points = self.assign_to_grids(points_df, grids) - - # 检查每个网格中的点数 - max_points = 0 - for grid_id, points in grid_points.items(): - max_points = max(max_points, len(points)) - - self.logger.info(f"当前网格大小: {self.grid_size}米, 单个网格最大点数: {max_points}") - - # 如果最大点数超过1600,减小网格大小 - if max_points > 1600: - self.grid_size -= 100 - self.logger.info(f"点数超过1500,减小网格大小至: {self.grid_size}米") - if self.grid_size < 500: # 设置一个最小网格大小限制 - self.logger.warning("网格大小已达到最小值500米,停止调整") - break - else: - self.logger.info(f"找到合适的网格大小: {self.grid_size}米") - break - return grids - def adjust_grid_size_and_overlap(self, points_df): """动态调整网格重叠率""" @@ -58,8 +22,9 @@ class GridDivider: self.logger.info(f"开始动态调整网格重叠率,初始重叠率: {self.overlap}") while True: # 使用调整好的网格大小划分网格 - grids, translations = self.divide_grids(points_df) - grid_points, multiple_grid_points = self.assign_to_grids(points_df, grids) + grids = self.divide_grids(points_df) + grid_points, multiple_grid_points = self.assign_to_grids( + points_df, grids) if len(grids) == 1: self.logger.info(f"网格数量为1,跳过重叠率调整") @@ -68,16 +33,52 @@ class GridDivider: self.overlap += 0.02 self.logger.info(f"重叠率增加到: {self.overlap}") else: - self.logger.info(f"找到合适的重叠率: {self.overlap}, 有{multiple_grid_points}个点被分配到多个网格") - break - return grids, translations, grid_points - + self.logger.info( + f"找到合适的重叠率: {self.overlap}, 有{multiple_grid_points}个点被分配到多个网格") + break + return grids, grid_points + + def adjust_grid_size(self, points_df): + """动态调整网格大小 + + Args: + points_df: 包含GPS点的DataFrame + + Returns: + tuple: grids + """ + self.logger.info(f"开始动态调整网格大小,初始大小: {self.grid_size}米") + + while True: + # 使用当前grid_size划分网格 + grids = self.divide_grids(points_df) + grid_points, multiple_grid_points = self.assign_to_grids( + points_df, grids) + + # 检查每个网格中的点数 + max_points = 0 + for grid_id, points in grid_points.items(): + max_points = max(max_points, len(points)) + + self.logger.info( + 
f"当前网格大小: {self.grid_size}米, 单个网格最大点数: {max_points}") + + # 如果最大点数超过2000,减小网格大小 + if max_points > 2000: + self.grid_size -= 100 + self.logger.info(f"点数超过2000,减小网格大小至: {self.grid_size}米") + if self.grid_size < 500: # 设置一个最小网格大小限制 + self.logger.warning("网格大小已达到最小值500米,停止调整") + break + else: + self.logger.info(f"找到合适的网格大小: {self.grid_size}米") + break + return grids + def divide_grids(self, points_df): """计算边界框并划分网格 Returns: - tuple: (grids, translations) - - grids: 网格边界列表 - - translations: 网格平移量字典 + tuple: grids 网格边界列表 """ self.logger.info("开始划分网格") @@ -91,12 +92,15 @@ class GridDivider: self.logger.info(f"区域宽度: {width:.2f}米, 高度: {height:.2f}米") # 精细调整网格的长宽,避免出现2*grid_size-1的情况的影响 - grid_size_lt = [self.grid_size -200, self.grid_size -100, self.grid_size , self.grid_size +100, self.grid_size +200] + grid_size_lt = [self.grid_size - 200, self.grid_size - 100, + self.grid_size, self.grid_size + 100, self.grid_size + 200] width_modulus_lt = [width % grid_size for grid_size in grid_size_lt] - grid_width = grid_size_lt[width_modulus_lt.index(min(width_modulus_lt))] + grid_width = grid_size_lt[width_modulus_lt.index( + min(width_modulus_lt))] height_modulus_lt = [height % grid_size for grid_size in grid_size_lt] - grid_height = grid_size_lt[height_modulus_lt.index(min(height_modulus_lt))] + grid_height = grid_size_lt[height_modulus_lt.index( + min(height_modulus_lt))] self.logger.info(f"网格宽度: {grid_width:.2f}米, 网格高度: {grid_height:.2f}米") # 计算需要划分的网格数量 @@ -108,45 +112,30 @@ class GridDivider: lon_step = (max_lon - min_lon) / self.num_grids_width grids = [] - grid_translations = {} # 存储每个网格相对于第一个网格的平移量 # 先创建所有网格 for i in range(self.num_grids_height): for j in range(self.num_grids_width): grid_min_lat = min_lat + i * lat_step - self.overlap * lat_step - grid_max_lat = min_lat + (i + 1) * lat_step + self.overlap * lat_step + grid_max_lat = min_lat + \ + (i + 1) * lat_step + self.overlap * lat_step grid_min_lon = min_lon + j * lon_step - self.overlap * lon_step - grid_max_lon = min_lon + (j + 1) * lon_step + self.overlap * lon_step - - grid_id = (i, j) # 使用(i,j)作为网格标识,i代表行,j代表列 - grid_bounds = (grid_min_lat, grid_max_lat, grid_min_lon, grid_max_lon) + grid_max_lon = min_lon + \ + (j + 1) * lon_step + self.overlap * lon_step + + grid_bounds = (grid_min_lat, grid_max_lat, + grid_min_lon, grid_max_lon) grids.append(grid_bounds) - + self.logger.debug( f"网格[{i},{j}]: 纬度[{grid_min_lat:.6f}, {grid_max_lat:.6f}], " f"经度[{grid_min_lon:.6f}, {grid_max_lon:.6f}]" ) - - # 计算每个网格相对于第一个网格的平移量 - reference_grid = grids[0] - for i in range(self.num_grids_height): - for j in range(self.num_grids_width): - grid_id = (i, j) - grid_idx = i * self.num_grids_width + j - if grid_idx == 0: # 参考网格 - grid_translations[grid_id] = (0, 0) - else: - translation = self.calculate_grid_translation(reference_grid, grids[grid_idx]) - grid_translations[grid_id] = translation - self.logger.debug( - f"网格[{i},{j}]相对于参考网格的平移量: x={translation[0]:.2f}m, y={translation[1]:.2f}m" - ) - + self.logger.info( f"成功划分为 {len(grids)} 个网格 ({self.num_grids_width}x{self.num_grids_height})") - return grids, grid_translations - + return grids def assign_to_grids(self, points_df, grids): """将点分配到对应网格""" @@ -166,7 +155,7 @@ class GridDivider: for j in range(self.num_grids_width): grid_idx = i * self.num_grids_width + j min_lat, max_lat, min_lon, max_lon = grids[grid_idx] - + if min_lat <= point['lat'] <= max_lat and min_lon <= point['lon'] <= max_lon: grid_points[(i, j)].append(point.to_dict()) if point_assigned: @@ -202,20 +191,21 @@ class GridDivider: for j in 
 
 
 class ODMProcessMonitor:
     """ODM process monitor"""
 
-    def __init__(self, output_dir: str, mode: str = "快拼模式"):
+    def __init__(self, output_dir: str, mode: str = "三维模式"):
         self.output_dir = output_dir
         self.logger = logging.getLogger('UAV_Preprocess.ODMMonitor')
         self.mode = mode
 
-    def _check_success(self, grid_dir: str) -> bool:
-        """Check whether ODM finished successfully
-
-        Checked items:
-        1. The required folders and files exist
-        2. The product files are valid
-        """
-        project_dir = os.path.join(grid_dir, 'project')
-
-        # Check different products depending on the mode
-        if self.mode == "快拼模式":
-            # Only check the orthophoto
-            # if not self._check_orthophoto(project_dir):
-            #     return False
-            pass
-
-        elif self.mode == "三维模式":
-            # Check the point cloud and the textured 3D model
-            if not all([
-                os.path.exists(os.path.join(
-                    project_dir, 'odm_georeferencing', 'odm_georeferenced_model.laz')),
-                os.path.exists(os.path.join(
-                    project_dir, 'odm_texturing', 'odm_textured_model_geo.obj'))
-            ]):
-                self.logger.error("The point-cloud or textured-model folders were not generated")
-                return False
-
-            # TODO: add quality checks for the point cloud and the textured model
-
-        elif self.mode == "重建模式":
-            # Check all products
-            if not all([
-                os.path.exists(os.path.join(
-                    project_dir, 'odm_georeferencing', 'odm_georeferenced_model.laz')),
-                os.path.exists(os.path.join(
-                    project_dir, 'odm_texturing', 'odm_textured_model_geo.obj'))
-            ]):
-                self.logger.error("Some of the required folders were not generated")
-                return False
-
-            # Check the orthophoto
-            # if not self._check_orthophoto(project_dir):
-            #     return False
-
-            # TODO: add quality checks for the point cloud and the textured model
-
-        return True
-
-    # TODO how best to validate the orthophoto
-    def _check_orthophoto(self, project_dir: str) -> bool:
-        """Check the quality of the orthophoto"""
-        ortho_path = os.path.join(
-            project_dir, 'odm_orthophoto', 'odm_orthophoto.original.tif')
-
-        if not os.path.exists(ortho_path):
-            self.logger.error("The orthophoto file was not generated")
-            return False
-
-        # Check the file size
-        file_size_mb = os.path.getsize(ortho_path) / (1024 * 1024)  # in MB
-        if file_size_mb < 1:
-            self.logger.error(f"The orthophoto file is too small: {file_size_mb:.2f}MB")
-            return False
-
-        try:
-            # Open the image file
-            ds = gdal.Open(ortho_path)
-            if ds is None:
-                self.logger.error("Cannot open the orthophoto file")
-                return False
-
-            # Read the first band
-            band = ds.GetRasterBand(1)
-
-            # Fetch the statistics
-            stats = band.GetStatistics(False, True)
-            if stats is None:
-                self.logger.error("Cannot obtain the image statistics")
-                return False
-
-            min_val, max_val, mean, std = stats
-
-            # Ratio of no-data pixels
-            no_data_value = band.GetNoDataValue()
-            array = band.ReadAsArray()
-            if no_data_value is not None:
-                no_data_ratio = np.sum(array == no_data_value) / array.size
-            else:
-                no_data_ratio = 0
-
-            # Reject if more than 50% of the pixels are empty
-            if no_data_ratio > 0.5:
-                self.logger.error(f"The orthophoto's no-data ratio is too high: {no_data_ratio:.2%}")
-                return False
-
-            # Reject an all-black or all-white image
-            if max_val - min_val < 1:
-                self.logger.error("The orthophoto may be invalid: the pixel-value range is too small")
-                return False
-
-            ds = None  # close the dataset
-            return True
-
-        except Exception as e:
-            self.logger.error(f"Error while checking the orthophoto: {str(e)}")
-            return False
-
-    def run_odm_with_monitor(self, grid_dir: str, grid_id: tuple, produce_dem: bool = False, accuracy='medium') -> Tuple[bool, str]:
+    def run_odm_with_monitor(self, grid_dir: str, grid_id: tuple) -> Tuple[bool, str]:
         """Run the ODM command"""
         self.logger.info(f"Processing grid ({grid_id[0]},{grid_id[1]})")
         success = False
@@ -136,140 +24,93 @@ class ODMProcessMonitor:
         max_retries = 3
         current_try = 0
-        # The mode decides whether to use the lowest-quality setting
-        use_lowest_quality = self.mode == "快拼模式"
+        # Initialize the Docker client
+        client = docker.from_env()
 
         while current_try < max_retries:
             current_try += 1
             self.logger.info(
                 f"Attempt {current_try} at processing grid ({grid_id[0]},{grid_id[1]})")
-            try:
-                # Build the Docker command
-                grid_dir = grid_dir[0].lower()+grid_dir[1:].replace('\\', '/')
-                docker_command = (
-                    f"docker run --gpus all -ti --rm "
-                    f"-v {grid_dir}:/datasets "
-                    f"opendronemap/odm:gpu "
-                    f"--project-path /datasets project "
-                    f"--max-concurrency 15 "
-                    f"--force-gps "
-                    f"--use-exif "
-                    f"--use-hybrid-bundle-adjustment "
-                    f"--optimize-disk-space "
-                    f"--orthophoto-cutline "
-                    f"--feature-type sift "
-                    # f"--orthophoto-resolution 8 "
-                )
+            # Build the Docker container run arguments
+            grid_dir = grid_dir[0].lower() + grid_dir[1:].replace('\\', '/')
+            volumes = {
+                grid_dir: {'bind': '/datasets', 'mode': 'rw'}
+            }
+            command = (
+                f"--project-path /datasets project "
+                f"--max-concurrency 15 "
+                f"--force-gps "
+                f"--use-exif "
+                f"--use-hybrid-bundle-adjustment "
+                f"--optimize-disk-space "
+                f"--orthophoto-cutline "
+                f"--feature-type sift "
+                f"--orthophoto-resolution 8 "
+            )
+
+            if self.mode == "快拼模式":
+                command += (
+                    f"--fast-orthophoto "
+                    f"--skip-3dmodel "
+                )
+            else:  # parameters for 三维模式
+                command += (
+                    f"--dsm "
+                    f"--dtm "
+                )
-                if accuracy == "high":
-                    docker_command += (
-                        f"--feature-quality ultra "
-                        f"--pc-quality ultra "
-                        f"--mesh-size 3000000 "
-                        f"--mesh-octree-depth 12 "
-                        f"--orthophoto-resolution 2 "
-                    )
-                if produce_dem and self.mode != "快拼模式":
-                    docker_command += (
-                        f"--dsm "
-                        f"--dtm "
-                    )
+            command += "--rerun-all"
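+            # --rerun-all makes ODM recompute every stage, so a retry never
+            # reuses partial results from a failed attempt.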
-                # Add the lowest-quality flag depending on the setting
-                if use_lowest_quality:
-                    # docker_command += f"--feature-quality lowest "
-                    pass
+            self.logger.info(f"Docker command: {command}")
-                if self.mode == "快拼模式":
-                    docker_command += (
-                        f"--fast-orthophoto "
-                        f"--skip-3dmodel "
-                    )
-                # elif self.mode == "三维模式":
-                #     docker_command += (
-                #         f"--skip-orthophoto "
-                #     )
+            # Run the Docker container
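+            # NOTE: runtime="nvidia" assumes the NVIDIA Container Toolkit is
+            # installed on the host; it stands in for the old CLI's --gpus all.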
f"--project-path /datasets project " + f"--max-concurrency 15 " + f"--force-gps " + f"--use-exif " + f"--use-hybrid-bundle-adjustment " + f"--optimize-disk-space " + f"--orthophoto-cutline " + f"--feature-type sift " + f"--orthophoto-resolution 8 " + ) + + if self.mode == "快拼模式": + command += ( + f"--fast-orthophoto " + f"--skip-3dmodel " + ) + else: # 三维模式参数 + command += ( + f"--dsm " + f"--dtm " ) - if accuracy == "high": - docker_command += ( - f"--feature-quality ultra " - f"--pc-quality ultra " - f"--mesh-size 3000000 " - f"--mesh-octree-depth 12 " - f"--orthophoto-resolution 2 " - ) - if produce_dem and self.mode != "快拼模式": - docker_command += ( - f"--dsm " - f"--dtm " - ) + command += "--rerun-all" - # 根据是否使用lowest quality添加参数 - if use_lowest_quality: - # docker_command += f"--feature-quality lowest " - pass + self.logger.info(f"Docker 命令: {command}") - if self.mode == "快拼模式": - docker_command += ( - f"--fast-orthophoto " - f"--skip-3dmodel " - ) - # elif self.mode == "三维模式": - # docker_command += ( - # f"--skip-orthophoto " - # ) + # 运行 Docker 容器 + container = client.containers.run( + image="opendronemap/odm:gpu", + command=command, + volumes=volumes, + detach=True, + remove=False, + runtime="nvidia", # 使用 GPU + ) + # 等待容器运行完成 + exit_status = container.wait() + if exit_status["StatusCode"] != 0: + self.logger.error( + f"容器运行失败,退出状态码: {exit_status['StatusCode']}") + # 获取容器的错误日志 + error_msg = container.logs( + stderr=True).decode("utf-8").splitlines() + self.logger.error("容器运行失败的详细错误日志:") + for line in error_msg: + self.logger.error(line) + else: + # 获取所有日志 + logs = container.logs().decode("utf-8").splitlines() - docker_command += "--rerun-all" - self.logger.info(docker_command) + # 输出最后 50 行日志 + self.logger.info("容器运行完成,以下是最后 50 行日志:") + for line in logs[-50:]: + self.logger.info(line) + success = True + error_msg = "" + break - process = subprocess.Popen( - docker_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - logging.info(f"进程{process.pid}开始执行") - stdout, stderr = process.communicate() - stdout = stdout.decode('utf-8') - stderr = stderr.decode('utf-8') - # 关闭process,防止内存堆积 - process.terminate() - return_code = process.poll() # 获取进程返回码,如果返回 None 说明进程仍在运行 - if return_code is None: - logging.info(f"进程{process.pid}仍在运行") - else: - logging.info(f"进程{process.pid}已被关闭,返回码:{return_code}.") - - stdout_lines = stdout.strip().split('\n') - last_lines = '\n'.join( - stdout_lines[-50:] if len(stdout_lines) > 10 else stdout_lines) - self.logger.info(f"==========stdout==========: {last_lines}") - - if stderr: - self.logger.error(f"docker run指令执行失败") - self.logger.error(f"==========stderr==========: {stderr}") - if "error during connect" in stderr or "The system cannot find the file specified" in stderr: - error_msg = "Docker没有启动,请启动Docker" - elif "user declined directory sharing" in stderr: - error_msg = "Docker无法访问目录,请检查目录权限和共享设置" - else: - error_msg = "Docker运行失败,需要人工排查错误" - break - else: - self.logger.info("docker run指令执行成功") - if "ODM app finished" in last_lines: - self.logger.info("ODM处理完成") - if self._check_success(grid_dir): - self.logger.info( - f"网格 ({grid_id[0]},{grid_id[1]}) 处理成功") - success = True - error_msg = "" - break - else: - self.logger.error( - f"虽然ODM处理完成,但是生产产品质量可能不合格,需要人工检查") - raise NotOverlapError - # TODO 先写成这样,后面这三种情况可能处理不一样 - elif "enough overlap" in last_lines: - raise NotOverlapError - elif "out of memory" in last_lines: - raise NotOverlapError - elif "strange values" in last_lines: - raise NotOverlapError - else: - raise NotOverlapError - - 
+            exit_status = container.wait()
+            if exit_status["StatusCode"] != 0:
+                self.logger.error(
+                    f"Container run failed, exit status code: {exit_status['StatusCode']}")
+                # Fetch the container's error log (stderr only)
+                error_lines = container.logs(
+                    stdout=False, stderr=True).decode("utf-8").splitlines()
+                self.logger.error("Detailed error log of the failed container run:")
+                for line in error_lines:
+                    self.logger.error(line)
+                # Keep error_msg a string, matching the declared return type
+                error_msg = f"Container run failed, exit status code: {exit_status['StatusCode']}"
+            else:
+                # Fetch the full log
+                logs = container.logs().decode("utf-8").splitlines()
-                docker_command += "--rerun-all"
-                self.logger.info(docker_command)
+                # Print the last 50 lines
+                self.logger.info("Container finished; the last 50 log lines follow:")
+                for line in logs[-50:]:
+                    self.logger.info(line)
+                success = True
+                error_msg = ""
+                break
 
-                process = subprocess.Popen(
-                    docker_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                logging.info(f"Process {process.pid} started")
-                stdout, stderr = process.communicate()
-                stdout = stdout.decode('utf-8')
-                stderr = stderr.decode('utf-8')
-                # Close the process to avoid piling up memory
-                process.terminate()
-                return_code = process.poll()  # returns None if the process is still running
-                if return_code is None:
-                    logging.info(f"Process {process.pid} is still running")
-                else:
-                    logging.info(f"Process {process.pid} has been closed, return code: {return_code}.")
-
-                stdout_lines = stdout.strip().split('\n')
-                last_lines = '\n'.join(
-                    stdout_lines[-50:] if len(stdout_lines) > 10 else stdout_lines)
-                self.logger.info(f"==========stdout==========: {last_lines}")
-
-                if stderr:
-                    self.logger.error(f"The docker run command failed")
-                    self.logger.error(f"==========stderr==========: {stderr}")
-                    if "error during connect" in stderr or "The system cannot find the file specified" in stderr:
-                        error_msg = "Docker is not running, please start Docker"
-                    elif "user declined directory sharing" in stderr:
-                        error_msg = "Docker cannot access the directory; check the directory permissions and sharing settings"
-                    else:
-                        error_msg = "The Docker run failed and needs manual investigation"
-                    break
-                else:
-                    self.logger.info("The docker run command succeeded")
-                    if "ODM app finished" in last_lines:
-                        self.logger.info("ODM processing finished")
-                        if self._check_success(grid_dir):
-                            self.logger.info(
-                                f"Grid ({grid_id[0]},{grid_id[1]}) processed successfully")
-                            success = True
-                            error_msg = ""
-                            break
-                        else:
-                            self.logger.error(
-                                f"ODM finished, but the product quality may be unacceptable and needs a manual check")
-                            raise NotOverlapError
-                    # TODO these three cases may need different handling later
-                    elif "enough overlap" in last_lines:
-                        raise NotOverlapError
-                    elif "out of memory" in last_lines:
-                        raise NotOverlapError
-                    elif "strange values" in last_lines:
-                        raise NotOverlapError
-                    else:
-                        raise NotOverlapError
-
-            except NotOverlapError:
-                if use_lowest_quality and self.mode == "快拼模式":
-                    self.logger.warning(
-                        "Detected a not-enough-overlap error; retrying without the lowest-quality flag")
-                    use_lowest_quality = False
-                    time.sleep(10)
-                    continue
-                else:
-                    self.logger.error(
-                        "The error persists even after removing the lowest-quality flag")
-                    error_msg = "Insufficient image overlap; the dataset's sampling interval needs a manual check"
-                    break
+            # Remove the container
+            container.remove()
+            time.sleep(5)
 
         return success, error_msg
 
-    def process_all_grids(self, grid_points: Dict[tuple, pd.DataFrame], produce_dem: bool, accuracy: str) -> Dict[tuple, pd.DataFrame]:
+    def process_all_grids(self, grid_points: Dict[tuple, pd.DataFrame]) -> list:
         """Process all grids
 
         Returns:
-            Dict[tuple, pd.DataFrame]: dict of the successfully processed grid points
+            list: ids of the grids that were processed successfully
         """
         self.logger.info("Starting grid processing")
-        successful_grid_points = {}
+        successful_grid_lt = []
         failed_grids = []
 
         for grid_id, points in grid_points.items():
@@ -281,12 +122,10 @@ class ODMProcessMonitor:
             success, error_msg = self.run_odm_with_monitor(
                 grid_dir=grid_dir,
                 grid_id=grid_id,
-                produce_dem=produce_dem,
-                accuracy=accuracy
             )
 
             if success:
-                successful_grid_points[grid_id] = points
+                successful_grid_lt.append(grid_id)
             else:
                 self.logger.error(
                     f"Grid ({grid_id[0]},{grid_id[1]}) failed: {error_msg}")
@@ -301,7 +140,7 @@ class ODMProcessMonitor:
         # Summarize the results
         total_grids = len(grid_points)
         failed_count = len(failed_grids)
-        success_count = len(successful_grid_points)
+        success_count = len(successful_grid_lt)
         self.logger.info(
             f"Grid processing finished. Total: {total_grids}, succeeded: {success_count}, failed: {failed_count}")
 
             self.logger.error(
                 f"Grid ({grid_id[0]},{grid_id[1]}): {error_msg}")
 
-        if len(successful_grid_points) == 0:
+        if len(successful_grid_lt) == 0:
             raise Exception("All grids failed; processing cannot continue")
 
-        return successful_grid_points
+        return successful_grid_lt
diff --git a/utils/visualizer.py b/utils/visualizer.py
index 302fbfd..83918ac 100644
--- a/utils/visualizer.py
+++ b/utils/visualizer.py
@@ -65,7 +65,7 @@ class FilterVisualizer:
         # Create the figure
         plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei, for CJK labels
         plt.rcParams['axes.unicode_minus'] = False
-        plt.figure(figsize=(20, 16))
+        plt.figure()
 
         # Plot the retained points
         plt.scatter(current_x, current_y,