import os
import shutil
from datetime import timedelta
from dataclasses import dataclass
from pathlib import Path

import psutil
import pandas as pd

from filter.cluster_filter import GPSCluster
from utils.gps_extractor import GPSExtractor
from utils.grid_divider import GridDivider
from utils.logger import setup_logger
from utils.visualizer import FilterVisualizer
from utils.docker_runner import DockerRunner
from post_pro.conv_obj import ConvertOBJ


@dataclass
class ProcessConfig:
    """Processing configuration."""
    image_dir: str
    output_dir: str
    # Cluster-filter parameters
    cluster_eps: float = 0.01
    cluster_min_samples: int = 5
    # Time-group overlap filter parameters
    time_group_overlap_threshold: float = 0.7
    time_group_interval: timedelta = timedelta(minutes=5)
    # Isolated-point filter parameters
    filter_distance_threshold: float = 0.001  # distance in lat/lon degrees
    filter_min_neighbors: int = 6
    # Dense-point filter parameters
    filter_grid_size: float = 0.001
    filter_dense_distance_threshold: float = 10  # plain distance, in meters
    filter_time_threshold: timedelta = timedelta(minutes=5)
    # Grid division parameters
    grid_overlap: float = 0.05
    grid_size: float = 500
    # Switches controlling which pipeline stages run
    mode: str = "快拼模式"  # fast-stitch mode; "三维模式" enables the 3D outputs
    accuracy: str = "medium"
    produce_dem: bool = False


class ODM_Plugin:
    def __init__(self, config: ProcessConfig):
        self.config = config
        self.logger = setup_logger(config.output_dir)

        # Check available disk space
        # TODO: the input directory's drive should be checked as well
        self._check_disk_space()

        # Clean and recreate the output directory
        if os.path.exists(config.output_dir):
            self._clean_output_dir()
        self._setup_output_dirs()

        # Restructure the input directory to match ODM's expected layout;
        # from this point on, image_dir serves as the project path.
        self._rename_input_dir()
        self.project_path = self.config.image_dir

        # Initialize the remaining components
        self.gps_points = None
        self.grid_points = None
        self.visualizer = FilterVisualizer(config.output_dir)

    def _clean_output_dir(self):
        """Remove the existing output directory."""
        try:
            shutil.rmtree(self.config.output_dir)
            self.logger.info(f"Cleaned output directory: {self.config.output_dir}")
        except Exception as e:
            self.logger.error(f"Error while cleaning output directory: {str(e)}")
            raise

    def _setup_output_dirs(self):
        """Create the required output directory structure."""
        try:
            # Main output directory
            os.makedirs(self.config.output_dir)
            # Directory for filter visualization images
            os.makedirs(os.path.join(self.config.output_dir, 'filter_imgs'))
            # Log directory
            os.makedirs(os.path.join(self.config.output_dir, 'logs'))
            self.logger.info(f"Created output directory structure: {self.config.output_dir}")
        except Exception as e:
            self.logger.error(f"Error while creating output directories: {str(e)}")
            raise

    def _get_directory_size(self, path):
        """Return the total size of a directory in bytes."""
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(path):
            for filename in filenames:
                file_path = os.path.join(dirpath, filename)
                try:
                    total_size += os.path.getsize(file_path)
                except OSError:
                    continue
        return total_size

    def _check_disk_space(self):
        """Check whether the output drive has enough free space."""
        # Size of the input directory
        input_size = self._get_directory_size(self.config.image_dir)

        # Free space on the drive holding the output directory
        output_drive = os.path.splitdrive(
            os.path.abspath(self.config.output_dir))[0]
        if not output_drive:  # Linux/Unix paths have no drive letter; fall back to /home
            output_drive = '/home'
        disk_usage = psutil.disk_usage(output_drive)
        free_space = disk_usage.free

        # Estimated space requirement: 12x the input size
        required_space = input_size * 12

        if free_space < required_space:
            error_msg = (
                f"Insufficient disk space!\n"
                f"Input directory size: {input_size / (1024**3):.2f} GB\n"
                f"Required space: {required_space / (1024**3):.2f} GB\n"
                f"Available space: {free_space / (1024**3):.2f} GB\n"
                f"On drive {output_drive}"
            )
            raise RuntimeError(error_msg)

    def _rename_input_dir(self):
        """Restructure the input directory into ODM's <name>/project/images layout."""
        image_dir = Path(self.config.image_dir).resolve()
        if not image_dir.exists() or not image_dir.is_dir():
            raise ValueError(
                f"Provided path '{image_dir}' is not a valid directory.")

        # Original directory name and parent path
        parent_dir = image_dir.parent
        original_name = image_dir.name

        # New images path (the original directory is renamed to "images")
        images_path = parent_dir / "images"

        # Rename the original directory to "images"
        image_dir.rename(images_path)

        # Create a new folder with the same name as the original directory
        new_root = parent_dir / original_name
        new_root.mkdir(exist_ok=False)

        # Create the "project" subfolder
        project_dir = new_root / "project"
        project_dir.mkdir()

        # Move the "images" folder under "project"
        final_images_path = project_dir / "images"
        shutil.move(str(images_path), str(final_images_path))

        self.logger.info(
            f"ODM-compliant input directory structure created: {final_images_path}")
        return final_images_path

    def extract_gps(self) -> pd.DataFrame:
        """Extract GPS data from the images."""
        self.logger.info("Extracting GPS data")
        extractor = GPSExtractor(self.project_path)
        self.gps_points = extractor.extract_all_gps()
        self.logger.info(f"Extracted {len(self.gps_points)} GPS points")
        return self.gps_points

    def cluster(self):
        """Cluster GPS points with DBSCAN and keep only the largest cluster."""
        previous_points = self.gps_points.copy()
        clusterer = GPSCluster(
            self.gps_points,
            eps=self.config.cluster_eps,
            min_samples=self.config.cluster_min_samples
        )
        self.clustered_points = clusterer.fit()
        self.gps_points = clusterer.get_cluster_stats(self.clustered_points)
        self.visualizer.visualize_filter_step(
            self.gps_points, previous_points, "1-Clustering")

    def divide_grids(self):
        """Divide the GPS points into grids.

        The image groups per grid are stored in self.grid_points and also
        written to disk via GridDivider.save_image_groups.
        """
        grid_divider = GridDivider(
            overlap=self.config.grid_overlap,
            grid_size=self.config.grid_size,
            project_path=self.project_path,
            output_dir=self.config.output_dir
        )
        grids, self.grid_points = grid_divider.adjust_grid_size_and_overlap(
            self.gps_points
        )
        grid_divider.visualize_grids(self.gps_points, grids)
        grid_divider.save_image_groups(self.grid_points)
        if len(grids) >= 20:
            self.logger.warning(
                "Grid count has reached 20 or more; manual partition adjustment is needed")

    def odm_docker_runner(self):
        """Run the ODM docker container."""
        self.logger.info("Starting the Docker container")
        # TODO: add some error handling
        docker_runner = DockerRunner(self.project_path)
        docker_runner.run_odm_container()

    def convert_obj(self):
        """Convert the OBJ models."""
        self.logger.info("Converting OBJ models")
        converter = ConvertOBJ(self.config.output_dir)
        converter.convert_grid_obj(self.grid_points)

    def post_process(self):
        """Post-processing: merge or copy the reconstruction results."""
        self.logger.info("Starting post-processing")
        self.logger.info("Copying the orthophoto to the output directory")
        orthophoto_tif_path = os.path.join(
            self.project_path, "project", "odm_orthophoto", "odm_orthophoto.tif")
        shutil.copy(orthophoto_tif_path, self.config.output_dir)
        # if self.config.mode == "三维模式":
        #     self.convert_obj()
        # else:
        #     pass

    def process(self):
        """Run the full pipeline: preprocessing, ODM reconstruction and post-processing."""
        try:
            self.extract_gps()
            self.cluster()
            self.divide_grids()
            self.logger.info("========== Preprocessing complete ==========")
            self.odm_docker_runner()
            self.post_process()
        except Exception as e:
            self.logger.error(f"Error during processing: {str(e)}", exc_info=True)
            raise
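
# ---------------------------------------------------------------------------
# Example entry point: a minimal usage sketch showing how this module is
# driven. ProcessConfig and ODM_Plugin are defined above; the directory paths
# below are hypothetical placeholders and must be replaced with real
# locations before running.
if __name__ == "__main__":
    example_config = ProcessConfig(
        image_dir=r"D:\UAV\survey_0420",        # hypothetical folder of drone images
        output_dir=r"D:\UAV\survey_0420_out",   # hypothetical output folder (recreated on each run)
        mode="快拼模式",                         # fast-stitch mode; "三维模式" would enable 3D output
        accuracy="medium",
        produce_dem=False,
    )
    plugin = ODM_Plugin(example_config)
    plugin.process()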