Update merge code

龙澳 2024-12-31 14:23:45 +08:00
parent 96022f4e22
commit ef1a84373a
7 changed files with 558 additions and 87 deletions

View File

@@ -19,3 +19,4 @@ conda install -c conda-forge open3d
- rerun in command_runner needs updating
- grid size should be split dynamically
- task queue
+- the obj-to-osgb conversion tool could not be installed on Windows but works on Linux; a Docker image should be built for it later (see the sketch below)
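A minimal sketch of how the planned Docker wrapper could look, assuming a hypothetical image name osg-tools that ships OpenSceneGraph's osgconv; neither the image nor the mount layout is part of this repo:

import os
import subprocess

def obj_to_osgb(input_obj: str, output_osgb: str,
                image: str = "osg-tools:latest") -> None:
    """Convert an OBJ model to OSGB inside a Linux container.

    The image name and mount layout are assumptions, not published artifacts.
    """
    work_dir = os.path.dirname(os.path.abspath(input_obj))
    cmd = [
        "docker", "run", "--rm",
        "-v", f"{work_dir}:/data",  # mount the model folder into the container
        image,
        "osgconv",                  # OpenSceneGraph's converter tool
        f"/data/{os.path.basename(input_obj)}",
        f"/data/{os.path.basename(output_osgb)}",
    ]
    subprocess.run(cmd, check=True)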

View File

@@ -209,6 +209,26 @@ class ImagePreprocessor:
        )
        grid_points = grid_divider.assign_to_grids(self.gps_points, grids)
        self.logger.info(f"Successfully divided into {len(grid_points)} grids")
+
+        # Generate the image_groups.txt file
+        try:
+            groups_file = os.path.join(self.config.output_dir, "image_groups.txt")
+            self.logger.info(f"Generating group file: {groups_file}")
+            with open(groups_file, 'w') as f:
+                for grid_idx, points_lt in grid_points.items():
+                    # Use ASCII letters as group labels: A, B, C...
+                    # (chr only yields A-Z for the first 26 grids; see the
+                    # helper sketched after this hunk)
+                    group_letter = chr(65 + grid_idx)  # 65 is the ASCII code of 'A'
+                    # Write one grouping line per image in this grid
+                    for point in points_lt:
+                        f.write(f"{point['file']} {group_letter}\n")
+            self.logger.info(f"Group file generated: {groups_file}")
+        except Exception as e:
+            self.logger.error(f"Error while generating group file: {str(e)}", exc_info=True)
+            raise
        return grid_points

    def copy_images(self, grid_points: Dict[int, pd.DataFrame]):
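The chr(65 + grid_idx) labeling above runs out of valid letters after 26 grids. A hedged sketch of a hypothetical spreadsheet-style helper (A..Z, AA, AB, ...; not part of this commit) that could replace it:

def grid_group_label(grid_idx: int) -> str:
    """Map 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 27 -> 'AB', ... (spreadsheet-column style)."""
    label = ""
    n = grid_idx
    while True:
        n, r = divmod(n, 26)
        label = chr(65 + r) + label
        if n == 0:
            break
        n -= 1  # shift so that 26 maps to 'AA' rather than 'BA'
    return label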
@@ -270,8 +290,8 @@ class ImagePreprocessor:

if __name__ == "__main__":
    # Create the configuration
    config = PreprocessConfig(
-        image_dir=r"E:\datasets\UAV\1815\images",
-        output_dir=r"G:\ODM_output\1815",
+        image_dir=r"E:\datasets\UAV\134\project\images",
+        output_dir=r"G:\ODM_output\134_test",
        cluster_eps=0.01,
        cluster_min_samples=5,
@@ -287,8 +307,8 @@ if __name__ == "__main__":
        filter_dense_distance_threshold=10,
        filter_time_threshold=timedelta(minutes=5),
-        grid_size=500,
-        grid_overlap=0.05,
+        grid_size=300,
+        grid_overlap=0.1,
        mode="重建模式",  # "reconstruction mode"

odm_preprocess_fast.py Normal file (319 lines)
View File

@@ -0,0 +1,319 @@
import os
import shutil
from datetime import timedelta
from dataclasses import dataclass
from typing import Dict

import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm

from filter.cluster_filter import GPSCluster
from filter.time_group_overlap_filter import TimeGroupOverlapFilter
from filter.gps_filter import GPSFilter
from utils.odm_monitor import ODMProcessMonitor
from utils.gps_extractor import GPSExtractor
from utils.grid_divider import GridDivider
from utils.logger import setup_logger
from utils.visualizer import FilterVisualizer
from post_pro.merge_tif import MergeTif
from tools.test_docker_run import run_docker_command
from post_pro.merge_obj import MergeObj
from post_pro.merge_ply import MergePly


@dataclass
class PreprocessConfig:
    """Preprocessing configuration"""

    image_dir: str
    output_dir: str
    # Cluster-filter parameters
    cluster_eps: float = 0.01
    cluster_min_samples: int = 5
    # Time-group-overlap filter parameters
    time_group_overlap_threshold: float = 0.7
    time_group_interval: timedelta = timedelta(minutes=5)
    # Isolated-point filter parameters
    filter_distance_threshold: float = 0.001  # distance in degrees (lat/lon)
    filter_min_neighbors: int = 6
    # Dense-point filter parameters
    filter_grid_size: float = 0.001
    filter_dense_distance_threshold: float = 10  # metric distance, in meters
    filter_time_threshold: timedelta = timedelta(minutes=5)
    # Grid-division parameters
    grid_overlap: float = 0.05
    grid_size: float = 500
    # Which pipeline stages are enabled
    mode: str = "快拼模式"  # "fast-stitch mode"
class ImagePreprocessor:
    def __init__(self, config: PreprocessConfig):
        self.config = config

        # # Clean and recreate the output directory
        # if os.path.exists(config.output_dir):
        #     self._clean_output_dir()
        # self._setup_output_dirs()

        # Initialize the other components
        self.logger = setup_logger(config.output_dir)
        self.gps_points = None
        self.odm_monitor = ODMProcessMonitor(
            config.output_dir, mode=config.mode)
        self.visualizer = FilterVisualizer(config.output_dir)

    def _clean_output_dir(self):
        """Clean the output directory"""
        try:
            shutil.rmtree(self.config.output_dir)
            print(f"Cleaned output directory: {self.config.output_dir}")
        except Exception as e:
            print(f"Error while cleaning output directory: {str(e)}")
            raise

    def _setup_output_dirs(self):
        """Create the required output directory structure"""
        try:
            # Main output directory
            os.makedirs(self.config.output_dir)
            # Directory for filter visualizations
            os.makedirs(os.path.join(self.config.output_dir, 'filter_imgs'))
            # Log directory
            os.makedirs(os.path.join(self.config.output_dir, 'logs'))
            print(f"Created output directory structure: {self.config.output_dir}")
        except Exception as e:
            print(f"Error while creating output directories: {str(e)}")
            raise
    def extract_gps(self) -> pd.DataFrame:
        """Extract GPS data"""
        self.logger.info("Extracting GPS data")
        extractor = GPSExtractor(self.config.image_dir)
        self.gps_points = extractor.extract_all_gps()
        self.logger.info(f"Extracted {len(self.gps_points)} GPS points")
        return self.gps_points

    def cluster(self) -> pd.DataFrame:
        """Cluster the GPS points with DBSCAN and keep only the largest cluster"""
        self.logger.info("Clustering GPS points")
        previous_points = self.gps_points.copy()
        # Create the clusterer and run it
        clusterer = GPSCluster(
            self.gps_points, output_dir=self.config.output_dir,
            eps=self.config.cluster_eps, min_samples=self.config.cluster_min_samples)
        # Keep the points of the main cluster
        self.clustered_points = clusterer.fit()
        self.gps_points = clusterer.get_main_cluster(self.clustered_points)
        # Log the cluster statistics
        stats = clusterer.get_cluster_stats(self.clustered_points)
        self.logger.info(
            f"Clustering finished: main cluster has {stats['main_cluster_points']} points, "
            f"{stats['noise_points']} noise points"
        )
        # Visualize the clustering result
        self.visualizer.visualize_filter_step(
            self.gps_points, previous_points, "1-Clustering")
        return self.gps_points
    def filter_time_group_overlap(self) -> pd.DataFrame:
        """Filter overlapping time groups"""
        self.logger.info("Filtering overlapping time groups")
        previous_points = self.gps_points.copy()
        filter = TimeGroupOverlapFilter(
            self.config.image_dir,
            self.config.output_dir,
            overlap_threshold=self.config.time_group_overlap_threshold
        )
        deleted_files = filter.filter_overlapping_groups(
            time_threshold=self.config.time_group_interval
        )
        # Update the GPS points, dropping the deleted images
        self.gps_points = self.gps_points[~self.gps_points['file'].isin(
            deleted_files)]
        self.logger.info(f"{len(self.gps_points)} GPS points remain after time-group-overlap filtering")
        # Visualize the filtering result
        self.visualizer.visualize_filter_step(
            self.gps_points, previous_points, "2-Time Group Overlap")
        return self.gps_points
    # TODO: the filtering algorithms still need updating
    def filter_points(self) -> pd.DataFrame:
        """Filter GPS points"""
        self.logger.info("Filtering GPS points")
        filter = GPSFilter(self.config.output_dir)

        # Filter isolated points
        previous_points = self.gps_points.copy()
        self.logger.info(
            f"Filtering isolated points (distance threshold: {self.config.filter_distance_threshold}, "
            f"min neighbors: {self.config.filter_min_neighbors})"
        )
        self.gps_points = filter.filter_isolated_points(
            self.gps_points,
            self.config.filter_distance_threshold,
            self.config.filter_min_neighbors,
        )
        self.logger.info(f"{len(self.gps_points)} GPS points remain after isolated-point filtering")
        # Visualize the isolated-point filtering result
        self.visualizer.visualize_filter_step(
            self.gps_points, previous_points, "3-Isolated Points")

        # # Filter dense points
        # previous_points = self.gps_points.copy()
        # self.logger.info(
        #     f"Filtering dense points (grid size: {self.config.filter_grid_size}, "
        #     f"distance threshold: {self.config.filter_dense_distance_threshold})"
        # )
        # self.gps_points = filter.filter_dense_points(
        #     self.gps_points,
        #     grid_size=self.config.filter_grid_size,
        #     distance_threshold=self.config.filter_dense_distance_threshold,
        #     time_threshold=self.config.filter_time_threshold,
        # )
        # self.logger.info(f"{len(self.gps_points)} GPS points remain after dense-point filtering")
        # # Visualize the dense-point filtering result
        # self.visualizer.visualize_filter_step(
        #     self.gps_points, previous_points, "4-Dense Points")
        return self.gps_points
    def divide_grids(self) -> Dict[int, pd.DataFrame]:
        """Divide the points into grids"""
        self.logger.info(f"Dividing grids (overlap: {self.config.grid_overlap})")
        grid_divider = GridDivider(
            overlap=self.config.grid_overlap,
            output_dir=self.config.output_dir
        )
        grids = grid_divider.divide_grids(
            self.gps_points, grid_size=self.config.grid_size
        )
        grid_points = grid_divider.assign_to_grids(self.gps_points, grids)
        self.logger.info(f"Successfully divided into {len(grid_points)} grids")

        # Generate the image_groups.txt file
        try:
            groups_file = os.path.join(self.config.output_dir, "image_groups.txt")
            self.logger.info(f"Generating group file: {groups_file}")
            with open(groups_file, 'w') as f:
                for grid_idx, points_lt in grid_points.items():
                    # Use ASCII letters as group labels: A, B, C...
                    # (valid for at most 26 grids; see the helper sketched earlier)
                    group_letter = chr(65 + grid_idx)  # 65 is the ASCII code of 'A'
                    # Write one grouping line per image in this grid
                    for point in points_lt:
                        f.write(f"{point['file']} {group_letter}\n")
            self.logger.info(f"Group file generated: {groups_file}")
        except Exception as e:
            self.logger.error(f"Error while generating group file: {str(e)}", exc_info=True)
            raise
        return grid_points
    def copy_images(self, grid_points: Dict[int, pd.DataFrame]):
        """Copy images into the per-grid folders"""
        self.logger.info("Copying image files")
        for grid_idx, points in grid_points.items():
            output_dir = os.path.join(
                self.config.output_dir, f"grid_{grid_idx + 1}", "project", "images"
            )
            os.makedirs(output_dir, exist_ok=True)
            for point in tqdm(points, desc=f"Copying images of grid {grid_idx + 1}"):
                src = os.path.join(self.config.image_dir, point["file"])
                dst = os.path.join(output_dir, point["file"])
                shutil.copy(src, dst)
            self.logger.info(f"Grid {grid_idx + 1} contains {len(points)} images")
    def merge_tif(self, grid_points: Dict[int, pd.DataFrame]):
        """Merge the imagery products of all grids"""
        self.logger.info("Merging all imagery products")
        merger = MergeTif(self.config.output_dir)
        merger.merge_all_tifs(grid_points)

    def merge_obj(self, grid_points: Dict[int, pd.DataFrame]):
        """Merge the OBJ models of all grids"""
        self.logger.info("Merging OBJ models")
        merger = MergeObj(self.config.output_dir)
        merger.merge_grid_obj(grid_points)

    def merge_ply(self, grid_points: Dict[int, pd.DataFrame]):
        """Merge the PLY point clouds of all grids"""
        self.logger.info("Merging PLY point clouds")
        merger = MergePly(self.config.output_dir)
        merger.merge_grid_ply(grid_points)
    def process(self):
        """Run the full preprocessing pipeline"""
        try:
            self.extract_gps()
            self.cluster()
            # self.filter_time_group_overlap()
            self.filter_points()
            grid_points = self.divide_grids()
            # self.copy_images(grid_points)
            self.logger.info("Preprocessing finished")
            # self.odm_monitor.process_all_grids(grid_points)
            # self.merge_tif(grid_points)
            self.merge_ply(grid_points)
            self.merge_obj(grid_points)
        except Exception as e:
            self.logger.error(f"Error during processing: {str(e)}", exc_info=True)
            raise
if __name__ == "__main__":
    # Create the configuration
    config = PreprocessConfig(
        image_dir=r"E:\datasets\UAV\1009\project\images",
        output_dir=r"G:\ODM_output\1009",
        cluster_eps=0.01,
        cluster_min_samples=5,
        # Time-group-overlap filter parameters
        time_group_overlap_threshold=0.7,
        time_group_interval=timedelta(minutes=5),
        filter_distance_threshold=0.001,
        filter_min_neighbors=6,
        filter_grid_size=0.001,
        filter_dense_distance_threshold=10,
        filter_time_threshold=timedelta(minutes=5),
        grid_size=300,
        grid_overlap=0.1,
        mode="重建模式",  # "reconstruction mode"
    )

    # Create the processor and run it
    processor = ImagePreprocessor(config)
    processor.process()
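Since `mode` is matched as a raw Chinese string in several places, a hypothetical refinement (not part of this commit) would be to centralize the two mode values in a str-backed Enum, which keeps comparisons against the raw strings working:

from enum import Enum

class PipelineMode(str, Enum):
    FAST_STITCH = "快拼模式"       # fast-stitch mode
    RECONSTRUCTION = "重建模式"    # reconstruction mode

# usage sketch:
# config = PreprocessConfig(..., mode=PipelineMode.RECONSTRUCTION)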

View File

@@ -71,12 +71,13 @@ class MergeObj:
                    self.output_dir,
                    f"grid_{grid_idx + 1}",
                    "project",
-                    "odm_texturing",
+                    "odm_texturing_25d",
                    "odm_textured_model_geo.obj"
                )
                if not os.path.exists(grid_obj):
-                    self.logger.warning(f"Grid {grid_idx + 1} OBJ file does not exist: {grid_obj}")
+                    self.logger.warning(
+                        f"Grid {grid_idx + 1} OBJ file does not exist: {grid_obj}")
                    continue

                if input_obj1 is None:
@@ -84,7 +85,8 @@ class MergeObj:
                    self.logger.info(f"Set first input OBJ: {input_obj1}")
                else:
                    input_obj2 = grid_obj
-                    output_obj = os.path.join(self.output_dir, "merged_model.obj")
+                    output_obj = os.path.join(
+                        self.output_dir, "merged_model.obj")

                    self.logger.info(
                        f"Merging round {merge_count + 1}:\n"
@@ -107,3 +109,34 @@ class MergeObj:
        except Exception as e:
            self.logger.error(f"Error while merging OBJ models: {str(e)}", exc_info=True)
            raise
+
+
+if __name__ == "__main__":
+    import sys
+    sys.path.append(os.path.dirname(
+        os.path.dirname(os.path.abspath(__file__))))
+    from utils.logger import setup_logger
+    import pandas as pd
+
+    # Set up the output directory and logging
+    output_dir = r"G:\ODM_output\1009"
+    setup_logger(output_dir)
+
+    # Build a test grid_points dict:
+    # assume two grids, each holding a DataFrame of GPS points
+    grid_points = {
+        0: pd.DataFrame({
+            'latitude': [39.9, 39.91],
+            'longitude': [116.3, 116.31],
+            'altitude': [100, 101]
+        }),
+        1: pd.DataFrame({
+            'latitude': [39.92, 39.93],
+            'longitude': [116.32, 116.33],
+            'altitude': [102, 103]
+        })
+    }
+
+    # Create a MergeObj instance and run the merge
+    merge_obj = MergeObj(output_dir)
+    merge_obj.merge_grid_obj(grid_points)

View File

@@ -1,7 +1,7 @@
import os
import logging
import numpy as np
-from typing import Dict
+from typing import Dict, Tuple
import pandas as pd
import open3d as o3d
@@ -11,8 +11,51 @@ class MergePly:
        self.output_dir = output_dir
        self.logger = logging.getLogger('UAV_Preprocess.MergePly')

-    def merge_two_plys(self, ply1_path: str, ply2_path: str, output_path: str):
-        """Merge two PLY files"""
+    def read_corners_file(self, grid_idx: int) -> Tuple[float, float]:
+        """Read the corners file and compute the center coordinates.
+
+        Corners file format: xmin ymin xmax ymax
+        (e.g. the line "100.0 200.0 300.0 400.0" yields center (200.0, 300.0))
+        """
+        corners_file = os.path.join(
+            self.output_dir,
+            f"grid_{grid_idx + 1}",
+            "project",
+            "odm_orthophoto",
+            "odm_orthophoto_corners.txt"
+        )
+        try:
+            if not os.path.exists(corners_file):
+                raise FileNotFoundError(f"Corners file does not exist: {corners_file}")
+
+            # Read the corners file
+            with open(corners_file, 'r') as f:
+                line = f.readline().strip()
+                if not line:
+                    raise ValueError(f"Corners file is empty: {corners_file}")
+                # Parse the four corner values: xmin ymin xmax ymax
+                xmin, ymin, xmax, ymax = map(float, line.split())
+
+            # Compute the center coordinates
+            center_x = (xmin + xmax) / 2
+            center_y = (ymin + ymax) / 2
+
+            self.logger.info(
+                f"Grid {grid_idx + 1} bounds:\n"
+                f"xmin={xmin:.2f}, ymin={ymin:.2f}\n"
+                f"xmax={xmax:.2f}, ymax={ymax:.2f}\n"
+                f"center: x={center_x:.2f}, y={center_y:.2f}"
+            )
+            return center_x, center_y
+
+        except Exception as e:
+            self.logger.error(f"Error while reading corners file: {str(e)}", exc_info=True)
+            raise
+
+    def merge_two_plys(self, ply1_path: str, ply2_path: str, output_path: str,
+                       center1: Tuple[float, float],
+                       center2: Tuple[float, float]):
+        """Merge two PLY files, aligned via the grid center coordinates"""
        try:
            self.logger.info("Merging two PLY point clouds")
            self.logger.info(f"Input cloud 1: {ply1_path}")
@@ -30,15 +73,15 @@ class MergePly:
            if pcd1 is None or pcd2 is None:
                raise ValueError("Could not read the point cloud files")

-            # Get the point cloud centers
-            center1 = pcd1.get_center()
-            center2 = pcd2.get_center()
+            # Compute the translation vector (directly from the grid-center offset)
+            translation = np.array([
+                center2[0] - center1[0],  # shift along x
+                center2[1] - center1[1],  # shift along y
+                0.0                       # no shift along z
+            ])

-            # Compute the translation vector
-            translation_vector = center2 - center1
-            # Align the point clouds
-            pcd2.translate(translation_vector)
+            # Translate the second point cloud
+            pcd2.translate(translation*100)  # note the empirical *100 scale factor applied in this commit

            # Merge the point clouds
            combined_pcd = pcd1 + pcd2
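For intuition, a standalone toy example of the center-offset translation used above; the center values are made up, and the *100 scaling simply mirrors what this commit applies:

import numpy as np
import open3d as o3d

# Hypothetical grid centers, as read_corners_file would return them
center1 = (100.0, 200.0)  # reference grid
center2 = (150.0, 260.0)  # grid being merged in

# Offset of grid 2 relative to grid 1; z stays untouched
translation = np.array([center2[0] - center1[0],
                        center2[1] - center1[1],
                        0.0])

pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.zeros((1, 3)))  # a single point at the origin
pcd.translate(translation * 100)  # mirrors the commit's empirical scaling
print(np.asarray(pcd.points))     # -> [[5000. 6000. 0.]]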
@@ -53,56 +96,93 @@ class MergePly:
            self.logger.error(f"Error while merging PLY point clouds: {str(e)}", exc_info=True)
            raise

-    def merge_grid_ply(self, grid_points: Dict[int, pd.DataFrame]):
-        """Merge the PLY point clouds of all grids"""
+    def merge_grid_ply(self, grid_points: Dict[int, list]):
+        """Merge the PLY point clouds of all grids, using the first grid as reference"""
        self.logger.info("Merging the PLY point clouds of all grids")

        if len(grid_points) < 2:
            self.logger.info("Only one grid, nothing to merge")
            return

-        input_ply1, input_ply2 = None, None
-        merge_count = 0
        try:
-            for grid_idx, points in grid_points.items():
-                grid_ply = os.path.join(
+            # Collect and sort the grid indices
+            grid_indices = sorted(grid_points.keys())
+
+            # The first grid serves as the reference grid
+            ref_idx = grid_indices[0]
+            ref_ply = os.path.join(
+                self.output_dir,
+                f"grid_{ref_idx + 1}",
+                "project",
+                "odm_filterpoints",
+                "point_cloud.ply"
+            )
+            if not os.path.exists(ref_ply):
+                raise FileNotFoundError(f"PLY file of the reference grid does not exist: {ref_ply}")
+
+            # Center coordinates of the reference grid
+            ref_center = self.read_corners_file(ref_idx)
+            self.logger.info(
+                f"Reference grid (grid_{ref_idx + 1}) center: x={ref_center[0]:.2f}, y={ref_center[1]:.2f}")
+
+            # Copy the reference cloud to the output path as the initial merge result
+            output_ply = os.path.join(self.output_dir, "merged_pointcloud.ply")
+            import shutil
+            shutil.copy2(ref_ply, output_ply)
+
+            # Merge the remaining grids one by one
+            for grid_idx in grid_indices[1:]:
+                current_ply = os.path.join(
                    self.output_dir,
                    f"grid_{grid_idx + 1}",
                    "project",
-                    "odm_georeferencing",
-                    "odm_georeferenced_model.ply"
+                    "odm_filterpoints",
+                    "point_cloud.ply"
                )
-                if not os.path.exists(grid_ply):
-                    self.logger.warning(f"Grid {grid_idx + 1} PLY file does not exist: {grid_ply}")
+                if not os.path.exists(current_ply):
+                    self.logger.warning(f"Grid {grid_idx + 1} PLY file does not exist: {current_ply}")
                    continue

-                if input_ply1 is None:
-                    input_ply1 = grid_ply
-                    self.logger.info(f"Set first input PLY: {input_ply1}")
-                else:
-                    input_ply2 = grid_ply
-                    output_ply = os.path.join(self.output_dir, "merged_pointcloud.ply")
+                # Center coordinates of the current grid
+                current_center = self.read_corners_file(grid_idx)

                self.logger.info(
-                    f"Merging round {merge_count + 1}:\n"
-                    f"input 1: {input_ply1}\n"
-                    f"input 2: {input_ply2}\n"
-                    f"output: {output_ply}"
+                    f"Processing grid {grid_idx + 1}:\n"
+                    f"merging cloud: {current_ply}\n"
+                    f"current grid center: x={current_center[0]:.2f}, y={current_center[1]:.2f}"
                )

-                    self.merge_two_plys(input_ply1, input_ply2, output_ply)
-                    merge_count += 1
-                    input_ply1 = output_ply
-                    input_ply2 = None
-
-            self.logger.info(
-                f"PLY merge finished: performed {merge_count} merges, "
-                f"final output file: {input_ply1}"
-            )
+                # Merge, always using the first grid's center as the reference point
+                self.merge_two_plys(
+                    output_ply,      # current merge result
+                    current_ply,     # new cloud to merge in
+                    output_ply,      # overwrite the existing merge result
+                    ref_center,      # reference grid center (never changes)
+                    current_center   # current grid center
+                )
+
+            self.logger.info(f"PLY merge finished, final output file: {output_ply}")

        except Exception as e:
            self.logger.error(f"Error during PLY point-cloud merging: {str(e)}", exc_info=True)
            raise
+
+if __name__ == "__main__":
+    import sys
+    sys.path.append(os.path.dirname(
+        os.path.dirname(os.path.abspath(__file__))))
+    from utils.logger import setup_logger
+
+    # Set up the output directory and logging
+    output_dir = r"G:\ODM_output\1009"
+    setup_logger(output_dir)
+
+    # Build a test grid_points dict
+    grid_points = {
+        0: [],  # GPS point info is no longer needed
+        1: []
+    }
+
+    # Create a MergePly instance and run the merge
+    merge_ply = MergePly(output_dir)
+    merge_ply.merge_grid_ply(grid_points)

View File

@@ -3,8 +3,6 @@ import logging
import os
from typing import Dict
import pandas as pd
-import sys
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


class MergeTif:
@@ -53,7 +51,8 @@ class MergeTif:
            )

            self.logger.info("Starting image mosaicking...")
-            result = gdal.Warp(output_tif, [input_tif1, input_tif2], options=warp_options)
+            result = gdal.Warp(
+                output_tif, [input_tif1, input_tif2], options=warp_options)

            if result is None:
                error_msg = "Image mosaicking failed"
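warp_options is built outside this excerpt; a minimal sketch of how it could be constructed with gdal.WarpOptions, assuming GTiff output and zero as the nodata value (both assumptions, not taken from this repo):

from osgeo import gdal

# Assumed construction of warp_options; the repo's actual options may differ
warp_options = gdal.WarpOptions(
    format="GTiff",   # GeoTIFF output (assumption)
    srcNodata=0,      # treat 0 as nodata in the inputs (assumption)
    dstNodata=0,
    creationOptions=["COMPRESS=LZW", "BIGTIFF=IF_SAFER"],
)
result = gdal.Warp("merged.tif", ["grid_1.tif", "grid_2.tif"], options=warp_options)
if result is None:
    raise RuntimeError("Image mosaicking failed")
result = None  # dereference to flush and close the dataset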
@@ -100,7 +99,8 @@ class MergeTif:
                )

                if not os.path.exists(grid_tif):
-                    self.logger.warning(f"Grid {grid_idx + 1} {product_name} does not exist: {grid_tif}")
+                    self.logger.warning(
+                        f"Grid {grid_idx + 1} {product_name} does not exist: {grid_tif}")
                    continue

                if input_tif1 is None:
@@ -108,7 +108,8 @@ class MergeTif:
                    self.logger.info(f"Set first input {product_name}: {input_tif1}")
                else:
                    input_tif2 = grid_tif
-                    output_tif = os.path.join(self.output_dir, f"merged_{product_info['output']}")
+                    output_tif = os.path.join(
+                        self.output_dir, f"merged_{product_info['output']}")

                    self.logger.info(
                        f"Merging {product_name}, round {merge_count + 1}:\n"
@@ -129,7 +130,8 @@ class MergeTif:
            )

        except Exception as e:
-            self.logger.error(f"Error while merging {product_name}: {str(e)}", exc_info=True)
+            self.logger.error(
+                f"Error while merging {product_name}: {str(e)}", exc_info=True)
            raise

    def merge_all_tifs(self, grid_points: Dict[int, pd.DataFrame]):
@@ -157,7 +159,7 @@ class MergeTif:
            ]

            for product in products:
-                self.merge_grid_product(grid_points, product)
+                self.merge_grid_tif(grid_points, product)

            self.logger.info("All products merged")

        except Exception as e:
@@ -166,17 +168,31 @@ class MergeTif:

if __name__ == "__main__":
+    import sys
+    sys.path.append(os.path.dirname(
+        os.path.dirname(os.path.abspath(__file__))))
    from utils.logger import setup_logger
+    import pandas as pd

-    # Define the image paths
-    input_tif1 = r"G:\ODM_output\20241024100834\output\grid_1\project\odm_orthophoto\odm_orthophoto.tif"
-    input_tif2 = r"G:\ODM_output\20241024100834\output\grid_2\project\odm_orthophoto\odm_orthophoto.tif"
-    output_tif = r"G:\ODM_output\20241024100834\output\merged_orthophoto.tif"
-    # Set up logging
-    output_dir = r"E:\studio2\ODM_pro\test"
+    # Set up the output directory and logging
+    output_dir = r"G:\ODM_output\1009"
    setup_logger(output_dir)

-    # Run the mosaicking
+    # Build a test grid_points dict:
+    # assume two grids, each holding a DataFrame of GPS points
+    grid_points = {
+        0: pd.DataFrame({
+            'latitude': [39.9, 39.91],
+            'longitude': [116.3, 116.31],
+            'altitude': [100, 101]
+        }),
+        1: pd.DataFrame({
+            'latitude': [39.92, 39.93],
+            'longitude': [116.32, 116.33],
+            'altitude': [102, 103]
+        })
+    }

+    # Create a MergeTif instance and run the merge
    merge_tif = MergeTif(output_dir)
-    merge_tif.merge_two_tifs(input_tif1, input_tif2, output_tif)
+    merge_tif.merge_all_tifs(grid_points)

View File

@@ -50,12 +50,14 @@ class ODMProcessMonitor:
            stdout, stderr = result.stdout.decode(
                'utf-8'), result.stderr.decode('utf-8')
+            self.logger.info(f"==========stdout==========: {stdout}")
+            self.logger.error(f"==========stderr==========: {stderr}")

            # Check the result of the run
            if self._check_success(grid_dir):
-                self.logger.info(stdout, stderr)
                self.logger.info(f"Grid {grid_idx + 1} processed successfully")
                return True, ""
            else:
                self.logger.error(f"Grid {grid_idx + 1} processing failed")
                return False, f"Grid {grid_idx + 1} processing failed"

    def process_all_grids(self, grid_points: Dict[int, pd.DataFrame]):