@comment{Commit: update bib citations (references.bib, @@ -1,41 +1,112 @@).}
@article{guo2024,
  title   = {Hybrid concurrency control protocol for data sharing among heterogeneous blockchains},
  author  = {Guo, Tiezheng and Zhang, Zhiwei and Yuan, Ye and Yang, Xiaochun and Wang, Guoren},
  year    = 2024,
  month   = jan,
  journal = {Frontiers of Computer Science},
  volume  = {18},
  number  = {3},
  pages   = {183104},
  doi     = {10.1007/s11704-022-2327-7},
  langid  = {en},
  internal-note = {Closing brace was missing; dropped date = {2024-01-22} (redundant with year/month) and url (plain DOI resolver duplicate of doi).}
}
@article{gorelickGoogleEarthEngine2017,
  title      = {Google {{Earth Engine}}: {{Planetary-scale}} Geospatial Analysis for Everyone},
  shorttitle = {Google {{Earth Engine}}},
  author     = {Gorelick, Noel and Hancher, Matt and Dixon, Mike and Ilyushchenko, Simon and Thau, David and Moore, Rebecca},
  year       = 2017,
  month      = dec,
  journal    = {Remote Sensing of Environment},
  series     = {Big {{Remotely Sensed Data}}: Tools, Applications and Experiences},
  volume     = {202},
  pages      = {18--27},
  issn       = {0034-4257},
  doi        = {10.1016/j.rse.2017.06.031},
  urldate    = {2026-01-18},
  abstract   = {Google Earth Engine is a cloud-based platform for planetary-scale geospatial analysis that brings Google's massive computational capabilities to bear on a variety of high-impact societal issues including deforestation, drought, disaster, disease, food security, water management, climate monitoring and environmental protection. It is unique in the field as an integrated platform designed to empower not only traditional remote sensing scientists, but also a much wider audience that lacks the technical capacity needed to utilize traditional supercomputers or large-scale commodity cloud computing resources.},
  keywords   = {Analysis,Big data,Cloud computing,Data democratization,Earth Engine,Platform},
  file       = {E\:\\zotero\\storage\\63PKW7Q5\\Gorelick 等 - 2017 - Google Earth Engine Planetary-scale geospatial analysis for everyone.pdf;E\:\\zotero\\storage\\E9IJLV2Y\\S0034425717302900.html}
}
@article{preil2025,
  title   = {Genetic Multi-Armed Bandits: A Reinforcement Learning Inspired Approach for Simulation Optimization},
  author  = {Preil, Deniz and Krapp, Michael},
  year    = 2025,
  month   = apr,
  journal = {IEEE Transactions on Evolutionary Computation},
  volume  = {29},
  number  = {2},
  pages   = {360--374},
  doi     = {10.1109/TEVC.2024.3524505},
  url     = {https://ieeexplore.ieee.org/document/10818791},
  internal-note = {Closing brace was missing; dropped date = {2025-04} (redundant with year/month). Duplicate of preilGeneticMultiArmedBandits2025 (same DOI) -- consider merging and citing only one key.}
}
@article{GridMesaNoSQLbasedBig2024,
  title      = {{{GridMesa}}: {{A NoSQL-based}} Big Spatial Data Management System with an Adaptive Grid Approximation Model},
  shorttitle = {{{GridMesa}}},
  year       = 2024,
  month      = jun,
  journal    = {Future Generation Computer Systems},
  volume     = {155},
  pages      = {324--339},
  publisher  = {North-Holland},
  issn       = {0167-739X},
  doi        = {10.1016/j.future.2024.02.010},
  urldate    = {2025-05-13},
  abstract   = {Due to the urgent demand for managing massive spatial data, various spatial data management systems built on distributed NoSQL databases have emerged.\dots},
  langid     = {english},
  file       = {E:\zotero\storage\28M2A6FZ\2024 - GridMesa A NoSQL-based big spatial data managemen.pdf},
  internal-note = {author field is missing (required for @article) -- TODO: fill in from the DOI landing page; not invented here.}
}
@inproceedings{rajesh2024,
  title     = {{{TunIO}}: {{An AI-powered Framework}} for {{Optimizing HPC I}}/{{O}}},
  booktitle = {2024 IEEE International Parallel and Distributed Processing Symposium (IPDPS)},
  author    = {Rajesh, Neeraj and Bateman, Keith and Bez, Jean Luca and Byna, Suren and Kougkas, Anthony and Sun, Xian-He},
  year      = 2024,
  month     = may,
  pages     = {494--505},
  doi       = {10.1109/IPDPS57955.2024.00050},
  url       = {https://ieeexplore.ieee.org/abstract/document/10579249},
  langid    = {en},
  internal-note = {Closing brace was missing; original title field held the proceedings name -- moved to booktitle, paper title taken from rajeshTunIOAIpoweredFramework2024 (identical DOI) -- verify. Duplicate of that entry; consider merging.}
}
@article{hongHybridApproachIntegrating2025,
  title    = {A {{Hybrid Approach}} to {{Integrating Deterministic}} and {{Non-Deterministic Concurrency Control}} in {{Database Systems}}},
  author   = {Hong, Yinhao and Zhao, Hongyao and Lu, Wei and Du, Xiaoyong and Chen, Yuxing and Pan, Anqun and Zheng, Lixiong},
  year     = 2025,
  month    = aug,
  journal  = {Proc. VLDB Endow.},
  volume   = {18},
  number   = {5},
  pages    = {1376--1389},
  issn     = {2150-8097},
  doi      = {10.14778/3718057.3718066},
  urldate  = {2025-10-28},
  abstract = {Deterministic and non-deterministic concurrency control algorithms have shown respective advantages under diverse workloads. Thus, a natural idea is to blend them together. However, because deterministic algorithms work with stringent assumptions, e.g., batched execution and non-interactive transactions, they hardly work together with non-deterministic algorithms. To address this issue, we propose HDCC, a hybrid approach that adaptively employs Calvin and OCC, which have distinct concurrency control and logging schemes, in the same database system. To ensure serializability and recovery correctness, we introduce lock-sharing, global validation, and two-log-interleaving mechanisms. Additionally, we introduce a rule-based assignment mechanism to dynamically select Calvin or OCC based on workload characteristics. Experimental results using TPC-C and YCSB benchmarks demonstrate that HDCC surpasses existing hybrid approaches by up to 3.1\texttimes.},
  file     = {E:\zotero\storage\UIIZSXL7\Hong 等 - 2025 - A Hybrid Approach to Integrating Deterministic and Non-Deterministic Concurrency Control in Database.pdf}
}
@article{lewisAustralianGeoscienceData2017a,
  title    = {The {{Australian Geoscience Data Cube}} --- {{Foundations}} and Lessons Learned},
  author   = {Lewis, Adam and Oliver, Simon and Lymburner, Leo and Evans, Ben and Wyborn, Lesley and Mueller, Norman and Raevksi, Gregory and Hooke, Jeremy and Woodcock, Rob and Sixsmith, Joshua and Wu, Wenjun and Tan, Peter and Li, Fuqin and Killough, Brian and Minchin, Stuart and Roberts, Dale and Ayers, Damien and Bala, Biswajit and Dwyer, John and Dekker, Arnold and Dhu, Trevor and Hicks, Andrew and Ip, Alex and Purss, Matt and Richards, Clare and Sagar, Stephen and Trenham, Claire and Wang, Peter and Wang, Lan-Wei},
  year     = 2017,
  month    = dec,
  journal  = {Remote Sensing of Environment},
  series   = {Big {{Remotely Sensed Data}}: Tools, Applications and Experiences},
  volume   = {202},
  pages    = {276--292},
  issn     = {0034-4257},
  doi      = {10.1016/j.rse.2017.03.015},
  urldate  = {2026-01-18},
  abstract = {The Australian Geoscience Data Cube (AGDC) aims to realise the full potential of Earth observation data holdings by addressing the Big Data challenges of volume, velocity, and variety that otherwise limit the usefulness of Earth observation data. There have been several iterations and AGDC version 2 is a major advance on previous work. The foundations and core components of the AGDC are: (1) data preparation, including geometric and radiometric corrections to Earth observation data to produce standardised surface reflectance measurements that support time-series analysis, and collection management systems which track the provenance of each Data Cube product and formalise re-processing decisions; (2) the software environment used to manage and interact with the data; and (3) the supporting high performance computing environment provided by the Australian National Computational Infrastructure (NCI). A growing number of examples demonstrate that our data cube approach allows analysts to extract rich new information from Earth observation time series, including through new methods that draw on the full spatial and temporal coverage of the Earth observation archives. To enable easy-uptake of the AGDC, and to facilitate future cooperative development, our code is developed under an open-source, Apache License, Version 2.0. This open-source approach is enabling other organisations, including the Committee on Earth Observing Satellites (CEOS), to explore the use of similar data cubes in developing countries.},
  keywords = {Australian Geoscience Data Cube,Big data,Collection management,Data cube,Geometric correction,High performance computing,High performance data,Landsat,Pixel quality,Time-series},
  file     = {E:\zotero\storage\9QEM6RZG\S0034425717301086.html}
}
@article{MSTGIMultiscaleSpatiotemporal,
  title      = {{{MSTGI}}: A Multi-Scale Spatio-Temporal Grid Index Model for Remote-Sensing Big Data Retrieval},
  shorttitle = {{{MSTGI}}},
  journal    = {Remote Sensing Letters},
  urldate    = {2025-05-13},
  abstract   = {To promote the transformation of remote sensing (RS) data into geoscience knowledge, it is necessary to provide better data discovery capabilities, especially when large amounts of RS data have bee...},
  langid     = {english},
  file       = {E\:\\zotero\\storage\\MNGVJEEV\\MSTGI a multi-scale spatio-temporal grid index mo.pdf;E\:\\zotero\\storage\\LHUZXQP8\\2150704X.2023.html},
  internal-note = {author, year, volume/pages, and doi are all missing (author and year are required for @article) -- TODO: complete from the publisher page; not invented here.}
}
@article{preilGeneticMultiArmedBandits2025,
  title      = {Genetic {{Multi-Armed Bandits}}: {{A Reinforcement Learning Inspired Approach}} for {{Simulation Optimization}}},
  shorttitle = {Genetic {{Multi-Armed Bandits}}},
  author     = {Preil, Deniz and Krapp, Michael},
  year       = 2025,
  month      = apr,
  journal    = {IEEE Transactions on Evolutionary Computation},
  volume     = {29},
  number     = {2},
  pages      = {360--374},
  issn       = {1941-0026},
  doi        = {10.1109/TEVC.2024.3524505},
  urldate    = {2025-11-03},
  abstract   = {Many real-world problems are inherently stochastic, complicating, or even precluding the use of analytical methods. These problems are often characterized by high dimensionality, large solution spaces, and numerous local optima, which make finding optimal solutions challenging. Therefore, simulation optimization is frequently employed. This article specifically focuses on the discrete case, also known as discrete optimization via simulation. Despite their adaptions for stochastic problems, previous evolutionary algorithms face a major limitation in these problems. They discard all information about solutions that are not involved in the most recent population. However, this is ineffective, as each simulation observation gathered over the course of iterations provides valuable information that should guide the selection of subsequent solutions. Inspired by the domain of reinforcement learning (RL), we propose a novel memory concept for evolutionary algorithms that ensures global convergence and significantly improves their finite time performance. Unlike previous evolutionary algorithms, our approach permanently preserves simulation observations to progressively improve the accuracy of sample means when revisiting solutions in later iterations. Moreover, the selection of new solutions is based on the entire memory rather than just the last population. The numerical experiments demonstrate that this novel approach, which combines a genetic algorithm (GA) with such memory, consistently outperforms popular convergent state-of-the-art benchmark algorithms in a large variety of established test problems while requiring considerably less computational effort. This marks the so-called genetic multi-armed bandit (MAB) as one of the currently most powerful algorithms for solving stochastic problems.},
  keywords   = {Complexity theory,Convergence,Evolutionary computation,Genetic algorithms,Genetic algorithms (GAs),Genetics,multi-armed bandits (MABs),Optimization,Program processors,Reinforcement learning,reinforcement learning (RL),Resource management,Search problems,simulation,simulation optimization},
  file       = {E:\zotero\storage\4NPKAMXS\Preil和Krapp - 2025 - Genetic Multi-Armed Bandits A Reinforcement Learning Inspired Approach for Simulation Optimization.pdf}
}
@inproceedings{rajeshTunIOAIpoweredFramework2024,
  title      = {{{TunIO}}: {{An AI-powered Framework}} for {{Optimizing HPC I}}/{{O}}},
  shorttitle = {{{TunIO}}},
  booktitle  = {2024 {{IEEE International Parallel}} and {{Distributed Processing Symposium}} ({{IPDPS}})},
  author     = {Rajesh, Neeraj and Bateman, Keith and Bez, Jean Luca and Byna, Suren and Kougkas, Anthony and Sun, Xian-He},
  year       = 2024,
  month      = may,
  pages      = {494--505},
  issn       = {1530-2075},
  doi        = {10.1109/IPDPS57955.2024.00050},
  urldate    = {2025-06-10},
  abstract   = {I/O operations are a known performance bottleneck of HPC applications. To achieve good performance, users often employ an iterative multistage tuning process to find an optimal I/O stack configuration. However, an I/O stack contains multiple layers, such as high-level I/O libraries, I/O middleware, and parallel file systems, and each layer has many parameters. These parameters and layers are entangled and influenced by each other. The tuning process is time-consuming and complex. In this work, we present TunIO, an AI-powered I/O tuning framework that implements several techniques to balance the tuning cost and performance gain, including tuning the high-impact parameters first. Furthermore, TunIO analyzes the application source code to extract its I/O kernel while retaining all statements necessary to perform I/O. It utilizes a smart selection of high-impact configuration parameters of the given tuning objective. Finally, it uses a novel Reinforcement Learning (RL)-driven early stopping mechanism to balance the cost and performance gain. Experimental results show that TunIO leads to a reduction of up to {$\approx$}73\% in tuning time while achieving the same performance gain when compared to H5Tuner. It achieves a significant performance gain/cost of 208.4 MBps/min (I/O bandwidth for each minute spent in tuning) over existing approaches under our testing.},
  langid     = {english},
  keywords   = {AI-powered I/O tuning,autotuning,Costs,I/O performance optimization,Kernel,Libraries,Middleware,Performance gain,Reinforcement learning,source code transformations,Source coding,storage stack tuning},
  file       = {E:\zotero\storage\WK48YMSE\Rajesh 等 - 2024 - TunIO An AI-powered Framework for Optimizing HPC .pdf}
}
@comment{End of commit view (viewer footer removed).}