图表左轴朝内,方法分类,并发控制添加baseline
This commit is contained in:
Binary file not shown.
BIN
exp/cc_exp1_1_mean.pdf
Normal file
BIN
exp/cc_exp1_1_mean.pdf
Normal file
Binary file not shown.
BIN
exp/cc_exp1_1_p95.pdf
Normal file
BIN
exp/cc_exp1_1_p95.pdf
Normal file
Binary file not shown.
Binary file not shown.
BIN
exp/cc_exp1_2_bak.pdf
Normal file
BIN
exp/cc_exp1_2_bak.pdf
Normal file
Binary file not shown.
BIN
exp/cc_exp1_2_mean.pdf
Normal file
BIN
exp/cc_exp1_2_mean.pdf
Normal file
Binary file not shown.
BIN
exp/cc_exp1_2_p95.pdf
Normal file
BIN
exp/cc_exp1_2_p95.pdf
Normal file
Binary file not shown.
Binary file not shown.
BIN
exp/cc_exp1_3_mean.pdf
Normal file
BIN
exp/cc_exp1_3_mean.pdf
Normal file
Binary file not shown.
BIN
exp/cc_exp1_3_p95.pdf
Normal file
BIN
exp/cc_exp1_3_p95.pdf
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
exp/cc_exp4.pdf
BIN
exp/cc_exp4.pdf
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
exp/tune_exp2_1.pdf
Normal file
BIN
exp/tune_exp2_1.pdf
Normal file
Binary file not shown.
Binary file not shown.
BIN
fig/overview.png
BIN
fig/overview.png
Binary file not shown.
|
Before Width: | Height: | Size: 30 KiB After Width: | Height: | Size: 29 KiB |
BIN
fig/unlock.png
BIN
fig/unlock.png
Binary file not shown.
|
Before Width: | Height: | Size: 159 KiB After Width: | Height: | Size: 161 KiB |
@@ -478,6 +478,21 @@
|
||||
year = {2025}
|
||||
}
|
||||
|
||||
@article{Habibi25Brook2PL,
|
||||
author = {Habibi, Farzad and Fang, Juncheng and Lorido-Botran, Tania and Nawab, Faisal},
|
||||
title = {Brook-2PL: Tolerating High Contention Workloads with A Deadlock-Free Two-Phase Locking Protocol},
|
||||
year = {2025},
|
||||
issue_date = {December 2025},
|
||||
publisher = {Association for Computing Machinery},
|
||||
address = {New York, NY, USA},
|
||||
volume = {3},
|
||||
number = {6},
|
||||
journal = {Proc. ACM Manag. Data},
|
||||
month = dec,
|
||||
articleno = {302},
|
||||
numpages = {27}
|
||||
}
|
||||
|
||||
@article{Chen21Tuning1,
|
||||
author = {Tao Chen and
|
||||
Miqing Li},
|
||||
|
||||
@@ -47,7 +47,7 @@
|
||||
\newlabel{eqn:pre_rs}{{1}{3}}
|
||||
\newlabel{eqn:pre_st_query}{{2}{3}}
|
||||
\newlabel{eqn:cost_total}{{3}{3}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The workflow for processing concurrent spatio-temporal range retrievals in the system}}{4}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Overview of the proposed system architecture and its main component modules}}{4}{}\protected@file@percent }
|
||||
\newlabel{fig:overview}{{1}{4}}
|
||||
\newlabel{eqn_pre_objective}{{4}{4}}
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {IV}System Overview}{4}{}\protected@file@percent }
|
||||
@@ -95,7 +95,11 @@
|
||||
\@writefile{toc}{\contentsline {subsection}{\numberline {\mbox {VIII-A}}Experimental Setup}{10}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-A}1}Dataset}{10}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-A}2}Retrieval Workload}{10}{}\protected@file@percent }
|
||||
\citation{Strobl08PostGIS}
|
||||
\citation{hughes15geomesa}
|
||||
\citation{liu24mstgi}
|
||||
\citation{LEWIS17datacube}
|
||||
\citation{riotiler25riotiler}
|
||||
\@writefile{lot}{\contentsline {table}{\numberline {II}{\ignorespaces Cluster Configurations}}{11}{}\protected@file@percent }
|
||||
\newlabel{table_config}{{II}{11}}
|
||||
\newlabel{sec_exp_env}{{\mbox {VIII-A}3}{11}}
|
||||
@@ -109,6 +113,8 @@
|
||||
\newlabel{fig:index_exp1}{{6}{11}}
|
||||
\newlabel{sec:Index_exp_1}{{\mbox {VIII-B}1}{11}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-B}1}I/O Selectivity Analysis}{11}{}\protected@file@percent }
|
||||
\newlabel{sec:Index_exp_2}{{\mbox {VIII-B}2}{11}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-B}2}End-to-End Retrieval Latency}{11}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp2_1}{{\mbox {VIII-B}2}{12}}
|
||||
\newlabel{fig:index_exp2_2}{{\mbox {VIII-B}2}{12}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces End-to-End retrieval latency}}{12}{}\protected@file@percent }
|
||||
@@ -117,16 +123,14 @@
|
||||
\newlabel{fig:index_exp2}{{7}{12}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Latency breakdown}}{12}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp2_3}{{8}{12}}
|
||||
\newlabel{sec:Index_exp_2}{{\mbox {VIII-B}2}{12}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-B}2}End-to-End Retrieval Latency}{12}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp3_1}{{\mbox {VIII-B}3}{12}}
|
||||
\newlabel{fig:index_exp3_2}{{\mbox {VIII-B}3}{12}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Ablation analysis}}{12}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {I/O reduction analysis}}}{12}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Latency breakdown}}}{12}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp3}{{9}{12}}
|
||||
\newlabel{sec:Index_exp_3}{{\mbox {VIII-B}3}{12}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-B}3}Ablation Study}{12}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp3_1}{{\mbox {VIII-B}3}{13}}
|
||||
\newlabel{fig:index_exp3_2}{{\mbox {VIII-B}3}{13}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {9}{\ignorespaces Ablation analysis}}{13}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {I/O reduction analysis}}}{13}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {Latency breakdown}}}{13}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp3}{{9}{13}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Impact of grid resolution on query latency}}{13}{}\protected@file@percent }
|
||||
\newlabel{fig:index_exp3_3}{{10}{13}}
|
||||
\newlabel{fig:index_exp4_2}{{\mbox {VIII-B}4}{13}}
|
||||
@@ -137,43 +141,54 @@
|
||||
\newlabel{fig:index_exp4}{{11}{13}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-B}4}Index Construction and Storage Overhead}{13}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsection}{\numberline {\mbox {VIII-C}}Evaluating the Concurrency Control}{13}{}\protected@file@percent }
|
||||
\citation{Thomson12Calvin}
|
||||
\citation{Wu25OOCC}
|
||||
\citation{Habibi25Brook2PL}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-C}1}Concurrency Scalability}{14}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-C}2}Storage-Level Effects and Request Collapse}{14}{}\protected@file@percent }
|
||||
\newlabel{sec:ModeSwitch}{{\mbox {VIII-C}3}{14}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-C}3}Deterministic and Non-Deterministic Modes}{14}{}\protected@file@percent }
|
||||
\citation{Behzad13HDF5}
|
||||
\citation{Chen98SA,Robert20SA}
|
||||
\citation{Agarwal19TPE}
|
||||
\citation{Bagbaba20RF}
|
||||
\citation{Rajesh24TunIO}
|
||||
\newlabel{fig:cc_exp1_3}{{12(a)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_3}{{(a)}{15}}
|
||||
\newlabel{fig:cc_exp1_2}{{12(b)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_2}{{(b)}{15}}
|
||||
\newlabel{fig:cc_exp1_1}{{12(c)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_1}{{(c)}{15}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Concurrency scalability analysis under varying spatial overlap ratios ($\sigma $).}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Mean latency under varying spatial overlap ratios ($\sigma $) and concurrent client counts.}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {$\sigma =0.4$}}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {$\sigma =0.6$}}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {$\sigma =0.8$}}}{15}{}\protected@file@percent }
|
||||
\newlabel{fig:cc_exp1}{{12}{15}}
|
||||
\newlabel{fig:cc_exp1_3p95}{{13(a)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_3p95}{{(a)}{15}}
|
||||
\newlabel{fig:cc_exp1_2p95}{{13(b)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_2p95}{{(b)}{15}}
|
||||
\newlabel{fig:cc_exp1_1p95}{{13(c)}{15}}
|
||||
\newlabel{sub@fig:cc_exp1_1p95}{{(c)}{15}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces 95th percentile (P95) latency under varying spatial overlap ratios ($\sigma $) and concurrent client counts.}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {$\sigma =0.4$}}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {$\sigma =0.6$}}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(c)}{\ignorespaces {$\sigma =0.8$}}}{15}{}\protected@file@percent }
|
||||
\newlabel{fig:cc_exp1p95}{{13}{15}}
|
||||
\newlabel{fig:cc_exp3_1}{{\mbox {VIII-C}2}{15}}
|
||||
\newlabel{fig:cc_exp3_2}{{\mbox {VIII-C}2}{15}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces The data volume reduction and request collapse}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces The data volume reduction and request collapse}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(a)}{\ignorespaces {The number of clients}}}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {subfigure}{\numberline{(b)}{\ignorespaces {The number of clients}}}{15}{}\protected@file@percent }
|
||||
\newlabel{fig:cc_exp3}{{13}{15}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {14}{\ignorespaces Mode Switching}}{15}{}\protected@file@percent }
|
||||
\newlabel{fig:cc_exp4}{{14}{15}}
|
||||
\@writefile{toc}{\contentsline {subsection}{\numberline {\mbox {VIII-D}}Evaluating the I/O Tuning}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces Efficiency analysis of the tuning framework.}}{16}{}\protected@file@percent }
|
||||
\newlabel{fig:tune_exp1}{{15}{16}}
|
||||
\newlabel{fig:cc_exp3}{{14}{15}}
|
||||
\newlabel{sec:ModeSwitch}{{\mbox {VIII-C}3}{15}}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-C}3}Deterministic and Non-Deterministic Modes}{15}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {15}{\ignorespaces Mode Switching}}{15}{}\protected@file@percent }
|
||||
\newlabel{fig:cc_exp4}{{15}{15}}
|
||||
\citation{Behzad13HDF5}
|
||||
\citation{Robert20SA}
|
||||
\citation{Agarwal19TPE}
|
||||
\citation{Bagbaba20RF}
|
||||
\citation{Rajesh24TunIO}
|
||||
\@writefile{toc}{\contentsline {subsection}{\numberline {\mbox {VIII-D}}Evaluating the I/O Tuning}{16}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-D}1}Convergence Speed and Tuning Cost}{16}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-D}2}Adaptation to Workload Shifts}{16}{}\protected@file@percent }
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Mode Switching}}{16}{}\protected@file@percent }
|
||||
\newlabel{fig:tune_exp3}{{16}{16}}
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {IX}Conclusions}{16}{}\protected@file@percent }
|
||||
\newlabel{sec:Con}{{IX}{16}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {16}{\ignorespaces Efficiency analysis of the tuning framework.}}{16}{}\protected@file@percent }
|
||||
\newlabel{fig:tune_exp1}{{16}{16}}
|
||||
\@writefile{lof}{\contentsline {figure}{\numberline {17}{\ignorespaces Latency evolution under workload shift}}{16}{}\protected@file@percent }
|
||||
\newlabel{fig:tune_exp3}{{17}{16}}
|
||||
\bibstyle{IEEEtran}
|
||||
\bibdata{IEEEabrv,references}
|
||||
\bibcite{Ma15RS_bigdata}{1}
|
||||
@@ -190,6 +205,10 @@
|
||||
\bibcite{riotiler25riotiler}{12}
|
||||
\bibcite{Thomson12Calvin}{13}
|
||||
\bibcite{Lim17OCC}{14}
|
||||
\@writefile{toc}{\contentsline {subsubsection}{\numberline {\mbox {VIII-D}2}Adaptation to Workload Shifts}{17}{}\protected@file@percent }
|
||||
\@writefile{toc}{\contentsline {section}{\numberline {IX}Conclusions}{17}{}\protected@file@percent }
|
||||
\newlabel{sec:Con}{{IX}{17}}
|
||||
\@writefile{toc}{\contentsline {section}{References}{17}{}\protected@file@percent }
|
||||
\bibcite{Rajesh24TunIO}{15}
|
||||
\bibcite{Preil25GMAB}{16}
|
||||
\bibcite{Tang12Quad-Tree}{17}
|
||||
@@ -207,9 +226,9 @@
|
||||
\bibcite{Behzad13HDF5}{29}
|
||||
\bibcite{Wang26RethinkingTuning}{30}
|
||||
\bibcite{Xie12supercomputer}{31}
|
||||
\bibcite{Chen98SA}{32}
|
||||
\bibcite{Robert20SA}{33}
|
||||
\bibcite{Agarwal19TPE}{34}
|
||||
\bibcite{Bagbaba20RF}{35}
|
||||
\@writefile{toc}{\contentsline {section}{References}{17}{}\protected@file@percent }
|
||||
\gdef \@abspage@last{17}
|
||||
\bibcite{Habibi25Brook2PL}{32}
|
||||
\bibcite{Chen98SA}{33}
|
||||
\bibcite{Robert20SA}{34}
|
||||
\bibcite{Agarwal19TPE}{35}
|
||||
\bibcite{Bagbaba20RF}{36}
|
||||
\gdef \@abspage@last{18}
|
||||
|
||||
@@ -208,6 +208,11 @@ B.~Xie, J.~S. Chase, D.~Dillow, O.~Drokin, S.~Klasky, S.~Oral, and
|
||||
J.~K. Hollingsworth, Ed.\hskip 1em plus 0.5em minus 0.4em\relax {IEEE/ACM},
|
||||
2012, p.~8.
|
||||
|
||||
\bibitem{Habibi25Brook2PL}
|
||||
F.~Habibi, J.~Fang, T.~Lorido-Botran, and F.~Nawab, ``Brook-2pl: Tolerating
|
||||
high contention workloads with a deadlock-free two-phase locking protocol,''
|
||||
\emph{Proc. ACM Manag. Data}, vol.~3, no.~6, Dec. 2025.
|
||||
|
||||
\bibitem{Chen98SA}
|
||||
Y.~Chen, M.~Winslett, Y.~Cho, and S.~Kuo, ``Automatic parallel {I/O}
|
||||
performance optimization in panda,'' in \emph{Proceedings of the Tenth Annual
|
||||
|
||||
@@ -20,45 +20,45 @@ Warning--empty author in LEWIS17datacube
|
||||
Warning--empty booktitle in Lim17OCC
|
||||
|
||||
Done.
|
||||
You've used 35 entries,
|
||||
You've used 36 entries,
|
||||
4087 wiz_defined-function locations,
|
||||
1851 strings with 32312 characters,
|
||||
and the built_in function-call counts, 33516 in all, are:
|
||||
= -- 2590
|
||||
> -- 1011
|
||||
< -- 298
|
||||
+ -- 542
|
||||
- -- 184
|
||||
* -- 1661
|
||||
:= -- 4680
|
||||
add.period$ -- 86
|
||||
call.type$ -- 35
|
||||
change.case$ -- 53
|
||||
1856 strings with 32536 characters,
|
||||
and the built_in function-call counts, 34074 in all, are:
|
||||
= -- 2636
|
||||
> -- 1028
|
||||
< -- 300
|
||||
+ -- 550
|
||||
- -- 188
|
||||
* -- 1691
|
||||
:= -- 4776
|
||||
add.period$ -- 88
|
||||
call.type$ -- 36
|
||||
change.case$ -- 54
|
||||
chr.to.int$ -- 705
|
||||
cite$ -- 38
|
||||
duplicate$ -- 2325
|
||||
empty$ -- 2614
|
||||
format.name$ -- 221
|
||||
if$ -- 7884
|
||||
cite$ -- 39
|
||||
duplicate$ -- 2372
|
||||
empty$ -- 2658
|
||||
format.name$ -- 226
|
||||
if$ -- 8009
|
||||
int.to.chr$ -- 0
|
||||
int.to.str$ -- 35
|
||||
missing$ -- 436
|
||||
newline$ -- 130
|
||||
num.names$ -- 48
|
||||
pop$ -- 1041
|
||||
int.to.str$ -- 36
|
||||
missing$ -- 446
|
||||
newline$ -- 133
|
||||
num.names$ -- 49
|
||||
pop$ -- 1061
|
||||
preamble$ -- 1
|
||||
purify$ -- 0
|
||||
quote$ -- 2
|
||||
skip$ -- 2527
|
||||
skip$ -- 2569
|
||||
stack$ -- 0
|
||||
substring$ -- 1699
|
||||
swap$ -- 1994
|
||||
text.length$ -- 63
|
||||
substring$ -- 1700
|
||||
swap$ -- 2030
|
||||
text.length$ -- 65
|
||||
text.prefix$ -- 0
|
||||
top$ -- 5
|
||||
type$ -- 35
|
||||
type$ -- 36
|
||||
warning$ -- 3
|
||||
while$ -- 158
|
||||
width$ -- 37
|
||||
write$ -- 375
|
||||
while$ -- 159
|
||||
width$ -- 38
|
||||
write$ -- 385
|
||||
(There were 3 warnings)
|
||||
|
||||
241
rs_retrieval.log
241
rs_retrieval.log
@@ -1,4 +1,4 @@
|
||||
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (MiKTeX 23.4) (preloaded format=pdflatex 2025.10.23) 11 FEB 2026 11:08
|
||||
This is pdfTeX, Version 3.141592653-2.6-1.40.25 (MiKTeX 23.4) (preloaded format=pdflatex 2025.10.23) 28 FEB 2026 15:30
|
||||
entering extended mode
|
||||
restricted \write18 enabled.
|
||||
%&-line parsing enabled.
|
||||
@@ -499,11 +499,11 @@ File: fig/cc.png Graphic file (type png)
|
||||
Package pdftex.def Info: fig/cc.png used on input line 277.
|
||||
(pdftex.def) Requested size: 464.39685pt x 151.93185pt.
|
||||
[7]
|
||||
<fig/unlock.png, id=48, 503.9628pt x 314.1336pt>
|
||||
<fig/unlock.png, id=48, 512.1534pt x 314.1336pt>
|
||||
File: fig/unlock.png Graphic file (type png)
|
||||
<use fig/unlock.png>
|
||||
Package pdftex.def Info: fig/unlock.png used on input line 336.
|
||||
(pdftex.def) Requested size: 216.81pt x 135.1419pt.
|
||||
(pdftex.def) Requested size: 216.81pt x 132.98494pt.
|
||||
[8 <./fig/cc.png> <./fig/unlock.png>]
|
||||
Underfull \hbox (badness 4518) in paragraph at lines 378--378
|
||||
[]\OT1/ptm/b/n/10 Algorithm 1: \OT1/ptm/m/n/10 Surrogate-Assisted Ge-netic Mult
|
||||
@@ -528,51 +528,51 @@ Overfull \hbox (2.45601pt too wide) in paragraph at lines 458--472
|
||||
[]
|
||||
|
||||
[10]
|
||||
<exp/index_exp1_1.pdf, id=62, 253.94875pt x 218.8175pt>
|
||||
<exp/index_exp1_1.pdf, id=62, 253.94875pt x 220.825pt>
|
||||
File: exp/index_exp1_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp1_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp1_1.pdf used on input line 547.
|
||||
(pdftex.def) Requested size: 111.27748pt x 95.88266pt.
|
||||
<exp/index_exp1_2.pdf, id=63, 253.94875pt x 217.81375pt>
|
||||
Package pdftex.def Info: exp/index_exp1_1.pdf used on input line 553.
|
||||
(pdftex.def) Requested size: 111.27748pt x 96.76231pt.
|
||||
<exp/index_exp1_2.pdf, id=63, 253.94875pt x 219.82124pt>
|
||||
File: exp/index_exp1_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp1_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp1_2.pdf used on input line 553.
|
||||
Package pdftex.def Info: exp/index_exp1_2.pdf used on input line 559.
|
||||
(pdftex.def) Requested size: 111.27748pt x 96.32248pt.
|
||||
<exp/index_exp2_1.pdf, id=64, 253.94875pt x 217.81375pt>
|
||||
File: exp/index_exp2_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_1.pdf used on input line 575.
|
||||
(pdftex.def) Requested size: 111.27748pt x 95.44283pt.
|
||||
<exp/index_exp2_2.pdf, id=65, 253.94875pt x 217.81375pt>
|
||||
File: exp/index_exp2_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_2.pdf used on input line 581.
|
||||
(pdftex.def) Requested size: 111.27748pt x 95.44283pt.
|
||||
<exp/index_exp2_3.pdf, id=66, 253.94875pt x 193.72375pt>
|
||||
File: exp/index_exp2_3.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_3.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_3.pdf used on input line 589.
|
||||
(pdftex.def) Requested size: 130.08621pt x 99.23824pt.
|
||||
[11 <./exp/index_exp1_1.pdf> <./exp/index_exp1_2.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp1_2.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
>]
|
||||
<exp/index_exp2_1.pdf, id=88, 253.94875pt x 215.80624pt>
|
||||
File: exp/index_exp2_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_1.pdf used on input line 569.
|
||||
(pdftex.def) Requested size: 111.27748pt x 94.56317pt.
|
||||
<exp/index_exp2_2.pdf, id=89, 253.94875pt x 215.80624pt>
|
||||
File: exp/index_exp2_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_2.pdf used on input line 575.
|
||||
(pdftex.def) Requested size: 111.27748pt x 94.56317pt.
|
||||
<exp/index_exp2_3.pdf, id=90, 253.94875pt x 191.71625pt>
|
||||
File: exp/index_exp2_3.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp2_3.pdf>
|
||||
Package pdftex.def Info: exp/index_exp2_3.pdf used on input line 583.
|
||||
(pdftex.def) Requested size: 130.08621pt x 98.20985pt.
|
||||
<exp/index_exp3_1.pdf, id=91, 253.94875pt x 194.7275pt>
|
||||
<exp/index_exp3_1.pdf, id=91, 253.94875pt x 196.735pt>
|
||||
File: exp/index_exp3_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp3_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp3_1.pdf used on input line 601.
|
||||
(pdftex.def) Requested size: 105.4204pt x 80.83417pt.
|
||||
<exp/index_exp3_2.pdf, id=92, 253.94875pt x 196.735pt>
|
||||
Package pdftex.def Info: exp/index_exp3_1.pdf used on input line 607.
|
||||
(pdftex.def) Requested size: 105.4204pt x 81.6675pt.
|
||||
<exp/index_exp3_2.pdf, id=92, 253.94875pt x 197.73875pt>
|
||||
File: exp/index_exp3_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp3_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp3_2.pdf used on input line 607.
|
||||
(pdftex.def) Requested size: 105.4204pt x 81.6675pt.
|
||||
<exp/index_exp3_3.pdf, id=93, 253.94875pt x 204.765pt>
|
||||
Package pdftex.def Info: exp/index_exp3_2.pdf used on input line 613.
|
||||
(pdftex.def) Requested size: 105.4204pt x 82.08418pt.
|
||||
<exp/index_exp3_3.pdf, id=93, 253.94875pt x 206.7725pt>
|
||||
File: exp/index_exp3_3.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp3_3.pdf>
|
||||
Package pdftex.def Info: exp/index_exp3_3.pdf used on input line 615.
|
||||
(pdftex.def) Requested size: 130.08621pt x 104.8943pt.
|
||||
Package pdftex.def Info: exp/index_exp3_3.pdf used on input line 621.
|
||||
(pdftex.def) Requested size: 130.08621pt x 105.92268pt.
|
||||
[12 <./exp/index_exp2_1.pdf> <./exp/index_exp2_2.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp2_2.pdf): PDF inclusion: mult
|
||||
@@ -581,26 +581,26 @@ iple pdfs with page group included in a single page
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp2_3.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
>]
|
||||
<exp/index_exp4_2.pdf, id=174, 253.94875pt x 194.7275pt>
|
||||
File: exp/index_exp4_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp4_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp4_2.pdf used on input line 633.
|
||||
(pdftex.def) Requested size: 114.79138pt x 88.02173pt.
|
||||
<exp/index_exp4_1.pdf, id=175, 253.94875pt x 200.75pt>
|
||||
File: exp/index_exp4_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp4_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp4_1.pdf used on input line 639.
|
||||
(pdftex.def) Requested size: 106.5929pt x 84.26234pt.
|
||||
[13 <./exp/index_exp3_1.pdf> <./exp/index_exp3_2.pdf
|
||||
> <./exp/index_exp3_1.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp3_1.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
> <./exp/index_exp3_2.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp3_2.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
> <./exp/index_exp3_3.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp3_3.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
> <./exp/index_exp4_2.pdf
|
||||
>]
|
||||
<exp/index_exp4_2.pdf, id=191, 253.94875pt x 195.73125pt>
|
||||
File: exp/index_exp4_2.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp4_2.pdf>
|
||||
Package pdftex.def Info: exp/index_exp4_2.pdf used on input line 639.
|
||||
(pdftex.def) Requested size: 114.79138pt x 88.47545pt.
|
||||
<exp/index_exp4_1.pdf, id=192, 253.94875pt x 202.7575pt>
|
||||
File: exp/index_exp4_1.pdf Graphic file (type pdf)
|
||||
<use exp/index_exp4_1.pdf>
|
||||
Package pdftex.def Info: exp/index_exp4_1.pdf used on input line 645.
|
||||
(pdftex.def) Requested size: 106.5929pt x 85.10497pt.
|
||||
[13 <./exp/index_exp3_3.pdf> <./exp/index_exp4_2.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp4_2.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
@@ -609,57 +609,72 @@ iple pdfs with page group included in a single page
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/index_exp4_1.pdf): PDF inclusion: mult
|
||||
iple pdfs with page group included in a single page
|
||||
>]
|
||||
Underfull \hbox (badness 1939) in paragraph at lines 652--654
|
||||
[]\OT1/ptm/m/n/10 For com-par-i-son, we eval-u-ate the fol-low-ing ex-e-cu-tion
|
||||
|
||||
[]
|
||||
|
||||
<exp/cc_exp1_3.pdf, id=233, 253.94875pt x 205.76875pt>
|
||||
File: exp/cc_exp1_3.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_3.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_3.pdf used on input line 661.
|
||||
(pdftex.def) Requested size: 151.76744pt x 122.97867pt.
|
||||
<exp/cc_exp1_2.pdf, id=234, 253.94875pt x 205.76875pt>
|
||||
File: exp/cc_exp1_2.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_2.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_2.pdf used on input line 663.
|
||||
(pdftex.def) Requested size: 151.76744pt x 122.97867pt.
|
||||
<exp/cc_exp1_1.pdf, id=235, 253.94875pt x 205.76875pt>
|
||||
File: exp/cc_exp1_1.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_1.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_1.pdf used on input line 667.
|
||||
(pdftex.def) Requested size: 151.76744pt x 122.97867pt.
|
||||
<exp/cc_exp3_1.pdf, id=236, 253.94875pt x 205.76875pt>
|
||||
<exp/cc_exp1_3_mean.pdf, id=233, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_3_mean.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_3_mean.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_3_mean.pdf used on input line 675.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp1_2_mean.pdf, id=234, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_2_mean.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_2_mean.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_2_mean.pdf used on input line 677.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp1_1_mean.pdf, id=235, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_1_mean.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_1_mean.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_1_mean.pdf used on input line 681.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp1_3_p95.pdf, id=236, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_3_p95.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_3_p95.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_3_p95.pdf used on input line 691.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp1_2_p95.pdf, id=237, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_2_p95.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_2_p95.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_2_p95.pdf used on input line 693.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp1_1_p95.pdf, id=238, 253.94875pt x 222.8325pt>
|
||||
File: exp/cc_exp1_1_p95.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp1_1_p95.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp1_1_p95.pdf used on input line 697.
|
||||
(pdftex.def) Requested size: 151.76744pt x 133.17691pt.
|
||||
<exp/cc_exp3_1.pdf, id=239, 253.94875pt x 220.825pt>
|
||||
File: exp/cc_exp3_1.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp3_1.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp3_1.pdf used on input line 691.
|
||||
(pdftex.def) Requested size: 110.10678pt x 89.21667pt.
|
||||
<exp/cc_exp3_2.pdf, id=237, 253.94875pt x 196.735pt>
|
||||
Package pdftex.def Info: exp/cc_exp3_1.pdf used on input line 724.
|
||||
(pdftex.def) Requested size: 106.5929pt x 92.68857pt.
|
||||
<exp/cc_exp3_2.pdf, id=240, 253.94875pt x 210.7875pt>
|
||||
File: exp/cc_exp3_2.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp3_2.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp3_2.pdf used on input line 697.
|
||||
(pdftex.def) Requested size: 113.62068pt x 88.02258pt.
|
||||
<exp/cc_exp4.pdf, id=238, 253.94875pt x 210.7875pt>
|
||||
Package pdftex.def Info: exp/cc_exp3_2.pdf used on input line 730.
|
||||
(pdftex.def) Requested size: 111.27748pt x 92.36403pt.
|
||||
[14]
|
||||
<exp/cc_exp4.pdf, id=244, 254.9525pt x 212.795pt>
|
||||
File: exp/cc_exp4.pdf Graphic file (type pdf)
|
||||
<use exp/cc_exp4.pdf>
|
||||
Package pdftex.def Info: exp/cc_exp4.pdf used on input line 714.
|
||||
(pdftex.def) Requested size: 130.08621pt x 107.97943pt.
|
||||
[14]
|
||||
Underfull \vbox (badness 10000) has occurred while \output is active []
|
||||
Package pdftex.def Info: exp/cc_exp4.pdf used on input line 747.
|
||||
(pdftex.def) Requested size: 130.08621pt x 108.5792pt.
|
||||
[15 <./exp/cc_exp1_3_mean.pdf> <./exp/cc_exp1_2_mean.pdf
|
||||
|
||||
<exp/tune_exp1_1.pdf, id=242, 253.94875pt x 224.84pt>
|
||||
File: exp/tune_exp1_1.pdf Graphic file (type pdf)
|
||||
<use exp/tune_exp1_1.pdf>
|
||||
Package pdftex.def Info: exp/tune_exp1_1.pdf used on input line 749.
|
||||
(pdftex.def) Requested size: 130.08621pt x 115.17805pt.
|
||||
[15 <./exp/cc_exp1_3.pdf> <./exp/cc_exp1_2.pdf
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_2_mean.pdf): PDF inclusion: mu
|
||||
ltiple pdfs with page group included in a single page
|
||||
> <./exp/cc_exp1_1_mean.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_2.pdf): PDF inclusion: multipl
|
||||
e pdfs with page group included in a single page
|
||||
> <./exp/cc_exp1_1.pdf
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_1_mean.pdf): PDF inclusion: mu
|
||||
ltiple pdfs with page group included in a single page
|
||||
> <./exp/cc_exp1_3_p95.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_1.pdf): PDF inclusion: multipl
|
||||
e pdfs with page group included in a single page
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_3_p95.pdf): PDF inclusion: mul
|
||||
tiple pdfs with page group included in a single page
|
||||
> <./exp/cc_exp1_2_p95.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_2_p95.pdf): PDF inclusion: mul
|
||||
tiple pdfs with page group included in a single page
|
||||
> <./exp/cc_exp1_1_p95.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp1_1_p95.pdf): PDF inclusion: mul
|
||||
tiple pdfs with page group included in a single page
|
||||
> <./exp/cc_exp3_1.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp3_1.pdf): PDF inclusion: multipl
|
||||
@@ -673,41 +688,51 @@ e pdfs with page group included in a single page
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/cc_exp4.pdf): PDF inclusion: multiple
|
||||
pdfs with page group included in a single page
|
||||
>]
|
||||
<exp/tune_exp3_1.pdf, id=311, 253.94875pt x 216.81pt>
|
||||
File: exp/tune_exp3_1.pdf Graphic file (type pdf)
|
||||
<use exp/tune_exp3_1.pdf>
|
||||
Package pdftex.def Info: exp/tune_exp3_1.pdf used on input line 764.
|
||||
(pdftex.def) Requested size: 130.08621pt x 111.06456pt.
|
||||
[16 <./exp/tune_exp1_1.pdf> <./exp/tune_exp3_1.pdf
|
||||
Underfull \hbox (badness 2469) in paragraph at lines 770--771
|
||||
[]\OT1/ptm/b/n/10 TPE \OT1/ptm/m/n/10 [35]: A model-based se-quen-tial op-ti-mi
|
||||
za-tion
|
||||
[]
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/tune_exp3_1.pdf): PDF inclusion: multi
|
||||
<exp/tune_exp1_1.pdf, id=361, 253.94875pt x 226.8475pt>
|
||||
File: exp/tune_exp1_1.pdf Graphic file (type pdf)
|
||||
<use exp/tune_exp1_1.pdf>
|
||||
Package pdftex.def Info: exp/tune_exp1_1.pdf used on input line 782.
|
||||
(pdftex.def) Requested size: 130.08621pt x 116.20644pt.
|
||||
<exp/tune_exp2_1.pdf, id=362, 253.94875pt x 228.855pt>
|
||||
File: exp/tune_exp2_1.pdf Graphic file (type pdf)
|
||||
<use exp/tune_exp2_1.pdf>
|
||||
Package pdftex.def Info: exp/tune_exp2_1.pdf used on input line 797.
|
||||
(pdftex.def) Requested size: 130.08621pt x 117.2348pt.
|
||||
[16 <./exp/tune_exp1_1.pdf> <./exp/tune_exp2_1.pdf
|
||||
|
||||
pdfTeX warning: pdflatex.exe (file ./exp/tune_exp2_1.pdf): PDF inclusion: multi
|
||||
ple pdfs with page group included in a single page
|
||||
>]
|
||||
Underfull \hbox (badness 2495) in paragraph at lines 779--780
|
||||
Underfull \hbox (badness 2495) in paragraph at lines 820--821
|
||||
[]\OT1/ptm/m/n/10 This work is sup-ported by the Na-tional Key R&D
|
||||
[]
|
||||
|
||||
|
||||
Underfull \hbox (badness 2799) in paragraph at lines 779--780
|
||||
Underfull \hbox (badness 2799) in paragraph at lines 820--821
|
||||
\OT1/ptm/m/n/10 Pro-gram of China ``In-ter-gov-ern-men-tal In-ter-na-tional Sci
|
||||
-
|
||||
[]
|
||||
|
||||
|
||||
Underfull \hbox (badness 7576) in paragraph at lines 779--780
|
||||
Underfull \hbox (badness 7576) in paragraph at lines 820--821
|
||||
\OT1/ptm/m/n/10 ence and Tech-nol-ogy In-no-va-tion Co-op-er-a-tion" (Grant
|
||||
[]
|
||||
|
||||
(rs_retrieval.bbl) [17] (rs_retrieval.aux)
|
||||
(rs_retrieval.bbl [17]) [18] (rs_retrieval.aux)
|
||||
|
||||
LaTeX Warning: There were multiply-defined labels.
|
||||
|
||||
)
|
||||
Here is how much of TeX's memory you used:
|
||||
5769 strings out of 476331
|
||||
98432 string characters out of 5797649
|
||||
5798 strings out of 476331
|
||||
99238 string characters out of 5797649
|
||||
1882660 words of memory out of 5000000
|
||||
26073 multiletter control sequences out of 15000+600000
|
||||
26099 multiletter control sequences out of 15000+600000
|
||||
561830 words of font info for 131 fonts, out of 8000000 for 9000
|
||||
1145 hyphenation exceptions out of 8191
|
||||
62i,17n,67p,2484b,497s stack positions out of 10000i,1000n,20000p,200000b,200000s
|
||||
@@ -729,9 +754,9 @@ urier/ucrr8a.pfb><D:/software/ctex/MiKTeX/fonts/type1/urw/times/utmb8a.pfb><D:/
|
||||
software/ctex/MiKTeX/fonts/type1/urw/times/utmbi8a.pfb><D:/software/ctex/MiKTeX
|
||||
/fonts/type1/urw/times/utmr8a.pfb><D:/software/ctex/MiKTeX/fonts/type1/urw/time
|
||||
s/utmri8a.pfb>
|
||||
Output written on rs_retrieval.pdf (17 pages, 2530708 bytes).
|
||||
Output written on rs_retrieval.pdf (18 pages, 2549070 bytes).
|
||||
PDF statistics:
|
||||
432 PDF objects out of 1000 (max. 8388607)
|
||||
486 PDF objects out of 1000 (max. 8388607)
|
||||
0 named destinations out of 1000 (max. 500000)
|
||||
116 words of extra memory for PDF output out of 10000 (max. 10000000)
|
||||
131 words of extra memory for PDF output out of 10000 (max. 10000000)
|
||||
|
||||
|
||||
BIN
rs_retrieval.pdf
BIN
rs_retrieval.pdf
Binary file not shown.
Binary file not shown.
117
rs_retrieval.tex
117
rs_retrieval.tex
@@ -158,7 +158,7 @@ subject to:
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=2.2in]{fig/overview.png}
|
||||
\caption{The workflow for processing concurrent spatio-temporal range retrievals in the system}
|
||||
\caption{Overview of the proposed system architecture and its main component modules}
|
||||
\label{fig:overview}
|
||||
\end{figure}
|
||||
|
||||
@@ -518,22 +518,28 @@ All experiments are conducted on a cluster with 9 homogenous nodes (1 master nod
|
||||
\end{table}
|
||||
|
||||
\subsection{Evaluating the Data Indexing Structure}
|
||||
To comprehensively evaluate the effectiveness of the proposed I/O-aware indexing structure, we conducted experiments on a single cluster node, as each node independently performs indexing for spatial retrieval in the distributed setting. We compare our approach against five representative baseline systems that span traditional database indexes, distributed NoSQL-based schemes, and state-of-the-art windowed I/O frameworks.
|
||||
We evaluate the I/O-aware indexing structure on a single cluster node, as each node independently performs indexing in the distributed setting. We compare our approach against five representative baselines spanning traditional database indexes, distributed NoSQL schemes, and window-based I/O frameworks.
|
||||
|
||||
The comparative methods are categorized as follows:
|
||||
We categorize the baseline systems into two groups based on their data retrieval strategies:
|
||||
|
||||
\textbf{Full-file Retrieval Systems.} These methods rely on metadata indexes for candidate filtering but retrieve entire image files during data extraction.
|
||||
|
||||
\begin{enumerate}
|
||||
\item \textbf{PostGIS (Full-file Retrieval):} A traditional relational database approach that employs R-tree spatial indexes for metadata filtering. While it efficiently identifies candidate images through spatial intersection tests, it retrieves entire image files during data extraction, incurring substantial I/O overhead even for small spatial queries.
|
||||
\item \textbf{PostGIS} \cite{Strobl08PostGIS}: Uses R-tree spatial indexes for metadata filtering. Retrieves entire image files during data extraction, incurring high I/O overhead even for small spatial queries.
|
||||
|
||||
\item \textbf{GeoMesa (Full-file Retrieval):} A distributed spatio-temporal index built on Hbase, which encodes spatial footprints using Z-order space-filling curves for scalable metadata discovery. Despite its superior indexing performance for billion-scale datasets, it still relies on full-file data loading.
|
||||
\item \textbf{GeoMesa} \cite{hughes15geomesa}: A distributed spatio-temporal index built on HBase, using Z-order space-filling curves for metadata discovery. Relies on full-file data loading despite superior indexing performance for billion-scale datasets.
|
||||
|
||||
\item \textbf{MSTGI (Full-file Retrieval):} A recently proposed multi-scale spatio-temporal grid index model \cite{liu24mstgi} that enhances GeoMesa through hierarchical time granularity (year/month/day) and Hilbert curve-based linearization. It inherits the full-file retrieval limitation, where data extraction cost remains decoupled from index-level optimization.
|
||||
\item \textbf{MSTGI} \cite{liu24mstgi}: Enhances GeoMesa through hierarchical time granularity (year/month/day) and Hilbert curve-based linearization. Inherits the full-file retrieval limitation.
|
||||
\end{enumerate}
|
||||
|
||||
\item \textbf{OpenDataCube (Window-based I/O):} A state-of-the-art data cube system that couples PostGIS indexes with windowed I/O via rasterio, enabling partial reads from monolithic image files. By leveraging GeoBox-based ROI computation and automatic overview selection, OpenDataCube represents the theoretical optimum for I/O selectivity but incurs runtime geospatial computation overhead to resolve pixel-to-geographic mappings.
|
||||
\textbf{Window-based I/O Systems.} These methods leverage windowed I/O to perform partial reads from monolithic image files, reducing unnecessary data transfer.
|
||||
|
||||
\item \textbf{Rio-tiler (Window-based I/O):} A lightweight raster reading engine optimized for dynamic tile generation. Similar to OpenDataCube, it employs PostGIS for spatial indexing and windowed I/O for partial data access, but features a streamlined execution path with minimal abstraction layers, resulting in lower per-query overhead. rio-tiler serves as a high-performance baseline for windowed reading without the complexity of full data cube management.
|
||||
\begin{enumerate}
|
||||
\item \textbf{OpenDataCube} \cite{LEWIS17datacube}: Couples PostGIS indexes with windowed I/O via rasterio. Achieves near-optimal I/O selectivity through GeoBox-based ROI computation and automatic overview selection, but incurs runtime geospatial computation overhead.
|
||||
|
||||
\item \textbf{Ours (I/O-aware Indexing):} The proposed approach leverages a dual-layer inverted index structure comprising Grid-to-Image (G2I) and Image-to-Grid (I2G) mappings. By pre-materializing grid-to-pixel correspondences at ingestion time, our method translates spatio-temporal predicates directly into byte-level read plans, completely eliminating runtime geometric computations while preserving minimal I/O volume through precise windowed access.
|
||||
\item \textbf{Rio-tiler} \cite{riotiler25riotiler}: A lightweight raster reading engine optimized for dynamic tile generation. Uses PostGIS for spatial indexing and windowed I/O for partial data access. Features a streamlined execution path with minimal abstraction layers, resulting in lower per-query overhead than OpenDataCube.
|
||||
|
||||
\item \textbf{Ours}: Employs a dual-layer inverted index (G2I and I2G mappings) that pre-materializes grid-to-pixel correspondences at ingestion time. Translates spatio-temporal predicates directly into byte-level read plans, eliminating runtime geometric computations while preserving minimal I/O volume through precise windowed access.
|
||||
\end{enumerate}
|
||||
|
||||
\subsubsection{I/O Selectivity Analysis}\label{sec:Index_exp_1}
|
||||
@@ -649,50 +655,77 @@ In this section, we evaluate the proposed hybrid coordination mechanism on a dis
|
||||
|
||||
To systematically control the workload characteristics, we developed a synthetic workload generator. We define the Spatial Overlap Ratio ($\sigma$) to quantify the extent of shared data regions among concurrent queries, ranging from $\sigma=0$ (disjoint) to $\sigma=0.9$ (highly concentrated hotspots). The number of concurrent clients varies from $N=1$ to $N=64$.
|
||||
|
||||
For comparison, we evaluate the following execution schemes:
|
||||
We evaluate the following execution schemes spanning uncoordinated access, deterministic scheduling, optimistic concurrency control, and pessimistic locking:
|
||||
|
||||
\begin{enumerate}
|
||||
\item \textbf{Baseline (Shared Index):} Metadata access is shared, but data retrieval remains uncoordinated, representing the state-of-the-art systems like OpenDataCube.
|
||||
\item \textbf{Ours:} The proposed mechanism featuring contention-aware switching, global I/O plan ordering, and window merging.
|
||||
\item \textbf{Shared Index}: Metadata access is shared, but data retrieval remains uncoordinated. Multiple clients independently issue windowed read requests without I/O-level coordination, minimizing coordination overhead but suffering from I/O amplification under high contention.
|
||||
|
||||
\item \textbf{Calvin} \cite{Thomson12Calvin}: A deterministic coordination mechanism that globally orders all I/O plans in a queue before execution and merges overlapping windows into single physical reads. Maximizes I/O sharing under high contention but incurs coordination overhead even when queries are spatially disjoint.
|
||||
|
||||
\item \textbf{OOCC} \cite{Wu25OOCC}: An optimistic concurrency control protocol for read-heavy, disaggregated settings. Queries proceed immediately without coordination, using lightweight validation to minimize round-trips. Achieves low latency for read-intensive workloads but fails to address I/O amplification since overlapping queries independently fetch identical byte ranges.
|
||||
|
||||
\item \textbf{Brook-2PL} \cite{Habibi25Brook2PL}: A deadlock-free two-phase locking protocol using SLW-Graph to prevent deadlocks and transaction chopping for early lock release. Ensures serialized access to shared image regions but uses record-level lock granularity rather than byte-range coordination, limiting concurrency for spatially overlapping retrievals.
|
||||
|
||||
\item \textbf{Ours}: A hybrid mechanism featuring contention-aware switching between deterministic and optimistic paths, global I/O plan ordering, and window merging. Adapts to spatial overlap ratio $\sigma$, combining low-latency optimistic execution under low contention with I/O-efficient deterministic coordination under high contention.
|
||||
\end{enumerate}
|
||||
|
||||
\begin{figure*}[htb]
|
||||
\centering
|
||||
\subfigure[$\sigma=0.4$]{\label{fig:cc_exp1_3}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_3.pdf}}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_3_mean.pdf}}
|
||||
\subfigure[$\sigma=0.6$]{\label{fig:cc_exp1_2}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_2.pdf}}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_2_mean.pdf}}
|
||||
%\subfigure[]{\label{fig:trans_candidate}
|
||||
%\includegraphics[width=0.6in]{trans_candidate.eps}}
|
||||
\subfigure[$\sigma=0.8$]{\label{fig:cc_exp1_1}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_1.pdf}}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_1_mean.pdf}}
|
||||
%\subfigure[]{\label{fig:diagram3}
|
||||
%\includegraphics[width=0.7in]{routing.eps}}
|
||||
\caption{% (a) Illustration of avoiding couplers.
|
||||
Concurrency scalability analysis under varying spatial overlap ratios ($\sigma$).}
|
||||
\caption{Mean latency under varying spatial overlap ratios ($\sigma$) and concurrent client counts.}
|
||||
\label{fig:cc_exp1}
|
||||
\end{figure*}
|
||||
|
||||
\begin{figure*}[htb]
|
||||
\centering
|
||||
\subfigure[$\sigma=0.4$]{\label{fig:cc_exp1_3p95}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_3_p95.pdf}}
|
||||
\subfigure[$\sigma=0.6$]{\label{fig:cc_exp1_2p95}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_2_p95.pdf}}
|
||||
%\subfigure[]{\label{fig:trans_candidate}
|
||||
%\includegraphics[width=0.6in]{trans_candidate.eps}}
|
||||
\subfigure[$\sigma=0.8$]{\label{fig:cc_exp1_1p95}
|
||||
\includegraphics[width=2.1in]{exp/cc_exp1_1_p95.pdf}}
|
||||
%\subfigure[]{\label{fig:diagram3}
|
||||
%\includegraphics[width=0.7in]{routing.eps}}
|
||||
\caption{95th percentile (P95) latency under varying spatial overlap ratios ($\sigma$) and concurrent client counts.}
|
||||
\label{fig:cc_exp1p95}
|
||||
\end{figure*}
|
||||
|
||||
\subsubsection{Concurrency Scalability}
|
||||
To evaluate the system's robustness under different workload characteristics, we conducted a sensitivity analysis by manipulating the Spatial Overlap Ratio ($\sigma$). We examined three distinct scenarios: low overlap ($\sigma=0.4$, simulating dispersed random queries), medium overlap ($\sigma=0.6$), and high overlap ($\sigma=0.8$, simulating hotspot analysis). Note that $\sigma=0.4$ is defined as a low overlap scenario because: when $\sigma \le 0.35$, the performance of the deterministic scheduling mode is even lower than that of the optimistic mode (See Sec.~\ref{sec:ModeSwitch}). So, the performance of our method is the same as the Baseline when $\sigma \le 0.35$. Fig.~\ref{fig:cc_exp1} illustrates the query latency trends as the number of concurrent clients increases from 1 to 64.
|
||||
To evaluate system robustness under varying workload characteristics, we conducted a sensitivity analysis by varying the Spatial Overlap Ratio ($\sigma$). We examined three scenarios: low overlap ($\sigma=0.4$, simulating dispersed random queries), medium overlap ($\sigma=0.6$), and high overlap ($\sigma=0.8$, simulating concentrated hotspot analysis). The number of concurrent clients varies from $N=1$ to $N=64$. Fig.~\ref{fig:cc_exp1} and Fig.~\ref{fig:cc_exp1p95} present the mean and 95th percentile (P95) latency, respectively.
|
||||
|
||||
The results reveal a fundamental divergence in how the two systems respond to data contention. As shown in Fig.~\ref{fig:cc_exp1}(a), when query footprints are spatially dispersed, the opportunities for physical I/O merging are minimal. Consequently, the performance of both systems is primarily constrained by the aggregate physical bandwidth of the storage cluster. Both approaches exhibit linear latency growth with respect to the client count. At $N=64$, the Baseline reaches a mean latency of approx. 37,000 ms, while our method records approx. 30,000 ms. Although our method maintains a slight performance edge due to the reduced read amplification provided by the I/O-aware index, it inevitably degrades to a linear query processing mode similar to the Baseline. This confirms that without spatial locality to leverage request collapsing, the system is bound by the hardware's I/O throughput limits.
|
||||
Note that our hybrid method automatically switches between execution modes based on overlap level. When $\sigma \le 0.35$, deterministic scheduling introduces unnecessary coordination overhead, so the system operates in optimistic mode. At $\sigma=0.4$, the system remains in optimistic mode, making Ours identical to OOCC. When $\sigma \ge 0.6$, spatial contention becomes significant, and the system switches to deterministic mode, making Ours identical to Calvin. This threshold is empirically validated in Sec.~\ref{sec:ModeSwitch}.
|
||||
|
||||
A sharp performance divergence is observed as the overlap ratio increases to $\sigma=0.8$ (Fig.~\ref{fig:cc_exp1}(b)). The Baseline suffers from severe performance degradation, with latency spiking from 37,000 ms (at $\sigma=0.2$) to 60,000 ms (at $\sigma=0.8$) under peak load. This deterioration is attributed to the I/O blender effect and lock convoys: highly concurrent requests competing for the same index pages and disk blocks cause excessive disk seek thrashing and thread blocking, significantly reducing effective throughput. Conversely, our method demonstrates sub-linear scalability in this scenario. The latency at $N=64$ drops significantly to 1,100 ms—a $54\times$ speedup over the Baseline. This result validates the efficacy of the \textit{Request Collapse} mechanism. As $\sigma$ increases, the probability of multiple logical queries targeting the same physical byte ranges rises, allowing the scheduler to merge $N$ concurrent requests into a single physical I/O operation.
|
||||
As shown in Fig.~\ref{fig:cc_exp1}(a), when queries access spatial regions with low overlap, all methods exhibit nearly linear latency growth. At $N=64$, OOCC/Ours achieves the best mean latency (30,000 ms) by avoiding coordination overhead, while Calvin (36,500 ms) and Brook-2PL (34,500 ms) introduce unnecessary serialization costs. Shared Index reaches 37,000 ms due to lack of I/O optimization. Similar trends appear in P95 latency (Fig.~\ref{fig:cc_exp1p95}(a)), where OOCC/Ours maintains 45,000 ms while others exceed 50,000 ms. Under low contention, all systems are fundamentally bandwidth-limited, and coordination mechanisms provide minimal benefit.
|
||||
|
||||
The medium-overlap scenario (Fig.~\ref{fig:cc_exp1}(c)) serves as a transition point, where our method achieves a mean latency of approx. 6,000 ms at peak load, compared to 40,000 ms for the Baseline. This indicates that the system's efficiency scales dynamically with the degree of data contention. The experimental results demonstrate the workload-adaptive nature of the proposed architecture. While the system performs comparably to traditional approaches under dispersed workloads (limited by physical bandwidth), its advantages become order-of-magnitude significant in data-intensive, high-contention scenarios, effectively turning I/O contention into an opportunity for optimization.
|
||||
A dramatic divergence emerges in high overlap scenario, as shown in Fig.~\ref{fig:cc_exp1}(c). At $N=64$, Shared Index (60,000 ms), OOCC (52,000 ms), and Brook-2PL (70,000 ms) all suffer from severe I/O amplification, as concurrent retrievals independently fetch overlapping byte ranges. In contrast, Calvin/Ours achieves only 1,100 ms—a $54\times$ speedup over Shared Index. The Request Collapse mechanism globally de-duplicates overlapping windows, transforming 64 independent retrievals into a single coordinated read. P95 latency (Fig.~\ref{fig:cc_exp1p95}(c)) shows even sharper contrast: Calvin/Ours maintains 1,800 ms while OOCC and Brook-2PL reach 80,000 ms and 105,000 ms due to storage-level thrashing.
|
||||
|
||||
The medium overlap scenario in Fig.~\ref{fig:cc_exp1}(b) demonstrates moderate I/O sharing benefits. At $N=64$, Calvin/Ours achieves 6,000 ms, a $6.7\times$ speedup over Shared Index (40,000 ms) and $5.8\times$ over OOCC (35,000 ms).
|
||||
|
||||
The results reveal that traditional concurrency control mechanisms fail to address I/O-level redundancy. OOCC and Brook-2PL operate at the transaction isolation level, treating retrievals as independent operations without merging redundant storage accesses. Our hybrid method, in contrast, operates at the I/O execution layer, safely sharing physical reads across concurrent retrievals by leveraging immutable image data and deterministic ordering. The adaptive switching strategy preserves low latency under dispersed workloads while achieving order-of-magnitude speedups under high contention.
|
||||
|
||||
\subsubsection{Storage-Level Effects and Request Collapse}
|
||||
\begin{figure}[tb]
|
||||
\centering
|
||||
\subfigure[The number of clients]{
|
||||
\begin{minipage}[b]{0.227\textwidth}
|
||||
\includegraphics[width=0.94\textwidth]{exp/cc_exp3_1.pdf}
|
||||
\includegraphics[width=0.91\textwidth]{exp/cc_exp3_1.pdf}
|
||||
\end{minipage}
|
||||
}
|
||||
\label{fig:cc_exp3_1}
|
||||
\subfigure[The number of clients]{
|
||||
\begin{minipage}[b]{0.227\textwidth}
|
||||
\includegraphics[width=0.97\textwidth]{exp/cc_exp3_2.pdf}
|
||||
\includegraphics[width=0.95\textwidth]{exp/cc_exp3_2.pdf}
|
||||
\end{minipage}
|
||||
}
|
||||
\label{fig:cc_exp3_2}
|
||||
@@ -700,13 +733,13 @@ The medium-overlap scenario (Fig.~\ref{fig:cc_exp1}(c)) serves as a transition p
|
||||
\label{fig:cc_exp3}
|
||||
\end{figure}
|
||||
|
||||
To uncover the cause of the significant latency reduction observed in high-contention scenarios ($\sigma=0.8$), we further analyzed the internal I/O behavior of the system. Specifically, we measured the total volume of physical data transferred from disk and the number of backend I/O requests issued to the storage system. Fig.~\ref{fig:cc_exp3} compares the physical storage pressure between the shared index baseline and our method.
|
||||
To uncover the root cause of the significant latency reduction observed in high-contention scenarios ($\sigma=0.8$), we analyzed the internal I/O behavior of the system. Specifically, we measured the total volume of physical data transferred from disk and the number of backend I/O requests issued to the storage system. Fig.~\ref{fig:cc_exp3} compares the physical storage pressure across all methods.
|
||||
|
||||
Fig.~\ref{fig:cc_exp3}(a) plots the total physical data read size. The baseline exhibits a strict linear increase in data volume. At $N=64$, the system is forced to fetch 32 GB of data. This confirms that without coordination, logically overlapping queries translate into redundant physical reads, leading to severe bandwidth saturation. In contrast, our approach effectively decouples logical demand from physical execution. Although 64 clients logically request 32 GB of data, the request collapse mechanism merges these overlapping windows, resulting in only 5 GB of actual disk traffic. This 84\% reduction in data volume explains why our system avoids the bandwidth bottleneck.
|
||||
Fig.~\ref{fig:cc_exp3}(a) plots the total physical data read size. Shared Index, OOCC, and Brook-2PL all exhibit identical behavior: strict linear growth in data volume, reaching 32 GB at $N=64$. This confirms that traditional concurrency control mechanisms—whether uncoordinated (Shared Index), optimistic (OOCC), or pessimistic (Brook-2PL)—fail to address I/O-level redundancy. Each retrieval independently fetches overlapping byte ranges, resulting in redundant physical reads and severe bandwidth saturation. In sharp contrast, Calvin/Ours effectively decouples logical demand from physical execution. Although 64 clients logically request 32 GB of data, the global de-duplication mechanism merges overlapping windows, resulting in only 5 GB of actual disk traffic—an 84\% reduction that eliminates the bandwidth bottleneck.
|
||||
|
||||
Fig.~\ref{fig:cc_exp3}(b) illustrates the number of backend I/O requests (IOPS). The baseline generates distinct I/O requests for every client, reaching 12,800 requests at $N=64$. This massive influx of small random reads overwhelms the storage scheduler, causing excessive disk head seek latency. Our system keeps the request count low and stable. Even at peak load ($N=64$), the number of physical I/O requests is suppressed to 2,000.
|
||||
Fig.~\ref{fig:cc_exp3}(b) illustrates the number of backend I/O requests (IOPS). Again, Shared Index, OOCC, and Brook-2PL generate identical behavior: each client issues distinct I/O requests, reaching 12,800 IOPS at $N=64$. This massive influx of small random reads overwhelms the storage scheduler, causing excessive disk seek latency. Notably, even Brook-2PL's conservative locking does not reduce IOPS—it merely serializes access without merging redundant reads. Calvin/Ours keeps the request count low and stable, suppressing physical I/O requests to only 2,000 at peak load through range merging.
|
||||
|
||||
Fig.~\ref{fig:cc_exp3}(a) and Fig.~\ref{fig:cc_exp3}(b) demonstrate the Request Collapse effect. While 64 concurrent clients generate 12,800 IOPS in the baseline, our system collapses them into fewer than 600 physical operations.
|
||||
The results demonstrate that the Request Collapse mechanism operates at a fundamentally different layer than traditional concurrency control. While OOCC and Brook-2PL coordinate transaction execution, they treat each retrieval's I/O plan as an independent unit. Calvin/Ours, in contrast, globally analyzes all admitted I/O plans and collapses redundant storage accesses, transforming 12,800 logical IOPS into fewer than 600 physical operations.
|
||||
|
||||
\subsubsection{Deterministic and Non-Deterministic Modes}\label{sec:ModeSwitch}
|
||||
\begin{figure}
|
||||
@@ -727,20 +760,20 @@ Our hybrid approach successfully combines the benefits of both worlds. As shown
|
||||
\subsection{Evaluating the I/O Tuning}
|
||||
In this section, we evaluate the effectiveness of the proposed SA-GMAB tuning framework. The experiments are designed to verify four key properties: fast convergence speed, robustness against stochastic noise, adaptability to workload shifts, and tangible end-to-end performance gains.
|
||||
|
||||
To comprehensively assess SA-GMAB across different optimization paradigms, we benchmark against five representative tuning strategies spanning heuristic search, probabilistic modeling, simulation-based prediction, and reinforcement learning approaches:
|
||||
We benchmark against five tuning strategies spanning heuristic search, probabilistic modeling, simulation-based prediction, and reinforcement learning:
|
||||
|
||||
\begin{enumerate}
|
||||
\item \textbf{Genetic Algorithm (GA):} A canonical evolutionary search method that explores the configuration space through selection, crossover, and mutation operators \cite{Behzad13HDF5}. GA serves as the foundational algorithm in TunIO and represents the baseline heuristic approach.
|
||||
\item \textbf{GA} \cite{Behzad13HDF5}: An evolutionary search method using selection, crossover, and mutation operators. Serves as the foundational algorithm in TunIO.
|
||||
|
||||
\item \textbf{Simulated Annealing (SA):} A classical stochastic optimization technique inspired by metallurgical annealing \cite{Chen98SA, Robert20SA}. SA has been widely applied in HPC I/O tuning for over two decades and provides a mature baseline for convergence analysis.
|
||||
\item \textbf{SA} \cite{Robert20SA}: A stochastic optimization technique inspired by metallurgical annealing. Widely applied in HPC I/O tuning for over two decades.
|
||||
|
||||
\item \textbf{Bayesian Optimization with TPE:} A model-based sequential optimization method that constructs a surrogate using Tree-structured Parzen Estimators and selects candidates via Expected Improvement \cite{Agarwal19TPE}. TPE represents state-of-the-art probabilistic optimization and achieves rapid convergence in recent HPC I/O studies.
|
||||
\item \textbf{TPE} \cite{Agarwal19TPE}: A model-based sequential optimization method using Tree-structured Parzen Estimators to construct a surrogate and Expected Improvement for candidate selection. Achieves rapid convergence in recent HPC I/O studies.
|
||||
|
||||
\item \textbf{Random Forest Regression (RF):} A simulation-based approach that trains an ensemble predictor on historical execution logs to rank candidate configurations offline \cite{Bagbaba20RF}. RF drastically reduces tuning time from hours to seconds by avoiding repeated real-system evaluations.
|
||||
\item \textbf{RF} \cite{Bagbaba20RF}: A simulation-based approach that trains an ensemble predictor on historical execution logs to rank configurations offline. Reduces tuning time by avoiding repeated real-system evaluations.
|
||||
|
||||
\item \textbf{TunIO:} A recent framework integrating high-impact parameter selection with Reinforcement Learning-driven early stopping \cite{Rajesh24TunIO}. TunIO balances tuning cost and performance in complex HPC I/O stacks and represents the state-of-the-art RL-based approach.
|
||||
\item \textbf{TunIO} \cite{Rajesh24TunIO}: A framework integrating high-impact parameter selection with reinforcement learning-driven early stopping. Balances tuning cost and performance in complex HPC I/O stacks.
|
||||
|
||||
\item \textbf{SA-GMAB (Ours):} The proposed framework combining surrogate modeling with a Genetic Multi-Armed Bandit strategy, explicitly designed to accelerate convergence and handle stochastic performance fluctuations in concurrent workloads.
|
||||
\item \textbf{SA-GMAB}: A framework combining surrogate modeling with a Genetic Multi-Armed Bandit strategy to accelerate convergence and handle stochastic performance fluctuations in concurrent workloads.
|
||||
\end{enumerate}
|
||||
|
||||
\subsubsection{Convergence Speed and Tuning Cost}
|
||||
@@ -761,14 +794,22 @@ The RL-based TunIO outperforms above baselines but still suffers from a slow sta
|
||||
\subsubsection{Adaptation to Workload Shifts}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics[width=1.8in]{exp/tune_exp3_1.pdf}
|
||||
\caption{Mode Switching}
|
||||
\includegraphics[width=1.8in]{exp/tune_exp2_1.pdf}
|
||||
\caption{Latency evolution under workload shift}
|
||||
\label{fig:tune_exp3}
|
||||
\end{figure}
|
||||
|
||||
We further investigated the system's resilience in non-stationary environments. We introduced a sudden workload shift at tuning step $t=60$, drastically changing the query pattern from sparse random access to dense sequential scans, thereby invalidating the previously learned optimal parameters.
|
||||
To evaluate robustness in non-stationary environments, we introduced a sudden workload shift at step $t=11$, transitioning from sparse random access to dense sequential scans. This transition invalidates the previously learned optimal configuration, forcing each method to re-adapt.
|
||||
|
||||
Fig.~\ref{fig:tune_exp3} illustrates the latency evolution before and after the shift. At $t=60$, the workload transition causes an immediate performance collapse across all methods, with latency spiking from a stable $\approx 50$ ms to $>300$ ms. This confirms that the configuration optimal for the previous phase is detrimental in the new environment. The GA-based method fails to adapt effectively. Post-shift, its latency hovers around 290--300 ms. Lacking a mechanism to quickly reset or guide exploration, the genetic algorithm remains trapped in the local optima of the previous workload, exhibiting almost zero recovery within the observation window. TunIO manages to reduce latency but at a slow pace. It takes 40 steps to lower the latency from 308 ms to 134 ms ($t=100$). While the RL agent eventually learns the new reward function, the high sample complexity delays the recovery, leaving the system in a suboptimal state for a prolonged period. In contrast, SA-GMAB executes a decisive recovery. By leveraging the surrogate model to filter high-uncertainty candidates, it rapidly identifies the new optimal region. The latency drops to $\approx 88$ ms at $t=80$ and further stabilizes at $\approx 74$ ms at $t=100$.
|
||||
Fig.~\ref{fig:tune_exp3} illustrates the latency evolution across 50 tuning steps. During the initial stable phase (steps 1--10), all methods maintain low latency, confirming convergence to the optimal configuration for the first workload. At $t=11$, the workload shift triggers an immediate performance collapse: latency spikes from $50$ ms to $2100$ ms across all methods, as the previous optimal parameters become severely mismatched to the new I/O pattern.
|
||||
|
||||
The recovery phase (steps 11--50) reveals significant differences in adaptation speed. SA-GMAB demonstrates the fastest early recovery, leveraging its surrogate model and multi-armed bandit selection to rapidly identify promising regions. By step 20, SA-GMAB reduces latency to $706$ ms, while other methods remain above $1100$ ms. This rapid descent continues, stabilizing at $502$ ms by step 50. The key advantage lies in its ability to exploit historical observations via the persistent memory mechanism, enabling informed exploration despite the dramatic shift.
|
||||
|
||||
In contrast, TunIO and BO achieve lower final latency ($422$ ms and $448$ ms, respectively) but exhibit slower initial recovery. TunIO's RL-based adaptive mutation requires additional samples to learn the new reward landscape, taking $\sim 30$ steps to match SA-GMAB's performance at step 20. BO's Gaussian process also adapts gradually, as the model must accumulate sufficient observations in the new regime to update its posterior effectively.
|
||||
|
||||
Standard evolutionary and heuristic methods struggle significantly. Simulated annealing (SA) cannot escape the prior optimum, converging to a suboptimal configuration at $1703$ ms, which demonstrates its sensitivity to cooling schedules in non-stationary environments. GA shows limited adaptation ($1374$ ms final latency), as its population-based search lacks mechanisms to detect and react to environmental shifts. RF achieves moderate recovery ($950$ ms), but its reliance on accumulated training data causes lag when the objective landscape changes abruptly.
|
||||
|
||||
These results confirm that SA-GMAB provides superior early-stage adaptation in dynamic workloads, which is crucial for minimizing the duration of suboptimal performance following shifts. While TunIO and BO eventually reach comparable or slightly better steady-state performance, SA-GMAB's ability to rapidly leverage historical knowledge makes it more suitable for scenarios where recovery time is critical.
|
||||
|
||||
\section{Conclusions}\label{sec:Con}
|
||||
This paper presents an I/O-aware retrieval approach designed to bound retrieval latency and maximize throughput for large-scale spatio-temporal analytics. By introducing the ``Index-as-an-Execution-Plan'' paradigm, the dual-layer inverted index bridges the semantic gap between logical indexing and physical storage, effectively shifting the computational burden from retrieval time to ingestion time. To address the scalability challenges in concurrent environments, we developed a hybrid concurrency-aware I/O coordination protocol that adaptively switches between deterministic ordering and optimistic execution based on spatial contention. Furthermore, to handle the complexity of parameter configuration in fluctuating workloads, we integrated the SA-GMAB method for online automatic I/O tuning. The experimental results indicate that: (1) I/O-aware indexing achieves an order-of-magnitude latency reduction with negligible storage overhead; (2) the hybrid coordination protocol realizes a $54\times$ throughput improvement in high-overlap scenarios; and (3) the SA-GMAB method recovers from workload shifts $2\times$ faster than RL baselines while maximizing RoTI.
|
||||
|
||||
Reference in New Issue
Block a user