@InProceedings{ConfluentTries_SWAT2008,
AUTHOR = {Erik D. Demaine and Stefan Langerman and Eric Price},
TITLE = {Confluently Persistent Tries for Efficient Version Control},
BOOKTITLE = {Proceedings of the 11th Scandinavian Workshop on Algorithm
Theory (SWAT 2008)},
bookurl = {http://www.dmist.net/swat2008},
ADDRESS = {Gothenburg, Sweden},
SERIES = {Lecture Notes in Computer Science},
seriesurl = {http://www.springer.de/comp/lncs/},
VOLUME = 5124,
MONTH = {July 2--4},
YEAR = 2008,
PAGES = {160--172},
papers = {ConfluentTries_Algorithmica},
doi = {https://doi.org/10.1007/978-3-540-69903-3_16},
dblp = {https://dblp.org/rec/conf/swat/DemaineLP08},
comments = {This paper is also available from <A HREF="https://doi.org/10.1007/978-3-540-69903-3_16">SpringerLink</A>.},
withstudent = 1,
copyright = {Copyright held by the authors.},
award = {Invited to special issue of \emph{Algorithmica}.},
}
Our first data structure represents an n-node degree-Δ trie with O(1) “fingers” in each version while supporting finger movement (navigation) and modifications near the fingers (including subtree copy) in O(lg Δ) time and space per operation. This data structure is essentially a locality-sensitive version of the standard practice, path copying, which costs O(d lg Δ) time and space for modification of a node at depth d and is thus expensive when performing many deep but nearby updates. Our second data structure supports finger movement in O(lg Δ) time and no space, while modifications take O(lg n) time and space. This data structure is substantially faster for deep updates, i.e., for unbalanced tries. Both of these data structures are functional, which is a stronger property than confluent persistence. Without this stronger property, we show how both data structures can be sped up to support finger movement in O(lg lg Δ) time, which is essentially optimal. Along the way, we present a general technique for global rebuilding of fully persistent data structures, which is nontrivial because amortization and persistence do not usually mix. In particular, this technique improves the best previous result for fully persistent arrays and obtains the first efficient fully persistent hash table.
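For readers unfamiliar with the path-copying baseline that the abstract contrasts against, here is a minimal sketch in Haskell. It is illustrative only and not from the paper: the Trie type and the insert/lookupKey names are made up for this example, and the sketch omits the fingers and locality-sensitive machinery that are the paper's contribution. It shows why path copying yields persistence (old versions share all untouched subtrees) and why a modification at depth d costs O(d lg Δ): one O(lg Δ) map update per node on the root-to-key path.

import qualified Data.Map.Strict as M

-- A trie node: an optional value plus children keyed by character.
data Trie v = Node (Maybe v) (M.Map Char (Trie v))

empty :: Trie v
empty = Node Nothing M.empty

-- Insert by path copying: rebuild only the nodes along the path to the
-- key, sharing every untouched subtree with the previous version.
-- Each rebuilt node pays one O(lg Δ) map update, so a key at depth d
-- costs O(d lg Δ) time and space, the baseline cited in the abstract.
insert :: String -> v -> Trie v -> Trie v
insert []    v (Node _ cs)  = Node (Just v) cs
insert (c:k) v (Node mv cs) =
  let child = M.findWithDefault empty c cs
  in  Node mv (M.insert c (insert k v child) cs)

lookupKey :: String -> Trie v -> Maybe v
lookupKey []    (Node mv _) = mv
lookupKey (c:k) (Node _ cs) = M.lookup c cs >>= lookupKey k

main :: IO ()
main = do
  let v0 = insert "swat" (2008 :: Int) empty
      v1 = insert "swap" 1 v0   -- v0 is untouched: both versions coexist
  print (lookupKey "swat" v1, lookupKey "swap" v0)  -- (Just 2008,Nothing)

Because the structure is purely functional, any version can be read or extended at any time, which is exactly the stronger-than-confluent-persistence property the abstract notes; the paper's data structures improve on this baseline's per-update cost.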