Compare commits

...

24 Commits

Author SHA1 Message Date
8a6d676e93 Improved writing 2026-03-18 23:57:01 +01:00
f3cf653ab5 Improved Writing made it less verbouse 2026-03-18 21:50:30 +01:00
64aeeb5772 Improved Writing made it less verbouse 2026-03-18 21:50:15 +01:00
6b32967f32 Add Results.tex for baseline profile 2026-03-17 18:44:49 +01:00
910a7b2a81 several fixups discussed on tuesday 2026-03-06 17:56:36 +01:00
f29d810240 several fixups discussed on tuesday 2026-03-06 17:56:30 +01:00
0a0ca0800a several fixups discussed on tuesday 2026-03-06 17:53:26 +01:00
b6ac4e20bf several fixups discussed on tuesday 2026-03-06 17:34:41 +01:00
c168a1b441 several fixups discussed on tuesday 2026-03-06 17:34:31 +01:00
ec32331bae added empty chapter for structure 2026-02-28 19:36:17 +01:00
841973f26f create charts for methodology section 2026-02-25 17:50:40 +01:00
c1c94fdf78 create macro for item with textbf 2026-02-25 16:10:21 +01:00
c08df6b885 added source code review to methodology section 2026-02-24 19:03:31 +01:00
2e2d8880c0 new structure 2026-02-24 18:36:44 +01:00
6f3b6ffa07 new structure 2026-02-24 18:36:34 +01:00
f1049f51f2 improved margings for creating a bounded version of the master thesis 2026-02-24 17:08:42 +01:00
170461431b related work 2026-02-16 22:09:54 +01:00
c5ee9deeba Finished cover page 2026-02-16 21:19:53 +01:00
200c8ba004 Finished cover page 2026-02-16 21:19:41 +01:00
d396de9f62 improved motivation 2026-02-16 20:52:55 +01:00
906f9c963a Methodology section 2026-02-11 23:12:58 +01:00
ae3d6b2749 really good motivation section 2026-02-11 22:27:05 +01:00
9bba7cdfd2 really good motivation section 2026-02-11 22:26:58 +01:00
230dfef15d before claude motivation rewrite 2026-02-11 22:16:18 +01:00
39 changed files with 2230 additions and 1289 deletions

8
.gitignore vendored
View File

@@ -1,4 +1,6 @@
# LaTeX intermediate and output files
CLAUDE.md
result
*.aux
**/*.bak.*
*.bbl
@@ -22,7 +24,11 @@
# SyncTeX files
*.synctex.gz
*.synctex(busy)
openspec
.claude
**/node_modules
**/dist
log.txt
# PDF files
*.pdf

44
Chapters/Background.tex Normal file
View File

@@ -0,0 +1,44 @@
\chapter{Background} % Main chapter title
\label{Background}
\section{Nix: A Safe and Policy-Free System for Software Deployment}
Nix addresses significant issues in software deployment by utilizing
cryptographic hashes to ensure unique paths for component instances
\cite{dolstra_nix_2004}. Features such as concurrent installation of
multiple versions, atomic upgrades, and safe garbage collection make
Nix a flexible deployment system. This work uses Nix to ensure that
all VPN builds and system configurations are deterministic.
\section{NixOS: A Purely Functional Linux Distribution}
NixOS extends Nix principles to Linux system configuration
\cite{dolstra_nixos_2008}. System configurations are reproducible and
isolated from stateful interactions typical in imperative package
management. This property is essential for ensuring identical test
environments across benchmark runs.
\section{UDP NAT and Firewall Puncturing in the Wild}
Halkes and Pouwelse~\cite{halkes_udp_2011} measure UDP hole punching
efficacy on a live P2P network using the Tribler BitTorrent client.
Their study finds that 79\% of peers are unreachable due to NAT or
firewall restrictions, yet 64\% reside behind configurations amenable
to hole punching. Among compatible peers, over 80\% of puncturing
attempts succeed, establishing hole punching as a practical NAT
traversal technique. Their timeout measurements further indicate that
keep-alive messages must be sent at least every 55 seconds to maintain
open NAT mappings.
These findings directly inform the evaluation criteria for this thesis.
All mesh VPNs tested rely on UDP hole punching for NAT traversal;
the 80\% success rate sets a baseline expectation, while the 55-second
timeout informs analysis of each implementation's keep-alive behavior
during source code review.
\section{An Overview of Packet Reordering in TCP}
TODO \cite{leung_overview_2007}
\section{Performance Evaluation of TCP over QUIC Tunnels}
TODO \cite{guo_implementation_2025}

View File

@@ -1,603 +0,0 @@
% Chapter 1
\chapter{Chapter Title Here} % Main chapter title
\label{Chapter1} % For referencing the chapter elsewhere, use \ref{Chapter1}
%----------------------------------------------------------------------------------------
% Define some commands to keep the formatting separated from the content
\newcommand{\keyword}[1]{\textbf{#1}}
\newcommand{\tabhead}[1]{\textbf{#1}}
\newcommand{\code}[1]{\texttt{#1}}
\newcommand{\file}[1]{\texttt{\bfseries#1}}
\newcommand{\option}[1]{\texttt{\itshape#1}}
%----------------------------------------------------------------------------------------
\section{Welcome and Thank You}
Welcome to this \LaTeX{} Thesis Template, a beautiful and easy to use
template for writing a thesis using the \LaTeX{} typesetting system.
If you are writing a thesis (or will be in the future) and its
subject is technical or mathematical (though it doesn't have to be),
then creating it in \LaTeX{} is highly recommended as a way to make
sure you can just get down to the essential writing without having to
worry over formatting or wasting time arguing with your word processor.
\LaTeX{} is easily able to professionally typeset documents that run
to hundreds or thousands of pages long. With simple mark-up commands,
it automatically sets out the table of contents, margins, page
headers and footers and keeps the formatting consistent and
beautiful. One of its main strengths is the way it can easily typeset
mathematics, even \emph{heavy} mathematics. Even if those equations
are the most horribly twisted and most difficult mathematical
problems that can only be solved on a super-computer, you can at
least count on \LaTeX{} to make them look stunning.
%----------------------------------------------------------------------------------------
\section{Learning \LaTeX{}}
\LaTeX{} is not a \textsc{wysiwyg} (What You See is What You Get)
program, unlike word processors such as Microsoft Word or Apple's
Pages. Instead, a document written for \LaTeX{} is actually a simple,
plain text file that contains \emph{no formatting}. You tell \LaTeX{}
how you want the formatting in the finished document by writing in
simple commands amongst the text, for example, if I want to use
\emph{italic text for emphasis}, I write the \verb|\emph{text}|
command and put the text I want in italics in between the curly
braces. This means that \LaTeX{} is a \enquote{mark-up} language,
very much like HTML.
\subsection{A (not so short) Introduction to \LaTeX{}}
If you are new to \LaTeX{}, there is a very good eBook -- freely
available online as a PDF file -- called, \enquote{The Not So Short
Introduction to \LaTeX{}}. The book's title is typically shortened to
just \emph{lshort}. You can download the latest version (as it is
occasionally updated) from here:
\url{http://www.ctan.org/tex-archive/info/lshort/english/lshort.pdf}
It is also available in several other languages. Find yours from the
list on this page: \url{http://www.ctan.org/tex-archive/info/lshort/}
It is recommended to take a little time out to learn how to use
\LaTeX{} by creating several, small `test' documents, or having a
close look at several templates on:\\
\url{http://www.LaTeXTemplates.com}\\
Making the effort now means you're not stuck learning the system when
what you \emph{really} need to be doing is writing your thesis.
\subsection{A Short Math Guide for \LaTeX{}}
If you are writing a technical or mathematical thesis, then you may
want to read the document by the AMS (American Mathematical Society)
called, \enquote{A Short Math Guide for \LaTeX{}}. It can be found online here:
\url{http://www.ams.org/tex/amslatex.html}
under the \enquote{Additional Documentation} section towards the
bottom of the page.
\subsection{Common \LaTeX{} Math Symbols}
There are a multitude of mathematical symbols available for \LaTeX{}
and it would take a great effort to learn the commands for them all.
The most common ones you are likely to use are shown on this page:
\url{http://www.sunilpatel.co.uk/latex-type/latex-math-symbols/}
You can use this page as a reference or crib sheet, the symbols are
rendered as large, high quality images so you can quickly find the
\LaTeX{} command for the symbol you need.
\subsection{\LaTeX{} on a Mac}
The \LaTeX{} distribution is available for many systems including
Windows, Linux and Mac OS X. The package for OS X is called MacTeX
and it contains all the applications you need -- bundled together and
pre-customized -- for a fully working \LaTeX{} environment and work flow.
MacTeX includes a custom dedicated \LaTeX{} editor called TeXShop for
writing your `\file{.tex}' files and BibDesk: a program to manage
your references and create your bibliography section just as easily
as managing songs and creating playlists in iTunes.
%----------------------------------------------------------------------------------------
\section{Getting Started with this Template}
If you are familiar with \LaTeX{}, then you should explore the
directory structure of the template and then proceed to place your
own information into the \emph{THESIS INFORMATION} block of the
\file{main.tex} file. You can then modify the rest of this file to
your unique specifications based on your degree/university. Section
\ref{FillingFile} on page \pageref{FillingFile} will help you do
this. Make sure you also read section \ref{ThesisConventions} about
thesis conventions to get the most out of this template.
If you are new to \LaTeX{} it is recommended that you carry on
reading through the rest of the information in this document.
Before you begin using this template you should ensure that its style
complies with the thesis style guidelines imposed by your
institution. In most cases this template style and layout will be
suitable. If it is not, it may only require a small change to bring
the template in line with your institution's recommendations. These
modifications will need to be done on the \file{MastersDoctoralThesis.cls} file.
\subsection{About this Template}
This \LaTeX{} Thesis Template is originally based and created around
a \LaTeX{} style file created by Steve R.\ Gunn from the University
of Southampton (UK), department of Electronics and Computer Science.
You can find his original thesis style file at his site, here:
\url{http://www.ecs.soton.ac.uk/~srg/softwaretools/document/templates/}
Steve's \file{ecsthesis.cls} was then taken by Sunil Patel who
modified it by creating a skeleton framework and folder structure to
place the thesis files in. The resulting template can be found on
Sunil's site here:
\url{http://www.sunilpatel.co.uk/thesis-template}
Sunil's template was made available through
\url{http://www.LaTeXTemplates.com} where it was modified many times
based on user requests and questions. Version 2.0 and onwards of this
template represents a major modification to Sunil's template and is,
in fact, hardly recognisable. The work to make version 2.0 possible
was carried out by \href{mailto:vel@latextemplates.com}{Vel} and
Johannes Böttcher.
%----------------------------------------------------------------------------------------
\section{What this Template Includes}
\subsection{Folders}
This template comes as a single zip file that expands out to several
files and folders. The folder names are mostly self-explanatory:
\keyword{Appendices} -- this is the folder where you put the
appendices. Each appendix should go into its own separate \file{.tex}
file. An example and template are included in the directory.
\keyword{Chapters} -- this is the folder where you put the thesis
chapters. A thesis usually has about six chapters, though there is no
hard rule on this. Each chapter should go in its own separate
\file{.tex} file and they can be split as:
\begin{itemize}
\item Chapter 1: Introduction to the thesis topic
\item Chapter 2: Background information and theory
\item Chapter 3: (Laboratory) experimental setup
\item Chapter 4: Details of experiment 1
\item Chapter 5: Details of experiment 2
\item Chapter 6: Discussion of the experimental results
\item Chapter 7: Conclusion and future directions
\end{itemize}
This chapter layout is specialised for the experimental sciences,
your discipline may be different.
\keyword{Figures} -- this folder contains all figures for the thesis.
These are the final images that will go into the thesis document.
\subsection{Files}
Included are also several files, most of them are plain text and you
can see their contents in a text editor. After initial compilation,
you will see that more auxiliary files are created by \LaTeX{} or
BibTeX and which you don't need to delete or worry about:
\keyword{example.bib} -- this is an important file that contains all
the bibliographic information and references that you will be citing
in the thesis for use with BibTeX. You can write it manually, but
there are reference manager programs available that will create and
manage it for you. Bibliographies in \LaTeX{} are a large subject and
you may need to read about BibTeX before starting with this. Many
modern reference managers will allow you to export your references in
BibTeX format which greatly eases the amount of work you have to do.
\keyword{MastersDoctoralThesis.cls} -- this is an important file. It
is the class file that tells \LaTeX{} how to format the thesis.
\keyword{main.pdf} -- this is your beautifully typeset thesis (in the
PDF file format) created by \LaTeX{}. It is supplied in the PDF with
the template and after you compile the template you should get an
identical version.
\keyword{main.tex} -- this is an important file. This is the file
that you tell \LaTeX{} to compile to produce your thesis as a PDF
file. It contains the framework and constructs that tell \LaTeX{} how
to layout the thesis. It is heavily commented so you can read exactly
what each line of code does and why it is there. After you put your
own information into the \emph{THESIS INFORMATION} block -- you have
now started your thesis!
Files that are \emph{not} included, but are created by \LaTeX{} as
auxiliary files include:
\keyword{main.aux} -- this is an auxiliary file generated by
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
run the main \file{.tex} file.
\keyword{main.bbl} -- this is an auxiliary file generated by BibTeX,
if it is deleted, BibTeX simply regenerates it when you run the
\file{main.aux} file. Whereas the \file{.bib} file contains all the
references you have, this \file{.bbl} file contains the references
you have actually cited in the thesis and is used to build the
bibliography section of the thesis.
\keyword{main.blg} -- this is an auxiliary file generated by BibTeX,
if it is deleted BibTeX simply regenerates it when you run the main
\file{.aux} file.
\keyword{main.lof} -- this is an auxiliary file generated by
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
run the main \file{.tex} file. It tells \LaTeX{} how to build the
\emph{List of Figures} section.
\keyword{main.log} -- this is an auxiliary file generated by
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
run the main \file{.tex} file. It contains messages from \LaTeX{}, if
you receive errors and warnings from \LaTeX{}, they will be in this
\file{.log} file.
\keyword{main.lot} -- this is an auxiliary file generated by
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
run the main \file{.tex} file. It tells \LaTeX{} how to build the
\emph{List of Tables} section.
\keyword{main.out} -- this is an auxiliary file generated by
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
run the main \file{.tex} file.
So from this long list, only the files with the \file{.bib},
\file{.cls} and \file{.tex} extensions are the most important ones.
The other auxiliary files can be ignored or deleted as \LaTeX{} and
BibTeX will regenerate them.
%----------------------------------------------------------------------------------------
\section{Filling in Your Information in the \file{main.tex}
File}\label{FillingFile}
You will need to personalise the thesis template and make it your own
by filling in your own information. This is done by editing the
\file{main.tex} file in a text editor or your favourite LaTeX environment.
Open the file and scroll down to the third large block titled
\emph{THESIS INFORMATION} where you can see the entries for
\emph{University Name}, \emph{Department Name}, etc \ldots
Fill out the information about yourself, your group and institution.
You can also insert web links, if you do, make sure you use the full
URL, including the \code{http://} for this. If you don't want these
to be linked, simply remove the \verb|\href{url}{name}| and only leave the name.
When you have done this, save the file and recompile \code{main.tex}.
All the information you filled in should now be in the PDF, complete
with web links. You can now begin your thesis proper!
%----------------------------------------------------------------------------------------
\section{The \code{main.tex} File Explained}
The \file{main.tex} file contains the structure of the thesis. There
are plenty of written comments that explain what pages, sections and
formatting the \LaTeX{} code is creating. Each major document element
is divided into commented blocks with titles in all capitals to make
it obvious what the following bit of code is doing. Initially there
seems to be a lot of \LaTeX{} code, but this is all formatting, and
it has all been taken care of so you don't have to do it.
Begin by checking that your information on the title page is correct.
For the thesis declaration, your institution may insist on something
different than the text given. If this is the case, just replace what
you see with what is required in the \emph{DECLARATION PAGE} block.
Then comes a page which contains a funny quote. You can put your own,
or quote your favourite scientist, author, person, and so on. Make
sure to put the name of the person who you took the quote from.
Following this is the abstract page which summarises your work in a
condensed way and can almost be used as a standalone document to
describe what you have done. The text you write will cause the
heading to move up so don't worry about running out of space.
Next come the acknowledgements. On this page, write about all the
people who you wish to thank (not forgetting parents, partners and
your advisor/supervisor).
The contents pages, list of figures and tables are all taken care of
for you and do not need to be manually created or edited. The next
set of pages are more likely to be optional and can be deleted since
they are for a more technical thesis: insert a list of abbreviations
you have used in the thesis, then a list of the physical constants
and numbers you refer to and finally, a list of mathematical symbols
used in any formulae. Making the effort to fill these tables means
the reader has a one-stop place to refer to instead of searching the
internet and references to try and find out what you meant by certain
abbreviations or symbols.
The list of symbols is split into the Roman and Greek alphabets.
Whereas the abbreviations and symbols ought to be listed in
alphabetical order (and this is \emph{not} done automatically for
you) the list of physical constants should be grouped into similar themes.
The next page contains a one line dedication. Who will you dedicate
your thesis to?
Finally, there is the block where the chapters are included.
Uncomment the lines (delete the \code{\%} character) as you write the
chapters. Each chapter should be written in its own file and put into
the \emph{Chapters} folder and named \file{Chapter1},
\file{Chapter2}, etc\ldots Similarly for the appendices, uncomment
the lines as you need them. Each appendix should go into its own file
and placed in the \emph{Appendices} folder.
After the preamble, chapters and appendices finally comes the
bibliography. The bibliography style (called \option{authoryear}) is
used for the bibliography and is a fully featured style that will
even include links to where the referenced paper can be found online.
Do not underestimate how grateful your reader will be to find that a
reference to a paper is just a click away. Of course, this relies on
you putting the URL information into the BibTeX file in the first place.
%----------------------------------------------------------------------------------------
\section{Thesis Features and Conventions}\label{ThesisConventions}
To get the best out of this template, there are a few conventions
that you may want to follow.
One of the most important (and most difficult) things to keep track
of in such a long document as a thesis is consistency. Using certain
conventions and ways of doing things (such as using a Todo list)
makes the job easier. Of course, all of these are optional and you
can adopt your own method.
\subsection{Printing Format}
This thesis template is designed for double sided printing (i.e.
content on the front and back of pages) as most theses are printed
and bound this way. Switching to one sided printing is as simple as
uncommenting the \option{oneside} option of the \code{documentclass}
command at the top of the \file{main.tex} file. You may then wish to
adjust the margins to suit specifications from your institution.
The headers for the pages contain the page number on the outer side
(so it is easy to flick through to the page you want) and the chapter
name on the inner side.
The text is set to 11 point by default with single line spacing,
again, you can tune the text size and spacing should you want or need
to using the options at the very start of \file{main.tex}. The
spacing can be changed similarly by replacing the
\option{singlespacing} with \option{onehalfspacing} or \option{doublespacing}.
\subsection{Using US Letter Paper}
The paper size used in the template is A4, which is the standard size
in Europe. If you are using this thesis template elsewhere and
particularly in the United States, then you may have to change the A4
paper size to the US Letter size. This can be done in the margins
settings section in \file{main.tex}.
Due to the differences in the paper size, the resulting margins may
be different to what you like or require (as it is common for
institutions to dictate certain margin sizes). If this is the case,
then the margin sizes can be tweaked by modifying the values in the
same block as where you set the paper size. Now your document should
be set up for US Letter paper size with suitable margins.
\subsection{References}
The \code{biblatex} package is used to format the bibliography and
inserts references such as this one \parencite{Reference1}. The
options used in the \file{main.tex} file mean that the in-text
citations of references are formatted with the author(s) listed with
the date of the publication. Multiple references are separated by
semicolons (e.g. \parencite{Reference2, Reference1}) and references
with more than three authors only show the first author with \emph{et
al.} indicating there are more authors (e.g. \parencite{Reference3}).
This is done automatically for you. To see how you use references,
have a look at the \file{Chapter1.tex} source file. Many reference
managers allow you to simply drag the reference into the document as you type.
Scientific references should come \emph{before} the punctuation mark
if there is one (such as a comma or period). The same goes for
footnotes\footnote{Such as this footnote, here down at the bottom of
the page.}. You can change this but the most important thing is to
keep the convention consistent throughout the thesis. Footnotes
themselves should be full, descriptive sentences (beginning with a
capital letter and ending with a full stop). The APA6 states:
\enquote{Footnote numbers should be superscripted, [...], following
any punctuation mark except a dash.} The Chicago manual of style
states: \enquote{A note number should be placed at the end of a
sentence or clause. The number follows any punctuation mark except
the dash, which it precedes. It follows a closing parenthesis.}
The bibliography is typeset with references listed in alphabetical
order by the first author's last name. This is similar to the APA
referencing style. To see how \LaTeX{} typesets the bibliography,
have a look at the very end of this document (or just click on the
reference number links in in-text citations).
\subsubsection{A Note on bibtex}
The bibtex backend used in the template by default does not correctly
handle unicode character encoding (i.e. "international" characters).
You may see a warning about this in the compilation log and, if your
references contain unicode characters, they may not show up correctly
or at all. The solution to this is to use the biber backend instead
of the outdated bibtex backend. This is done by finding this in
\file{main.tex}: \option{backend=bibtex} and changing it to
\option{backend=biber}. You will then need to delete all auxiliary
BibTeX files and navigate to the template directory in your terminal
(command prompt). Once there, simply type \code{biber main} and biber
will compile your bibliography. You can then compile \file{main.tex}
as normal and your bibliography will be updated. An alternative is to
set up your LaTeX editor to compile with biber instead of bibtex, see
\href{http://tex.stackexchange.com/questions/154751/biblatex-with-biber-configuring-my-editor-to-avoid-undefined-citations/}{here}
for how to do this for various editors.
\subsection{Tables}
Tables are an important way of displaying your results, below is an
example table which was generated with this code:
{\small
\begin{verbatim}
\begin{table}
\caption{The effects of treatments X and Y on the four groups studied.}
\label{tab:treatments}
\centering
\begin{tabular}{l l l}
\toprule
\tabhead{Groups} & \tabhead{Treatment X} & \tabhead{Treatment Y} \\
\midrule
1 & 0.2 & 0.8\\
2 & 0.17 & 0.7\\
3 & 0.24 & 0.75\\
4 & 0.68 & 0.3\\
\bottomrule\\
\end{tabular}
\end{table}
\end{verbatim}
}
\begin{table}
\caption{The effects of treatments X and Y on the four groups studied.}
\label{tab:treatments}
\centering
\begin{tabular}{l l l}
\toprule
\tabhead{Groups} & \tabhead{Treatment X} & \tabhead{Treatment Y} \\
\midrule
1 & 0.2 & 0.8\\
2 & 0.17 & 0.7\\
3 & 0.24 & 0.75\\
4 & 0.68 & 0.3\\
\bottomrule\\
\end{tabular}
\end{table}
You can reference tables with \verb|\ref{<label>}| where the label is
defined within the table environment. See \file{Chapter1.tex} for an
example of the label and citation (e.g. Table~\ref{tab:treatments}).
\subsection{Figures}
There will hopefully be many figures in your thesis (that should be
placed in the \emph{Figures} folder). The way to insert figures into
your thesis is to use a code template like this:
\begin{verbatim}
\begin{figure}
\centering
\includegraphics{Figures/Electron}
\decoRule
\caption[An Electron]{An electron (artist's impression).}
\label{fig:Electron}
\end{figure}
\end{verbatim}
Also look in the source file. Putting this code into the source file
produces the picture of the electron that you can see in the figure below.
\begin{figure}[th]
\centering
\includegraphics{Figures/Electron}
\decoRule
\caption[An Electron]{An electron (artist's impression).}
\label{fig:Electron}
\end{figure}
Sometimes figures don't always appear where you write them in the
source. The placement depends on how much space there is on the page
for the figure. Sometimes there is not enough room to fit a figure
directly where it should go (in relation to the text) and so \LaTeX{}
puts it at the top of the next page. Positioning figures is the job
of \LaTeX{} and so you should only worry about making them look good!
Figures usually should have captions just in case you need to refer
to them (such as in Figure~\ref{fig:Electron}). The \verb|\caption|
command contains two parts, the first part, inside the square
brackets is the title that will appear in the \emph{List of Figures},
and so should be short. The second part in the curly brackets should
contain the longer and more descriptive caption text.
The \verb|\decoRule| command is optional and simply puts an aesthetic
horizontal line below the image. If you do this for one image, do it
for all of them.
\LaTeX{} is capable of using images in pdf, jpg and png format.
\subsection{Typesetting mathematics}
If your thesis is going to contain heavy mathematical content, be
sure that \LaTeX{} will make it look beautiful, even though it won't
be able to solve the equations for you.
The \enquote{Not So Short Introduction to \LaTeX} (available on
\href{http://www.ctan.org/tex-archive/info/lshort/english/lshort.pdf}{CTAN})
should tell you everything you need to know for most cases of
typesetting mathematics. If you need more information, a much more
thorough mathematical guide is available from the AMS called,
\enquote{A Short Math Guide to \LaTeX} and can be downloaded from:
\url{ftp://ftp.ams.org/pub/tex/doc/amsmath/short-math-guide.pdf}
There are many different \LaTeX{} symbols to remember, luckily you
can find the most common symbols in
\href{http://ctan.org/pkg/comprehensive}{The Comprehensive \LaTeX~Symbol List}.
You can write an equation, which is automatically given an equation
number by \LaTeX{} like this:
\begin{verbatim}
\begin{equation}
E = mc^{2}
\label{eqn:Einstein}
\end{equation}
\end{verbatim}
This will produce Einstein's famous energy-matter equivalence equation:
\begin{equation}
E = mc^{2}
\label{eqn:Einstein}
\end{equation}
All equations you write (which are not in the middle of paragraph
text) are automatically given equation numbers by \LaTeX{}. If you
don't want a particular equation numbered, use the unnumbered form:
\begin{verbatim}
\[ a^{2}=4 \]
\end{verbatim}
%----------------------------------------------------------------------------------------
\section{Sectioning and Subsectioning}
You should break your thesis up into nice, bite-sized sections and
subsections. \LaTeX{} automatically builds a table of Contents by
looking at all the \verb|\chapter{}|, \verb|\section{}| and
\verb|\subsection{}| commands you write in the source.
The Table of Contents should only list the sections to three (3)
levels. A \verb|chapter{}| is level zero (0). A \verb|\section{}| is
level one (1) and so a \verb|\subsection{}| is level two (2). In your
thesis it is likely that you will even use a \verb|subsubsection{}|,
which is level three (3). The depth to which the Table of Contents is
formatted is set within \file{MastersDoctoralThesis.cls}. If you need
this changed, you can do it in \file{main.tex}.
%----------------------------------------------------------------------------------------
\section{In Closing}
You have reached the end of this mini-guide. You can now rename or
overwrite this pdf file and begin writing your own
\file{Chapter1.tex} and the rest of your thesis. The easy work of
setting up the structure and framework has been taken care of for
you. It's now your job to fill it out!
Good luck and have lots of fun!
\begin{flushright}
Guide written by ---\\
Sunil Patel: \href{http://www.sunilpatel.co.uk}{www.sunilpatel.co.uk}\\
Vel: \href{http://www.LaTeXTemplates.com}{LaTeXTemplates.com}
\end{flushright}

4
Chapters/Conclusion.tex Normal file
View File

@@ -0,0 +1,4 @@
\chapter{Conclusion} % Main chapter title
\label{Conclusion}

4
Chapters/Discussion.tex Normal file
View File

@@ -0,0 +1,4 @@
\chapter{Discussion} % Main chapter title
\label{Discussion}

View File

@@ -2,6 +2,210 @@
\label{Introduction}
This chapter introduces the Clan project, articulates its fundamental
objectives, outlines the key components, and examines the driving
factors motivating its development.
Peer-to-peer overlay VPNs allow nodes to connect directly regardless
of NAT or firewall restrictions. Yet practitioners choosing among the
growing number of mesh VPN implementations must rely largely on
anecdotal evidence: systematic, reproducible comparisons under
realistic conditions are scarce.
This thesis addresses that gap. We benchmark ten peer-to-peer VPN
implementations across seven workloads and four network impairment
profiles. We complement these performance benchmarks with a source
code analysis of each implementation, verified by the respective
maintainers. The entire
experimental framework is built on Nix, NixOS, and the Clan deployment
system, so every result is independently reproducible.
\section{Motivation}
Peer-to-peer architectures can provide censorship-resistant,
fault-tolerant infrastructure because they have no single point of
failure \cite{shukla_towards_2021}. Blockchain platforms like Ethereum
depend on this property, as do IoT edge networks and content delivery
systems. But these benefits only hold when nodes are spread across
diverse hosting entities.
In practice, this diversity remains illusory.
Amazon, Hetzner, and OVH collectively host 70\% of all Ethereum nodes
(see Figure~\ref{fig:ethernodes_hosting}), so nominally decentralized
infrastructure actually sits in a handful of cloud providers.
More concerning, these providers operate under overlapping regulatory
jurisdictions,
predominantly the United States and the European Union.
This concentration undermines technical sovereignty:
a single governmental action could compel service termination,
data disclosure, or traffic manipulation across a majority of the network.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{Figures/ethernodes_hosting.png}
\caption{Distribution of Ethereum nodes hosted by various providers
\cite{noauthor_isps_nodate}}
\label{fig:ethernodes_hosting}
\end{figure}
This centralization persists because self-hosting is hard. Cloud
providers offer static IP addresses and publicly routable endpoints,
which avoids the networking problems that residential and small-office
deployments face.
Most internet-connected devices sit behind Network Address Translation (NAT),
which prevents incoming connections without explicit port forwarding
or relay infrastructure.
Combined with dynamic IP assignments from ISPs, stable peer
connectivity from self-hosted infrastructure has traditionally
required significant technical expertise.
Overlay VPNs solve this problem. They establish encrypted tunnels
that traverse NAT boundaries, so peers can connect directly without
static IP addresses or manual firewall configuration. Each node
receives a stable virtual address within the overlay network,
regardless of its physical network topology. A device behind
consumer-grade NAT can therefore participate as a first-class peer
in a distributed system.
The Clan deployment framework uses Nix and NixOS to eliminate
configuration drift and dependency conflicts, which makes it
practical for a single administrator to self-host distributed
services.
Overlay VPNs are central to Clan's architecture: they supply the
peer connectivity that lets nodes form a network regardless of
physical location or NAT situation.
As illustrated in Figure~\ref{fig:vision-stages}, Clan plans to offer
a web interface that lets users design and deploy private P2P networks
with minimal configuration, assisted by an integrated LLM.
During Clan's development, a recurring problem surfaced:
practitioners disagreed on which mesh VPN to use, each pointing to
different edge cases where their preferred VPN failed or lacked a
needed feature. These discussions relied on anecdotal evidence rather
than systematic evaluation, which motivated the present work.
\subsection{Related Work}
Existing research offers only partial coverage of this space.
Lackorzynski et al.\ \cite{lackorzynski_comparative_2019} benchmark
OpenVPN, IPSec, Tinc, Freelan, MACsec, and WireGuard in the context
of industrial communication systems. They measure point-to-point
throughput, latency, and CPU overhead but do not address overlay
network behavior such as NAT traversal or dynamic peer discovery.
The most closely related study by Kjorveziroski et al.\
\cite{kjorveziroski_full-mesh_2024} evaluates full-mesh VPN solutions
for distributed systems, looking at throughput, reliability under
packet loss, and relay behavior for VPNs including ZeroTier. However,
it focuses primarily on solutions with a central point of failure and
limits its workloads to synthetic iperf3 tests.
This thesis extends that work in several directions. It evaluates a
broader set of VPN implementations with emphasis on fully
decentralized architectures and tests them under application-level
workloads such as video streaming and package downloads. It also
applies multiple network impairment profiles and provides a
reproducible experimental framework built on Nix, NixOS, and Clan.
A secondary goal was to create an automated benchmarking framework
that generates a public leaderboard, similar in spirit to the
js-framework-benchmark (see Figure~\ref{fig:js-framework-benchmark}).
A web interface with regularly updated results gives VPN developers a
concrete baseline to measure against.
\section{Research Contribution}
This thesis makes the following contributions:
\begin{enumerate}
\item A benchmark of ten peer-to-peer VPN implementations across
seven workloads and four network impairment profiles. The workloads
include video streaming and package downloads alongside synthetic
throughput tests.
\item A source code analysis of all ten VPN implementations. Manual
code review was combined with LLM-assisted analysis and the results
were verified by the respective maintainers on GitHub.
\item A reproducible experimental framework built on Nix, NixOS,
and the Clan deployment system. Dependencies are pinned and system
configuration is declarative, down to deterministic cryptographic
material generation. Every result can be independently replicated.
\item A performance analysis showing that Tailscale outperforms the
Linux kernel's default networking stack under degraded conditions,
and that kernel parameter tuning (Reno congestion control in place
of CUBIC, with RACK disabled) yields measurable throughput
improvements.
\item The discovery of several security vulnerabilities across
the evaluated VPN implementations.
\item An automated benchmarking framework that produces a public
leaderboard, giving VPN developers a target to optimize
against.
\end{enumerate}
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{Figures/krause-js-framework.png}
\caption{js-framework-benchmark results for Chrome 144.0
\cite{krause_krausestjs-framework-benchmark_2026}}
\label{fig:js-framework-benchmark}
\end{figure}
\begin{figure}[h]
\centering
% Row 1
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage1.png}
\caption{Stage 1}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage2.png}
\caption{Stage 2}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 2
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage3.png}
\caption{Stage 3}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage4.png}
\caption{Stage 4}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 3
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage5.png}
\caption{Stage 5}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage6.png}
\caption{Stage 6}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 4
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage7.png}
\caption{Stage 7}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage8.png}
\caption{Stage 8}
\end{subfigure}
\caption{Planned web interface for setting up a Clan family network}
\label{fig:vision-stages}
\end{figure}

View File

@@ -2,243 +2,560 @@
\chapter{Methodology} % Main chapter title
\label{Methodology} % Change X to a consecutive number; for
% referencing this chapter elsewhere, use \ref{ChapterX}
\label{Methodology}
%----------------------------------------------------------------------------------------
% SECTION 1
%----------------------------------------------------------------------------------------
This chapter describes the methodology used to benchmark and analyze
peer-to-peer mesh VPN implementations. The evaluation combines
performance benchmarking under controlled network conditions with a
structured source code analysis of each implementation. All
dependencies, system configurations, and test procedures are pinned
or declared so that the experiments can be independently reproduced.
This chapter describes the methodology used to evaluate and analyze
the Clan framework. A summary of the logical flow of this research is
depicted in Figure \ref{fig:clan_thesis_argumentation_tree}.
\section{Experimental Setup}
\subsection{Hardware Configuration}
All experiments were conducted on three bare-metal servers with
identical specifications:
\begin{itemize}
\bitem{CPU:} Intel Model 94, 4 cores / 8 threads
\bitem{Memory:} 64 GB RAM
\bitem{Network:} 1 Gbps Ethernet (e1000e driver; one machine
uses r8169)
\bitem{Cryptographic acceleration:} AES-NI, AVX, AVX2, PCLMULQDQ,
RDRAND, SSE4.2
\end{itemize}
Results may differ on systems without hardware cryptographic
acceleration, since most of the tested VPNs offload encryption to
AES-NI.
\subsection{Network Topology}
The three machines are connected via a direct 1 Gbps LAN on the same
network segment. Each machine has a publicly reachable IPv4 address,
which is used to deploy configuration changes via Clan. On this
baseline topology, latency is sub-millisecond and there is no packet
loss, so measured overhead can be attributed to the VPN itself.
Figure~\ref{fig:mesh_topology} illustrates the full-mesh connectivity
between the three machines.
\begin{figure}[H]
\centering
\includesvg[width=1\textwidth,
keepaspectratio]{Figures/clan_thesis_argumentation_tree.drawio.svg}
\caption{Argumentation Tree for the Clan Thesis}
\label{fig:clan_thesis_argumentation_tree}
\begin{tikzpicture}[
node/.style={
draw, rounded corners, minimum width=2.2cm, minimum height=1cm,
font=\ttfamily\bfseries, align=center
},
link/.style={thick, <->}
]
% Nodes in an equilateral triangle
\node[node] (luna) at (0, 3.5) {luna};
\node[node] (yuki) at (-3, 0) {yuki};
\node[node] (lom) at (3, 0) {lom};
% Mesh links
\draw[link] (luna) -- node[left, font=\small] {1 Gbps} (yuki);
\draw[link] (luna) -- node[right, font=\small] {1 Gbps} (lom);
\draw[link] (yuki) -- node[below, font=\small] {1 Gbps} (lom);
\end{tikzpicture}
\caption{Full-mesh network topology of the three benchmark machines}
\label{fig:mesh_topology}
\end{figure}
The structure of this study adopts a multi-faceted approach,
addressing several interrelated challenges in enhancing the
reliability and manageability of \ac{P2P} networks.
The primary objective is to assess how the Clan framework effectively
addresses these challenges.
To simulate real-world network conditions, Linux traffic control
(\texttt{tc netem}) is used to inject latency, jitter, packet loss,
and reordering. These impairments are applied symmetrically on all
machines, meaning effective round-trip impairment is approximately
double the per-machine values.
The research methodology consists of two main components:
\begin{enumerate}
\item \textbf{Development of a Theoretical Model} \\
A theoretical model of the Clan framework will be constructed.
This includes a formal specification of the system's foundational
axioms, outlining the principles and properties that guide its
design. From these axioms, key theorems will be derived, along
with their boundary conditions. The aim is to understand the
mechanisms underpinning the framework and establish a basis for
its evaluation.
\subsection{Configuration Methodology}
\item \textbf{Empirical Validation of the Theoretical Model} \\
Practical experiments will be conducted to validate the
predictions of the theoretical model. These experiments will
evaluate how well the model aligns with observed performance in
real-world settings. This step is crucial to identifying the
model's strengths and limitations.
\end{enumerate}
Each VPN is built from source within the Nix flake, with all
dependencies pinned to exact versions. VPNs not packaged in nixpkgs
(Hyprspace, EasyTier, VpnCloud) have dedicated build expressions
under \texttt{pkgs/} in the flake.
Cryptographic material (WireGuard keys, Nebula certificates, ZeroTier
identities) is generated deterministically via Clan's vars generator
system.
Generated keys are stored in version control under
\texttt{vars/per-machine/\{name\}/} and read at NixOS evaluation time,
so key material is part of the reproducible configuration.
\section{Benchmark Suite}
The benchmark suite includes synthetic throughput tests and
application-level workloads. Prior comparative work relied exclusively
on iperf3; the additional benchmarks here capture behavior that
iperf3 alone misses.
Table~\ref{tab:benchmark_suite} summarizes each benchmark.
\begin{table}[H]
\centering
\caption{Benchmark suite overview}
\label{tab:benchmark_suite}
\begin{tabular}{llll}
\hline
\textbf{Benchmark} & \textbf{Protocol} & \textbf{Duration} &
\textbf{Key Metrics} \\
\hline
Ping & ICMP & 3 runs $\times$ 100 pkts & RTT, packet loss \\
TCP iPerf3 & TCP & 30 s & Throughput, retransmits, CPU \\
UDP iPerf3 & UDP & 30 s & Throughput, jitter, packet loss \\
Parallel iPerf3 & TCP & 60 s & Throughput under contention \\
QPerf & QUIC & 30 s & Bandwidth, TTFB, conn. time \\
RIST Streaming & RIST & 30 s & Bitrate, dropped frames, RTT \\
Nix Cache Download & HTTP & 2 runs & Download duration \\
\hline
\end{tabular}
\end{table}
The first four benchmarks use standard network testing tools;
the remaining three test application-level workloads.
The subsections below describe configuration details that the table
does not capture.
\subsection{Ping}
Sends 100 ICMP echo requests at 200\,ms intervals with a 1-second
per-packet timeout, repeated for 3 runs.
\subsection{TCP and UDP iPerf3}
Both tests run for 30 seconds in bidirectional mode with zero-copy
(\texttt{-Z}) to minimize CPU overhead. The UDP variant additionally
sets unlimited target bandwidth (\texttt{-b 0}) and enables 64-bit
counters.
\subsection{Parallel iPerf3}
Runs one bidirectional TCP stream on all three machine pairs
simultaneously in a circular pattern (A$\rightarrow$B,
B$\rightarrow$C, C$\rightarrow$A) for 60 seconds with zero-copy
(\texttt{-Z}). The three concurrent bidirectional links produce six
unidirectional flows in total. This contention stresses shared
resources that single-stream tests leave idle.
\subsection{QPerf}
Spawns one qperf process per CPU core, each running for 30 seconds.
Per-core bandwidth is summed per second. In addition to throughput,
QPerf reports time to first byte and connection establishment time,
which iPerf3 does not measure.
\subsection{RIST Video Streaming}
Generates a 4K ($3840\times2160$) H.264 test pattern at 30\,fps
(ultrafast preset, zerolatency tuning, 25\,Mbps bitrate cap) with
ffmpeg and transmits it over the RIST protocol for 30 seconds. Because
the synthetic test pattern is highly compressible, the actual encoding
bitrate is approximately 3.3\,Mbps, well below the configured cap. RIST
(Reliable Internet Stream Transport) is a protocol for low-latency
video contribution over unreliable networks. The benchmark records
encoding-side statistics (actual bitrate, frame rate, dropped frames)
and RIST-specific counters (packets recovered via retransmission,
quality score).
\subsection{Nix Cache Download}
A Harmonia Nix binary cache server on the target machine serves the
Firefox package. The client downloads it via \texttt{nix copy}
through the VPN. Unlike the iPerf3 tests, this workload issues many
short-lived HTTP requests instead of a single bulk transfer.
Benchmarked with hyperfine (1 warmup run, 2 timed runs); the local
Nix store and SQLite metadata are cleared between runs.
\section{Network Impairment Profiles}
Four impairment profiles simulate progressively worse network
conditions, from an unmodified baseline to a severely degraded link.
All impairments are injected with Linux traffic control
(\texttt{tc netem}) on the egress side of every machine's primary
interface.
Table~\ref{tab:impairment_profiles} lists the per-machine values.
Because impairments are applied on both ends of a connection, the
effective round-trip impact is roughly double the listed values.
\begin{table}[H]
\centering
\caption{Network impairment profiles (per-machine egress values)}
\label{tab:impairment_profiles}
\begin{tabular}{lccccc}
\hline
\textbf{Profile} & \textbf{Latency} & \textbf{Jitter} &
\textbf{Loss} & \textbf{Reorder} & \textbf{Correlation} \\
\hline
Baseline & - & - & - & - & - \\
Low & 2 ms & 2 ms & 0.25\% & 0.5\% & 25\% \\
Medium & 4 ms & 7 ms & 1.0\% & 2.5\% & 50\% \\
High & 6 ms & 15 ms & 2.5\% & 5\% & 50\% \\
\hline
\end{tabular}
\end{table}
Each column in Table~\ref{tab:impairment_profiles} controls one
aspect of the simulated degradation:
The methodology will particularly examine three core components of
the Clan framework:
\begin{itemize}
\item \textbf{Clan Deployment System} \\
The deployment system is the core of the Clan framework, enabling
the configuration and management of distributed software
components. It simplifies complex configurations through Python
code, which abstracts the intricacies of the Nix language.
Central to this system is the ``inventory,'' a mergeable data
structure designed for ensuring consistent service configurations
across nodes without conflicts. This component will be analyzed
for its design, functionality, efficiency, scalability, and fault
resilience.
\item \textbf{Overlay Networks / Mesh VPNs} \\
Overlay networks, also known as ``Mesh VPNs,'' are critical for
secure communication in Clan's \ac{P2P} deployment. The study
will evaluate their performance in terms of security,
scalability, and resilience to network disruptions. Specifically,
the assessment will include how well these networks handle
traffic in environments where no device has a public IP address,
as well as the impact of node failures on overall
connectivity. The analysis will focus on:
\begin{itemize}
\item \textbf{ZeroTier}: A globally distributed ``Ethernet Switch''.
\item \textbf{Mycelium}: An end-to-end encrypted IPv6 overlay network.
\item \textbf{Hyprspace}: A lightweight VPN leveraging IPFS and libp2p.
\end{itemize}
Other Mesh VPN solutions may be considered as comparison:
\begin{itemize}
\item \textbf{Tailscale}: A secure network for teams.
\item \textbf{Nebula Lighthouse}: A scalable overlay networking
tool with a focus on performance.
\item \textbf{Data Mesher} \\
The Data Mesher is responsible for data synchronization across
nodes, ensuring eventual consistency in Clan's decentralized network. This
component will be evaluated for synchronization speed, fault
tolerance, and conflict resolution mechanisms. Additionally, it
will be analyzed for its resilience in scenarios involving
malicious nodes, measuring how effectively it prevents and
mitigates manipulation or integrity violations during data
replication and distribution.
\item \textbf{Latency} is a constant delay added to every outgoing
packet. For example, 2\,ms on each machine adds roughly 4\,ms to
the round trip.
\item \textbf{Jitter} introduces random variation on top of the
fixed latency. A packet on the Low profile may see anywhere
between 0 and 4\,ms of total added delay instead of exactly
2\,ms.
\item \textbf{Loss} is the fraction of packets that are silently
dropped. At 0.25\,\% (Low profile), roughly 1 in 400 packets is
discarded.
\item \textbf{Reorder} is the fraction of packets that arrive out
of sequence. \texttt{tc netem} achieves this by giving selected
packets a shorter delay than their predecessors, so they overtake
earlier packets.
\item \textbf{Correlation} determines whether impairment events are
independent or bursty. At 0\,\%, each packet's fate is decided
independently. At higher values, a packet that was lost or
reordered raises the probability that the next packet suffers the
same fate, producing the burst patterns typical of real networks.
\end{itemize}
\section{Related Work}
A 30-second stabilization period follows TC application before
measurements begin so that queuing disciplines can settle.
The Clan framework operates within the realm of software deployment
and peer-to-peer networking,
necessitating a deep understanding of existing methodologies in these
areas to tackle contemporary challenges.
This section will discuss related works encompassing system
deployment, peer data management,
and low maintenance structured peer-to-peer overlays, which inform
the development and positioning of the Clan framework.
\section{Experimental Procedure}
\subsection{Nix: A Safe and Policy-Free System for Software Deployment}
\subsection{Automation}
Nix addresses significant issues in software deployment by utilizing
a technique that employs cryptographic
hashes to ensure unique paths for component instances \cite{dolstra_nix_2004}.
The system is distinguished by its features, such as concurrent
installation of multiple versions and variants,
atomic upgrades, and safe garbage collection.
These capabilities lead to a flexible deployment system that
harmonizes source and binary deployments.
Nix conceptualizes deployment without imposing rigid policies,
thereby offering adaptable strategies for component management.
This contrasts with many prevailing systems that are constrained by
policy-specific designs,
making Nix an easily extensible, safe and versatile deployment solution
for configuration files and software.
A Python orchestrator (\texttt{vpn\_bench/}) automates the full
benchmark suite. For each VPN under test, it:
As Clan makes extensive use of Nix for deployment, understanding the
foundations and principles of Nix is crucial for evaluating its inner workings.
\begin{enumerate}
\item Cleans all state directories from previous VPN runs
\item Deploys the VPN configuration to all machines via Clan
\item Restarts the VPN service on every machine (with retry:
up to 3 attempts, 2-second backoff)
\item Verifies VPN connectivity via a connection-check service
(120-second timeout)
\item For each impairment profile:
\begin{enumerate}
\item Applies TC rules via context manager (guarantees cleanup)
\item Waits 30 seconds for stabilization
\item Executes each benchmark three times sequentially,
once per machine pair: $A\to B$, then
$B\to C$, lastly $C\to A$
\item Clears TC rules
\end{enumerate}
\item Collects results and metadata
\end{enumerate}
\subsection{NixOS: A Purely Functional Linux Distribution}
Figure~\ref{fig:orchestrator_flow} illustrates this procedure as a
flowchart.
NixOS is an extension of the principles established by Nix,
presenting a Linux distribution that manages system configurations
using purely functional methods \cite{dolstra_nixos_2008}. This model
ensures that system
configurations are reproducible and isolated
from stateful interactions typical in imperative models of package management.
Because NixOS configurations are built by pure functions, they can overcome the
challenges of easily rolling back changes, deploying multiple package versions
side-by-side, and achieving deterministic configuration reproduction.
The solution is particularly compelling in environments necessitating rigorous
reproducibility and minimal configuration drift—a valuable feature
for distributed networks.
\begin{figure}[H]
\centering
\begin{tikzpicture}[
box/.style={
draw, rounded corners, minimum width=4.8cm, minimum height=0.9cm,
font=\small, align=center, fill=white
},
decision/.style={
draw, diamond, aspect=2.5, minimum width=3cm,
font=\small, align=center, fill=white, inner sep=1pt
},
arr/.style={->, thick},
every node/.style={font=\small}
]
% Main flow
\node[box] (clean) at (0, 0) {Clean state directories};
\node[box] (deploy) at (0, -1.5) {Deploy VPN via Clan};
\node[box] (restart) at (0, -3) {Restart VPN services\\(up to 3 attempts)};
\node[box] (verify) at (0, -4.5) {Verify connectivity\\(120\,s timeout)};
Clan also leverages NixOS for system configuration and deployment,
making it essential to understand how NixOS's functional model works.
% Inner loop
\node[decision] (profile) at (0, -6.3) {Next impairment\\profile?};
\node[box] (tc) at (0, -8.3) {Apply TC rules};
\node[box] (wait) at (0, -9.8) {Wait 30\,s};
\node[box] (bench) at (0, -11.3) {Run benchmarks\\$A{\to}B,\;
B{\to}C,\; C{\to}A$};
\node[box] (clear) at (0, -12.8) {Clear TC rules};
\subsection{Disnix: A Toolset for Distributed Deployment}
% After loop
\node[box] (collect) at (0, -14.8) {Collect results};
Disnix extends the Nix philosophy to the challenge of distributed
deployment, offering a toolset that enables system administrators and
developers to perform automatic deployment of service-oriented
systems across a network of machines \cite{van_der_burg_disnix_2014}.
Disnix leverages the features of Nix to manage complex inter-dependencies,
meaning dependencies that exist on a network level instead of on a binary level.
The overlap with the Clan framework is evident in the focus on deployment, how
they differ will be explored in the evaluation of Clan's deployment system.
% Arrows -- main spine
\draw[arr] (clean) -- (deploy);
\draw[arr] (deploy) -- (restart);
\draw[arr] (restart) -- (verify);
\draw[arr] (verify) -- (profile);
\draw[arr] (profile) -- node[right] {yes} (tc);
\draw[arr] (tc) -- (wait);
\draw[arr] (wait) -- (bench);
\draw[arr] (bench) -- (clear);
\subsection{State of the Art in Software Defined Networking}
% Loop back
\draw[arr] (clear) -- ++(3.8, 0) |- (profile);
The work by Bakhshi \cite{bakhshi_state_2017} surveys the
foundational principles and recent developments in Software Defined
Networking (SDN). It describes SDN as a paradigm that separates the
control plane from the data plane, enabling centralized, programmable
control over network behavior. The paper focuses on the architectural
components of SDN, including the three-layer abstraction model—the
application layer, control layer, and data layer—and highlights the
role of SDN controllers such as OpenDaylight, Floodlight, and Ryu.
% Exit loop
\draw[arr] (profile) -- ++(-3.2, 0) node[above, pos=0.3] {no}
|- (collect);
\end{tikzpicture}
\caption{Flowchart of the benchmark orchestrator procedure for a
single VPN}
\label{fig:orchestrator_flow}
\end{figure}
A key contribution of the paper is its identification of challenges
and open research questions in SDN. These include issues related to
scalability, fault tolerance, and the security risks introduced by
centralized control.
\subsection{Retry Logic}
This work is relevant to evaluating Clan's role as a
Software Defined Network deployment tool and as a
comparison point against the state of the art.
Tests use a retry wrapper with up to 2 retries (3 total attempts),
5-second initial delay, and 700-second maximum total time. The number
of attempts is recorded in test metadata so that retried results can
be identified during analysis.
\subsection{Low Maintenance Peer-to-Peer Overlays}
\subsection{Statistical Analysis}
Structured Peer-to-Peer (P2P) overlay networks offer scalability and
efficiency but often require significant maintenance to handle
challenges such as peer churn and mismatched logical and physical
topologies. Shukla et al. propose a novel approach to designing
Distributed Hash Table (DHT)-based P2P overlays by integrating
Software Defined Networks (SDNs) to dynamically adjust
application-specific network policies and rules
\cite{shukla_towards_2021}. This method reduces maintenance overhead
by aligning overlay topology with the underlying physical network,
thus improving performance and reducing communication costs.
Each metric is summarized as a statistics dictionary containing:
The relevance of this work to Clan lies in its addressing of
operational complexity in managing P2P networks.
\begin{itemize}
\bitem{min / max:} Extreme values observed
\bitem{average:} Arithmetic mean across samples
\bitem{p25 / p50 / p75:} Quartiles via Python's
\texttt{statistics.quantiles()} method
\end{itemize}
\subsection{Full-Mesh VPN Performance Evaluation}
Aggregation differs by benchmark type. Benchmarks that execute
multiple discrete runs, ping (3 runs of 100 packets each) and
nix-cache (2 timed runs via hyperfine), first compute statistics
within each run, then aggregate across runs: averages and percentiles
are averaged, while the reported minimum and maximum are the global
extremes across all runs. Concretely, if ping produces three runs
with mean RTTs of 5.1, 5.3, and 5.0\,ms, the reported average is
the mean of those three values (5.13\,ms). The reported minimum is
the single lowest RTT observed across all three runs.
The work by Kjorveziroski et al. \cite{kjorveziroski_full-mesh_2024}
provides a comprehensive evaluation of full-mesh VPN solutions,
specifically focusing on their use as underlay networks for
distributed systems, such as Kubernetes clusters. Their benchmarks
analyze the performance of VPNs with built-in NAT traversal
capabilities, including ZeroTier, emphasizing throughput, reliability
under packet loss, and behavior when relay mechanisms are used. For
the Clan framework, these insights are particularly relevant in
assessing the performance and scalability of its Overlay Networks
component. By benchmarking ZeroTier alongside its peers, the paper
offers an established reference point for evaluating how Mesh VPN
solutions like ZeroTier perform under conditions similar to the
intricacies of peer-to-peer systems managed by Clan.
Benchmarks that produce continuous per-second samples, qperf and
RIST streaming for example, pool all per-second measurements from a single
execution into one series before computing statistics. For qperf,
bandwidth is first summed across CPU cores for each second, and
statistics are then computed over the resulting time series.
\subsection{AMC: Towards Trustworthy and Explorable CRDT Applications}
The analysis reports empirical percentiles (p25, p50, p75) alongside
min/max bounds rather than parametric confidence intervals.
Benchmark latency and throughput distributions are often skewed or
multimodal, so parametric assumptions of normality would be
unreliable. The interquartile range (p25--p75) conveys the spread of
typical observations, while min and max capture outlier behavior.
The nix-cache benchmark additionally reports standard deviation via
hyperfine's built-in statistical output.
Jeffery and Mortier \cite{jeffery_amc_2023} present the Automerge
Model Checker (AMC), a tool aimed at verifying and dynamically
exploring the correctness of applications built on Conflict-Free
Replicated Data Types (CRDTs). Their work addresses critical
challenges associated with implementing and optimizing
operation-based (op-based) CRDTs, particularly emphasizing how these
optimizations can inadvertently introduce subtle bugs in distributed
systems despite rigorous testing methods like fuzz testing. As part
of their contributions, they implemented the "Automerge" library in
Rust, an op-based CRDT framework that exposes a JSON-like API and
supports local-first and asynchronous collaborative operations.
\section{Source Code Analysis}
This paper is particularly relevant to the development and evaluation
of the Data Mesher component of the Clan framework, which utilizes
state-based (or value-based) CRDTs for synchronizing distributed data
across peer-to-peer nodes. While Automerge addresses issues pertinent
to op-based CRDTs, the discussion on verification techniques, edge
case handling, and model-checking methodologies provides
cross-cutting insights into the complexities of op-based CRDTs and is
a good argument for using simpler state-based CRDTs.
We also conducted a structured source code analysis of all ten VPN
implementations. The analysis followed three phases.
\subsection{Keep CALM and CRDT On}
\subsection{Repository Collection and LLM-Assisted Overview}
The latest main branch of each VPN's git repository was cloned,
together with key dependencies that implement core functionality
outside the main repository. For example, Yggdrasil delegates its
routing and cryptographic operations to the Ironwood library, which
was analyzed alongside the main codebase.
Ten LLM agents (Claude Code) were then spawned in parallel, one per
VPN. Each agent was instructed to read the full source tree and
produce an \texttt{overview.md} file documenting the following
aspects:
\begin{itemize}
\item Wire protocol and message framing
\item Encryption scheme and key exchange
\item Packet handling and performance
\item NAT traversal mechanism
\item Local routing and peer discovery
\item Security features and access control
\item Resilience / Central Point of Failure
\end{itemize}
Each agent was required to reference the specific file and line
range supporting every claim so that outputs could be verified
against the source.
\subsection{Manual Verification}
The LLM-generated overviews served as a navigational aid rather than
a trusted source. The most important code paths identified in each
overview were manually read and verified against the actual source
code. Where the automated summaries were inaccurate or superficial,
they were corrected and expanded.
\subsection{Feature Matrix and Maintainer Review}
The findings from both phases were consolidated into a feature matrix
of 131 features across all ten VPN implementations, covering protocol
characteristics, cryptographic primitives, NAT traversal strategies,
routing behavior, and security properties.
The completed feature matrix was published and sent to the respective
VPN maintainers for review. We incorporated their feedback as
corrections and clarifications to the final classification.
\section{Reproducibility}
The experimental stack pins or declares the variables that could
affect results.
\subsection{Dependency Pinning}
Every external dependency is pinned via \texttt{flake.lock}, which records
cryptographic hashes (\texttt{narHash}) and commit SHAs for each input.
Key pinned inputs include:
\begin{itemize}
\bitem{nixpkgs:} Follows \texttt{clan-core/nixpkgs}, so a single
version is used across the dependency graph
\bitem{clan-core:} The Clan framework, pinned to a specific commit
\bitem{VPN sources:} Hyprspace, EasyTier, Nebula locked to
exact commits
\bitem{Build infrastructure:} flake-parts, treefmt-nix, disko,
nixos-facter-modules
\end{itemize}
Custom packages not in nixpkgs (qperf, VpnCloud, iperf with auth patches,
EasyTier, Hyprspace) are built from source within the flake.
\subsection{Declarative System Configuration}
Each benchmark machine runs NixOS, where the entire operating system is
defined declaratively. There is no imperative package installation or
configuration drift. Given the same NixOS configuration, two machines
will have identical software, services, and kernel parameters.
Machine deployment is atomic: the system either switches to the new
configuration entirely or rolls back.
\subsection{Inventory-Driven Topology}
Clan's inventory system maps machines to service roles declaratively.
For each VPN, the orchestrator writes an inventory entry assigning
machines to roles (e.g., Nebula lighthouse vs.\ peer). The Clan module
system translates this into NixOS configuration: systemd services,
firewall rules, peer lists, and key references. The same inventory
entry always produces the same NixOS configuration.
\subsection{State Isolation}
Before installing a new VPN, the orchestrator deletes all state
directories from previous runs, including VPN-specific directories
(\texttt{/var/lib/zerotier-one}, \texttt{/var/lib/nebula}, etc.) and
benchmark directories. This prevents cross-contamination between tests.
\subsection{Data Provenance}
Results are organized in the four-level directory hierarchy shown in
Figure~\ref{fig:result-tree}. Each VPN directory stores a
\texttt{layout.json} capturing the machine topology used for that run.
Each impairment profile directory records the exact \texttt{tc}
parameters in \texttt{tc\_settings.json} and per-phase durations in
\texttt{timing\_breakdown.json}. Individual benchmark results are
stored in one subdirectory per machine pair.
\begin{figure}[ht]
\centering
\begin{forest}
for tree={
font=\ttfamily\small,
grow'=0,
folder,
s sep=2pt,
inner xsep=3pt,
inner ysep=2pt,
}
[date/
[vpn/
[layout.json]
[profile/
[tc\_settings.json]
[timing\_breakdown.json]
[parallel\_tcp\_iperf3.json]
[\textnormal{\textit{\{pos\}\_\{peer\}}}/
[ping.json]
[tcp\_iperf3.json]
[udp\_iperf3.json]
[qperf.json]
[rist\_stream.json]
[nix\_cache.json]
[connection\_timings.json]
]
]
]
[General/
[hardware.json]
[comparison/
[cross\_profile\_*.json]
[profile/
[benchmark\_stats.json]
[per-benchmark .json files]
]
]
]
]
\end{forest}
\caption{Directory hierarchy of benchmark results. Each run produces
per-VPN and per-profile directories alongside a \texttt{General/}
directory with cross-VPN comparison data.}
\label{fig:result-tree}
\end{figure}
Every benchmark result file uses a uniform JSON envelope with a
\texttt{status} field, a \texttt{data} object holding the
test-specific payload, and a \texttt{meta} object recording
wall-clock duration, number of attempts, VPN restart count and
duration, connectivity wait time, source and target machine names,
and on failure, the relevant service logs.
\section{VPNs Under Test}
VPNs were selected based on:
\begin{itemize}
\bitem{NAT traversal capability:} All selected VPNs can establish
connections between peers behind NAT without manual port forwarding.
\bitem{Decentralization:} Preference for solutions without mandatory
central servers, though coordinated-mesh VPNs were included for comparison.
\bitem{Active development:} Only VPNs with recent commits and
maintained releases were considered (with the exception of VpnCloud).
\bitem{Linux support:} All VPNs must run on Linux.
\end{itemize}
Table~\ref{tab:vpn_selection} lists the ten VPN implementations
selected for evaluation.
\begin{table}[H]
\centering
\caption{VPN implementations included in the benchmark}
\label{tab:vpn_selection}
\begin{tabular}{lll}
\hline
\textbf{VPN} & \textbf{Architecture} & \textbf{Notes} \\
\hline
Tailscale (Headscale) & Coordinated mesh & Open-source
coordination server \\
ZeroTier & Coordinated mesh & Global virtual Ethernet \\
Nebula & Coordinated mesh & Slack's overlay network \\
Tinc & Fully decentralized & Established since 1998 \\
Yggdrasil & Fully decentralized & Spanning-tree routing \\
Mycelium & Fully decentralized & End-to-end encrypted IPv6 overlay \\
Hyprspace & Fully decentralized & libp2p-based, IPFS-compatible \\
EasyTier & Fully decentralized & Rust-based, multi-protocol \\
VpnCloud & Fully decentralized & Lightweight, kernel bypass option \\
WireGuard & Point-to-point & Reference baseline (not a mesh VPN) \\
\hline
Internal (no VPN) & N/A & Baseline for raw network performance \\
\hline
\end{tabular}
\end{table}
WireGuard is not a mesh VPN but is included as a reference point.
Comparing its overhead to the mesh VPNs isolates the cost of mesh
coordination and NAT traversal.
The work by Laddad et al. \cite{laddad_keep_2022} complements and
expands upon concepts presented in the AMC paper. By revisiting the
foundations of CRDTs, the authors address limitations related to
reliance on eventual consistency and propose techniques to
distinguish between safe and unsafe queries using monotonicity
results derived from the CALM Theorem. This inquiry is highly
relevant for the Data Mesher component of Clan, as it delves into
operational and observable consistency guarantees that can optimize
both efficiency and safety in distributed query execution.
Specifically, the insights on query models and coordination-free
approaches advance the understanding of how CRDT-based systems, like
the Data Mesher, manage distributed state effectively without
compromising safety guarantees.

View File

@@ -1,185 +0,0 @@
\chapter{Motivation} % Main chapter title
\label{Motivation}
This thesis emerged from two interconnected research directions.
The initial focus was the Clan deployment framework,
which leverages Nix and NixOS to eliminate
entire classes of errors prevalent in contemporary infrastructure deployment.
By doing so, Clan reduces operational overhead to a degree
where a single administrator can reliably self-host
complex distributed services at scale.
During the development of the Clan framework,
which depends heavily on overlay VPNs for secure peer connectivity,
a recurring challenge became apparent:
practitioners held divergent preferences for mesh VPN solutions,
each citing different edge cases where their chosen VPN
proved unreliable or lacked essential features.
These discussions, however, were largely grounded in anecdotal evidence
rather than systematic evaluation.
This observation revealed a clear need for
rigorous, evidence-based comparison of Peer-to-Peer overlay VPN implementations.
This chapter introduces the Clan project, articulates its fundamental
objectives, outlines the key components, and examines the driving
factors motivating its development.
Peer-to-peer (P2P) technologies and decentralization have undergone
significant growth and evolution in recent years. These technologies
form the backbone of various systems, including P2P Edge
Computing—particularly in the context of the Internet of Things
(IoT)—Content Delivery Networks (CDNs), and Blockchain platforms such
as Ethereum. P2P architectures enable more democratic,
censorship-resistant, and fault-tolerant systems by reducing reliance
on single points of failure \cite{shukla_towards_2021}.
However, to fully realize these benefits, a P2P system must deploy
its nodes across a diverse set of entities. Greater diversity in
hosting increases the network's resilience to censorship and systemic failures.
Despite this, recent trends in Ethereum node hosting reveal a
significant reliance on centralized cloud providers. Notably, Amazon,
Hetzner, and OVH collectively host 70\% of all Ethereum nodes, as
illustrated in Figure \ref{fig:ethernodes_hosting}.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{Figures/ethernodes_hosting.png}
\caption{Distribution of Ethereum nodes hosted by various providers
\cite{noauthor_isps_nodate}}
\label{fig:ethernodes_hosting}
\end{figure}
The centralized nature of these providers and their domicile within the
same regulatory jurisdiction—the United States—introduces vulnerability.
Such a configuration allows for possible governmental intervention,
which could lead to network shutdowns or manipulation by leveraging
control over these cloud services.
The reliance on cloud-based solutions is largely attributed to their
ease of use and reliability, as self-hosting introduces several
technical and operational challenges, which include:
\begin{itemize}
\item \textbf{NAT Traversal:} Establishing direct connections
between peers located behind Network Address Translation (NAT)
devices is complex and often requires workarounds such as port
forwarding or relay servers.
\item \textbf{Dynamic IP Addresses:} Peers often have non-static
(dynamic) IP addresses assigned by Internet Service Providers
(ISPs), which makes maintaining stable connections difficult
without additional solutions like Dynamic DNS services.
\item \textbf{Data Reliability:} Ensuring data durability and
preventing loss due to hardware failures, system crashes, or
insufficient backup mechanisms can be a challenge for individual
users managing their own infrastructure.
\item \textbf{Security Concerns:} Self-hosted systems must be
protected from malicious actors, including securing data in
transit, authenticating connections, and mitigating attacks such
as Distributed Denial of Service (DDoS).
\item \textbf{Maintenance Overhead:} Regular updates, hardware
repairs, and troubleshooting require time and effort, which may
discourage users unfamiliar with system administration.
\item \textbf{Steep Learning Curve:} Non-technical users face a
high entry barrier, as hosting and configuring their own P2P
nodes often involve understanding complex networking and software
setup processes.
\item \textbf{High Network Churn:} In dynamic P2P environments
where peers frequently join and leave, ensuring consistent
availability of services and maintaining network stability
present additional challenges.
\item \textbf{Uptime and Availability:} Keeping self-hosted systems
online and operational 24/7 can be difficult, especially in
situations of power outages, hardware failures, or limited
internet connectivity.
\end{itemize}
Recognizing this gap, the Clan project aims to address these
challenges by simplifying the process of self-hosting, making it as
straightforward, accessible, and reliable as using a cloud provider.
The project's vision is to empower users to deploy and manage their
own private P2P networks with minimal technical expertise,
significantly lowering the barrier to entry.
As illustrated in Figure \ref{fig:vision-stages}, the proposed
solution includes a user-friendly web interface. This interface
allows users to design and customize their private P2P networks with
just a few clicks. To further simplify the process, the inclusion of
a Large Language Model (LLM) is envisioned to assist users throughout
the network creation process. The LLM would provide contextual
guidance, answer configuration-related queries, and help resolve
potential issues, thus making the system approachable for a wider
audience without requiring advanced technical skills.
\begin{figure}[h!]
\centering
% Row 1
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage1.png}
\caption{Stage 1}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage2.png}
\caption{Stage 2}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 2
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage3.png}
\caption{Stage 3}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage4.png}
\caption{Stage 4}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 3
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage5.png}
\caption{Stage 5}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage6.png}
\caption{Stage 6}
\end{subfigure}
\vspace{1em} % Add spacing between rows
% Row 4
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage7.png}
\caption{Stage 7}
\end{subfigure}
\hfill
\begin{subfigure}{0.45\textwidth}
\centering
\includegraphics[width=\linewidth]{Figures/vision/stage8.png}
\caption{Stage 8}
\end{subfigure}
\caption{Visionary Web Interface to Set Up a Clan Family Network}
\label{fig:vision-stages}
\end{figure}

885
Chapters/Results.tex Normal file
View File

@@ -0,0 +1,885 @@
% Chapter Template
\chapter{Results} % Main chapter title
\label{Results}
This chapter presents the results of the benchmark suite across all
ten VPN implementations and the internal baseline. The structure
follows the impairment profiles from ideal to degraded:
Section~\ref{sec:baseline} establishes overhead under ideal
conditions, then subsequent sections examine how each VPN responds to
increasing network impairment. The chapter concludes with findings
from the source code analysis. A recurring theme is that no single
metric captures VPN
performance; the rankings shift
depending on whether one measures throughput, latency, retransmit
behavior, or real-world application performance.
\section{Baseline Performance}
\label{sec:baseline}
The baseline impairment profile introduces no artificial loss or
reordering, so any performance gap between VPNs can be attributed to
the VPN itself. Throughout the plots in this section, the
\emph{internal} bar marks a direct host-to-host connection with no VPN
in the path; it represents the best the hardware can do. On its own,
this link delivers 934\,Mbps on a single TCP stream and a round-trip
latency of just
0.60\,ms. WireGuard comes remarkably close to these numbers, reaching
92.5\,\% of bare-metal throughput with only a single retransmit across
an entire 30-second test. Mycelium sits at the other extreme, adding
34.9\,ms of latency, roughly 58$\times$ the bare-metal figure.
\subsection{Test Execution Overview}
Running the full baseline suite across all ten VPNs and the internal
reference took just over four hours. The bulk of that time, about
2.6~hours (63\,\%), was spent on actual benchmark execution; VPN
installation and deployment accounted for another 45~minutes (19\,\%),
and roughly 21~minutes (9\,\%) went to waiting for VPN tunnels to come
up after restarts. The remaining time was consumed by VPN service restarts
and traffic-control (tc) stabilization.
Figure~\ref{fig:test_duration} breaks this down per VPN.
Most VPNs completed every benchmark without issues, but four failed
one test each: Nebula and Headscale timed out on the qperf
QUIC performance benchmark after six retries, while Hyprspace and
Mycelium failed the UDP iPerf3 test
with a 120-second timeout. Their individual success rate is
85.7\,\%, with all other VPNs passing the full suite
(Figure~\ref{fig:success_rate}).
\begin{figure}[H]
\centering
\begin{subfigure}[t]{1.0\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/Average Test
Duration per Machine}.png}
\caption{Average test duration per VPN, including installation
time and benchmark execution}
\label{fig:test_duration}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{1.0\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/Benchmark
Success Rate}.png}
\caption{Benchmark success rate across all seven tests}
\label{fig:success_rate}
\end{subfigure}
\caption{Test execution overview. Hyprspace has the longest average
duration due to UDP timeouts and long VPN connectivity
waits. WireGuard completes fastest. Nebula, Headscale,
Hyprspace, and Mycelium each fail one benchmark.}
\label{fig:test_overview}
\end{figure}
\subsection{TCP Throughput}
Each VPN ran a single-stream iPerf3 session for 30~seconds on every
link direction (lom$\rightarrow$yuki, yuki$\rightarrow$luna,
luna$\rightarrow$lom); Table~\ref{tab:tcp_baseline} shows the
averages. Three distinct performance tiers emerge, separated by
natural gaps in the data.
\begin{table}[H]
\centering
\caption{Single-stream TCP throughput at baseline, sorted by
throughput. Retransmits are averaged per 30-second test across
all three link directions. The horizontal rules separate the
three performance tiers.}
\label{tab:tcp_baseline}
\begin{tabular}{lrrr}
\hline
\textbf{VPN} & \textbf{Throughput (Mbps)} &
\textbf{Baseline (\%)} & \textbf{Retransmits} \\
\hline
Internal & 934 & 100.0 & 1.7 \\
WireGuard & 864 & 92.5 & 1 \\
ZeroTier & 814 & 87.2 & 1163 \\
Headscale & 800 & 85.6 & 102 \\
Yggdrasil & 795 & 85.1 & 75 \\
\hline
Nebula & 706 & 75.6 & 955 \\
EasyTier & 636 & 68.1 & 537 \\
VpnCloud & 539 & 57.7 & 857 \\
\hline
Hyprspace & 368 & 39.4 & 4965 \\
Tinc & 336 & 36.0 & 240 \\
Mycelium & 259 & 27.7 & 710 \\
\hline
\end{tabular}
\end{table}
The top tier ($>$80\,\% of baseline) groups WireGuard, ZeroTier,
Headscale, and Yggdrasil, all within 15\,\% of the bare-metal link.
A middle tier (55--80\,\%) follows with Nebula, EasyTier, and
VpnCloud, while Hyprspace, Tinc, and Mycelium occupy the bottom tier
at under 40\,\% of baseline.
Figure~\ref{fig:tcp_throughput} visualizes this hierarchy.
Raw throughput alone is incomplete, however. The retransmit column
reveals that not all high-throughput VPNs get there cleanly.
ZeroTier, for instance, reaches 814\,Mbps but accumulates
1\,163~retransmits per test, over 1\,000$\times$ what WireGuard
needs. ZeroTier compensates for tunnel-internal packet loss by
repeatedly triggering TCP congestion-control recovery, whereas
WireGuard sends data once and it arrives. Across all VPNs,
retransmit behaviour falls into three groups: \emph{clean} ($<$110:
WireGuard, Internal, Yggdrasil, Headscale), \emph{stressed}
(200--900: Tinc, EasyTier, Mycelium, VpnCloud), and
\emph{pathological} ($>$950: Nebula, ZeroTier, Hyprspace).
% TODO: Is this naming scheme any good?
% TODO: Fix TCP Throughput plot
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/TCP
Throughput}.png}
\caption{Average single-stream TCP throughput}
\label{fig:tcp_throughput}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/TCP
Retransmit Rate}.png}
\caption{Average TCP retransmits per 30-second test (log scale)}
\label{fig:tcp_retransmits}
\end{subfigure}
\caption{TCP throughput and retransmit rate at baseline. WireGuard
leads at 864\,Mbps with 1 retransmit. Hyprspace has nearly 5000
retransmits per test. The retransmit count does not always track
inversely with throughput: ZeroTier achieves high throughput
\emph{despite} high retransmits.}
\label{fig:tcp_results}
\end{figure}
Retransmits have a direct mechanical relationship with TCP congestion
control. Each retransmit triggers a reduction in the congestion window
(\texttt{cwnd}), throttling the sender. This relationship is visible
in Figure~\ref{fig:retransmit_correlations}: Hyprspace, with 4965
retransmits, maintains the smallest average congestion window in the
dataset (205\,KB), while Yggdrasil's 75 retransmits allow a 4.3\,MB
window, the largest of any VPN. At first glance this suggests a
clean inverse correlation between retransmits and congestion window
size, but the picture is misleading. Yggdrasil's outsized window is
largely an artifact of its jumbo overlay MTU (32\,731 bytes): each
segment carries far more data, so the window in bytes is inflated
relative to VPNs using a standard ${\sim}$1\,400-byte MTU. Comparing
congestion windows across different MTU sizes is not meaningful
without normalizing for segment size. What \emph{is} clear is that
high retransmit rates force TCP to spend more time in congestion
recovery than in steady-state transmission, capping throughput
regardless of available bandwidth. ZeroTier illustrates the
opposite extreme: brute-force retransmission can still yield high
throughput (814\,Mbps with 1\,163 retransmits), at the cost of wasted
bandwidth and unstable flow behavior.
VpnCloud stands out: its sender reports 538.8\,Mbps
but the receiver measures only 413.4\,Mbps, leaving a 23\,\% gap (the largest
in the dataset). This suggests significant in-tunnel packet loss or
buffering at the VpnCloud layer that the retransmit count (857)
alone does not fully explain.
Run-to-run variability also differs substantially. WireGuard ranges
from 824 to 884\,Mbps (a 60\,Mbps window), while Mycelium ranges
from 122 to 379\,Mbps, a 3:1 ratio between worst and best runs. A
VPN with wide variance is harder to capacity-plan around than one
with consistent performance, even if the average is lower.
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/retransmits-vs-throughput.png}
\caption{Retransmits vs.\ throughput}
\label{fig:retransmit_throughput}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/retransmits-vs-max-congestion-window.png}
\caption{Retransmits vs.\ max congestion window}
\label{fig:retransmit_cwnd}
\end{subfigure}
\caption{Retransmit correlations (log scale on x-axis). High
retransmits do not always mean low throughput (ZeroTier: 1\,163
retransmits, 814\,Mbps), but extreme retransmits do (Hyprspace:
4\,965 retransmits, 368\,Mbps). The apparent inverse correlation
between retransmits and congestion window size is dominated by
Yggdrasil's outlier (4.3\,MB \texttt{cwnd}), which is inflated
by its 32\,KB jumbo overlay MTU rather than by low retransmits
alone.}
\label{fig:retransmit_correlations}
\end{figure}
\subsection{Latency}
Sorting by latency rearranges the rankings considerably.
Table~\ref{tab:latency_baseline} lists the average ping round-trip
times, which cluster into three distinct ranges.
\begin{table}[H]
\centering
\caption{Average ping RTT at baseline, sorted by latency}
\label{tab:latency_baseline}
\begin{tabular}{lr}
\hline
\textbf{VPN} & \textbf{Avg RTT (ms)} \\
\hline
Internal & 0.60 \\
VpnCloud & 1.13 \\
Tinc & 1.19 \\
WireGuard & 1.20 \\
Nebula & 1.25 \\
ZeroTier & 1.28 \\
EasyTier & 1.33 \\
\hline
Headscale & 1.64 \\
Hyprspace & 1.79 \\
Yggdrasil & 2.20 \\
\hline
Mycelium & 34.9 \\
\hline
\end{tabular}
\end{table}
Six VPNs stay below 1.3\,ms, comfortably close to the bare-metal
0.60\,ms. VpnCloud posts the lowest latency of any VPN (1.13\,ms), below
WireGuard (1.20\,ms), yet its throughput tops out at only 539\,Mbps.
Low per-packet latency does not guarantee high bulk throughput. A
second group (Headscale,
Hyprspace, Yggdrasil) lands in the 1.5--2.2\,ms range, representing
moderate overhead. Then there is Mycelium at 34.9\,ms, so far
removed from the rest that Section~\ref{sec:mycelium_routing} gives
it a dedicated analysis.
ZeroTier's average of 1.28\,ms looks unremarkable, but its maximum
RTT spikes to 8.6\,ms, a 6.8$\times$ jump and the largest for any
sub-2\,ms VPN. These spikes point to periodic control-plane
interference that the average hides.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/ping/Average RTT}.png}
\caption{Average ping RTT at baseline. Mycelium (34.9\,ms) is a
massive outlier at 58$\times$ the internal baseline. VpnCloud is
the fastest VPN at 1.13\,ms, slightly below WireGuard (1.20\,ms).}
\label{fig:ping_rtt}
\end{figure}
Tinc presents a paradox: it has the third-lowest latency (1.19\,ms)
but only the second-lowest throughput (336\,Mbps). Packets traverse
the tunnel quickly, yet single-threaded userspace processing cannot
keep up with the link speed. The qperf benchmark backs this up: Tinc
maxes out at
14.9\,\% CPU while delivering just 336\,Mbps, a clear sign that
the CPU, not the network, is the bottleneck.
Figure~\ref{fig:latency_throughput} makes this disconnect easy to
spot.
The qperf measurements also reveal a wide spread in CPU usage.
Hyprspace (55.1\,\%) and Yggdrasil
(52.8\,\%) consume 5--6$\times$ as much CPU as Internal's
9.7\,\%. WireGuard sits at 30.8\,\%, surprisingly high for a
kernel-level implementation, though much of that goes to
cryptographic processing. On the efficient end, VpnCloud
(14.9\,\%), Tinc (14.9\,\%), and EasyTier (15.4\,\%) do the most
with the least CPU time. Nebula and Headscale are missing from
this comparison because qperf failed for both.
%TODO: Explain why they consistently failed
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{Figures/baseline/latency-vs-throughput.png}
\caption{Latency vs.\ throughput at baseline. Each point represents
one VPN. The quadrants reveal different bottleneck types:
VpnCloud (low latency, moderate throughput), Tinc (low latency,
low throughput, CPU-bound), Mycelium (high latency, low
throughput, overlay routing overhead).}
\label{fig:latency_throughput}
\end{figure}
\subsection{Parallel TCP Scaling}
The single-stream benchmark tests one link direction at a time. The
parallel benchmark changes this setup: all three link directions
(lom$\rightarrow$yuki, yuki$\rightarrow$luna,
luna$\rightarrow$lom) run simultaneously in a circular pattern for
60~seconds, each carrying one bidirectional TCP stream (six
unidirectional flows in total). Because three independent
link pairs now compete for shared tunnel resources at once, the
aggregate throughput is naturally higher than any single direction
alone, which is why even Internal reaches 1.50$\times$ its
single-stream figure. The scaling factor (parallel throughput
divided by single-stream throughput) captures two effects:
the benefit of using multiple link pairs in parallel, and how
well the VPN handles the resulting contention.
Table~\ref{tab:parallel_scaling} lists the results.
\begin{table}[H]
\centering
\caption{Parallel TCP scaling at baseline. Scaling factor is the
ratio of parallel to single-stream throughput. Internal's
1.50$\times$ represents the expected scaling on this hardware.}
\label{tab:parallel_scaling}
\begin{tabular}{lrrr}
\hline
\textbf{VPN} & \textbf{Single (Mbps)} &
\textbf{Parallel (Mbps)} & \textbf{Scaling} \\
\hline
Mycelium & 259 & 569 & 2.20$\times$ \\
Hyprspace & 368 & 803 & 2.18$\times$ \\
Tinc & 336 & 563 & 1.68$\times$ \\
Yggdrasil & 795 & 1265 & 1.59$\times$ \\
Headscale & 800 & 1228 & 1.54$\times$ \\
Internal & 934 & 1398 & 1.50$\times$ \\
ZeroTier & 814 & 1206 & 1.48$\times$ \\
WireGuard & 864 & 1281 & 1.48$\times$ \\
EasyTier & 636 & 927 & 1.46$\times$ \\
VpnCloud & 539 & 763 & 1.42$\times$ \\
Nebula & 706 & 648 & 0.92$\times$ \\
\hline
\end{tabular}
\end{table}
The VPNs that gain the most are those most constrained in
single-stream mode. Mycelium's 34.9\,ms RTT means a lone TCP stream
can never fill the pipe: the bandwidth-delay product demands a window
larger than any single flow maintains, so multiple concurrent flows
compensate for that constraint and push throughput to 2.20$\times$
the single-stream figure. Hyprspace scales almost as well
(2.18$\times$) but for a
different reason: multiple streams work around the buffer bloat that
cripples any individual flow
(Section~\ref{sec:hyprspace_bloat}). Tinc picks up a
1.68$\times$ boost because several streams can collectively keep its
single-threaded CPU busy during what would otherwise be idle gaps in
a single flow.
WireGuard and Internal both scale cleanly at around
1.48--1.50$\times$ with zero retransmits, suggesting that
WireGuard's overhead is a fixed per-packet cost that does not worsen
under multiplexing.
Nebula is the only VPN that actually gets \emph{slower} with more
streams: throughput drops from 706\,Mbps to 648\,Mbps
(0.92$\times$) while retransmits jump from 955 to 2\,462. The ten
streams are clearly fighting each other for resources inside the
tunnel.
More streams also amplify existing retransmit problems. Hyprspace
climbs from 4\,965 to 17\,426~retransmits;
VpnCloud from 857 to 6\,023. VPNs that were clean in single-stream
mode stay clean under load, while the stressed ones only get worse.
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/single-stream-vs-parallel-tcp-throughput.png}
\caption{Single-stream vs.\ parallel throughput}
\label{fig:single_vs_parallel}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/parallel-tcp-scaling-factor.png}
\caption{Parallel TCP scaling factor}
\label{fig:scaling_factor}
\end{subfigure}
\caption{Parallel TCP scaling at baseline. Nebula is the only VPN
where parallel throughput is lower than single-stream
(0.92$\times$). Mycelium and Hyprspace benefit most from
parallelism ($>$2$\times$), compensating for latency and buffer
bloat respectively. The dashed line at 1.0$\times$ marks the
break-even point.}
\label{fig:parallel_tcp}
\end{figure}
\subsection{UDP Stress Test}
The UDP iPerf3 test uses unlimited sender rate (\texttt{-b 0}),
which is a deliberate overload test rather than a realistic workload.
The sender throughput values are artifacts: they reflect how fast the
sender can write to the socket, not how fast data traverses the
tunnel. Yggdrasil, for example, reports 63\,744\,Mbps sender
throughput because it uses a 32\,731-byte block size (a jumbo-frame
overlay MTU), inflating the apparent rate per \texttt{send()} system
call. Only the receiver throughput is meaningful.
\begin{table}[H]
\centering
\caption{UDP receiver throughput and packet loss at baseline
(\texttt{-b 0} stress test). Hyprspace and Mycelium timed out
at 120 seconds and are excluded.}
\label{tab:udp_baseline}
\begin{tabular}{lrr}
\hline
\textbf{VPN} & \textbf{Receiver (Mbps)} &
\textbf{Loss (\%)} \\
\hline
Internal & 952 & 0.0 \\
WireGuard & 898 & 0.0 \\
Nebula & 890 & 76.2 \\
Headscale & 876 & 69.8 \\
EasyTier & 865 & 78.3 \\
Yggdrasil & 852 & 98.7 \\
ZeroTier & 851 & 89.5 \\
VpnCloud & 773 & 83.7 \\
Tinc & 471 & 89.9 \\
\hline
\end{tabular}
\end{table}
%TODO: Explain that the UDP test also crashes often,
% which makes the test somewhat unreliable
% but a good indicator if the network traffic is "different" than
% what the programmer expected
Only Internal and WireGuard achieve 0\,\% packet loss. Both operate at
the kernel level with proper backpressure that matches sender to
receiver rate. Every userspace VPN shows massive loss (69--99\%)
because the sender overwhelms the tunnel's processing capacity.
Yggdrasil's 98.7\% loss is the most extreme: it sends the most data
(due to its large block size) but loses almost all of it. These loss
rates do not reflect real-world UDP behavior but reveal which VPNs
implement effective flow control. Hyprspace and Mycelium could not
complete the UDP test at all, timing out after 120 seconds.
The \texttt{blksize\_bytes} field reveals each VPN's effective path
MTU: Yggdrasil at 32\,731 bytes (jumbo overlay), ZeroTier at
2\,728, Internal at 1\,448, VpnCloud at 1\,375, WireGuard at
1\,368, Tinc at 1\,353, EasyTier at 1\,288, Nebula at 1\,228, and
Headscale at 1\,208 (the smallest). These differences affect
fragmentation behavior under real workloads, particularly for
protocols that send large datagrams.
%TODO: Mention QUIC
%TODO: Mention again that the "default" settings of every VPN have been used
% to better reflect real world use, as most users probably won't
% change these defaults
% and explain that good defaults are as much a part of good software as
% having the features but they are hard to configure correctly
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/udp/UDP
Throughput}.png}
\caption{UDP receiver throughput}
\label{fig:udp_throughput}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/udp/UDP
Packet Loss}.png}
\caption{UDP packet loss}
\label{fig:udp_loss}
\end{subfigure}
\caption{UDP stress test results at baseline (\texttt{-b 0},
unlimited sender rate). Internal and WireGuard are the only
implementations with 0\% loss. Hyprspace and Mycelium are
excluded due to 120-second timeouts.}
\label{fig:udp_results}
\end{figure}
% TODO: Compare parallel TCP retransmit rate
% with single TCP retransmit rate and see what changed
\subsection{Real-World Workloads}
Saturating a link with iPerf3 measures peak capacity, but not how a
VPN performs under realistic traffic. This subsection switches to
application-level workloads: downloading packages from a Nix binary
cache and streaming video over RIST. Both interact with the VPN
tunnel the way real software does, through many short-lived
connections, TLS handshakes, and latency-sensitive UDP packets.
\paragraph{Nix Binary Cache Downloads.}
This test downloads a fixed set of Nix packages through each VPN and
measures the total transfer time. The results
(Table~\ref{tab:nix_cache}) compress the throughput hierarchy
considerably: even Hyprspace, the worst performer, finishes in
11.92\,s, only 40\,\% slower than bare metal. Once connection
setup, TLS handshakes, and HTTP round-trips enter the picture,
throughput differences between 500 and 900\,Mbps matter far less
than per-connection latency.
\begin{table}[H]
\centering
\caption{Nix binary cache download time at baseline, sorted by
duration. Overhead is relative to the internal baseline (8.53\,s).}
\label{tab:nix_cache}
\begin{tabular}{lrr}
\hline
\textbf{VPN} & \textbf{Mean (s)} &
\textbf{Overhead (\%)} \\
\hline
Internal & 8.53 & -- \\
Nebula & 9.15 & +7.3 \\
ZeroTier & 9.22 & +8.1 \\
VpnCloud & 9.39 & +10.0 \\
EasyTier & 9.39 & +10.1 \\
WireGuard & 9.45 & +10.8 \\
Headscale & 9.79 & +14.8 \\
Tinc & 10.00 & +17.2 \\
Mycelium & 10.07 & +18.1 \\
Yggdrasil & 10.59 & +24.2 \\
Hyprspace & 11.92 & +39.7 \\
\hline
\end{tabular}
\end{table}
Several rankings invert relative to raw throughput. ZeroTier
finishes faster than WireGuard (9.22\,s vs.\ 9.45\,s) despite
30\,\% fewer raw Mbps and 1\,000$\times$ more retransmits. Yggdrasil
is the clearest example: it has the
third-highest throughput at 795\,Mbps, yet lands at 24\,\% overhead
because its
2.2\,ms latency adds up over the many small sequential HTTP requests
that constitute a Nix cache download.
Figure~\ref{fig:throughput_vs_download} confirms this weak link
between raw throughput and real-world download speed.
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/Nix Cache
Mean Download Time}.png}
\caption{Nix cache download time per VPN}
\label{fig:nix_cache}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/raw-throughput-vs-nix-cache-download-time.png}
\caption{Raw throughput vs.\ download time}
\label{fig:throughput_vs_download}
\end{subfigure}
\caption{Application-level download performance. The throughput
hierarchy compresses under real HTTP workloads: the worst VPN
(Hyprspace, 11.92\,s) is only 40\% slower than bare metal.
Throughput explains some variance but not all: Yggdrasil
(795\,Mbps, 10.59\,s) is slower than Nebula (706\,Mbps, 9.15\,s)
because latency matters more for HTTP workloads.}
\label{fig:nix_download}
\end{figure}
\paragraph{Video Streaming (RIST).}
At just 3.3\,Mbps, the RIST video stream sits comfortably within
every VPN's throughput budget. This test therefore measures
something different: how well the VPN handles real-time UDP packet
delivery under steady load. Nine of the eleven VPNs pass without
incident, delivering 100\,\% video quality. The 14--16 dropped
frames that appear uniformly across all VPNs, including Internal,
trace back to encoder warm-up rather than tunnel overhead.
Headscale is the exception. It averages just 13.1\,\% quality,
dropping 288~packets per test interval. The degradation is not
bursty but sustained: median quality sits at 10\,\%, and the
interquartile range of dropped packets spans a narrow 255--330 band.
The qperf benchmark independently corroborates this, having failed
outright for Headscale, confirming that something beyond bulk TCP is
broken.
What makes this failure unexpected is that Headscale builds on
WireGuard, which handles video flawlessly. TCP throughput places
Headscale squarely in Tier~1. Yet the RIST test runs over UDP, and
qperf probes latency-sensitive paths using both TCP and UDP. The
pattern points toward Headscale's DERP relay or NAT traversal layer
as the source. Its effective path MTU of 1\,208~bytes, the smallest
of any VPN, likely compounds the issue: RIST packets that exceed
this limit must be fragmented, and reassembling fragments under
sustained load produces exactly the kind of steady, uniform packet
drops the data shows. For video conferencing, VoIP, or any
real-time media workload, this is a disqualifying result regardless
of TCP throughput.
Hyprspace reveals a different failure mode. Its average quality
reads 100\,\%, but the raw numbers underneath are far from stable:
mean packet drops of 1\,194 and a maximum spike of 55\,500, with
the 25th, 50th, and 75th percentiles all at zero. Hyprspace
alternates between perfect delivery and catastrophic bursts.
RIST's forward error correction compensates for most of these
events, but the worst spikes are severe enough to overwhelm FEC
entirely.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/Video
Streaming/RIST Quality}.png}
\caption{RIST video streaming quality at baseline. Headscale at
13.1\% average quality is the clear outlier. Every other VPN
achieves 99.8\% or higher. Nebula is at 99.8\% (minor
degradation). The video bitrate (3.3\,Mbps) is well within every
VPN's throughput capacity, so this test reveals real-time UDP
handling quality rather than bandwidth limits.}
\label{fig:rist_quality}
\end{figure}
\subsection{Operational Resilience}
Sustained-load performance does not predict recovery speed. How
quickly a tunnel comes up after a reboot, and how reliably it
reconverges, matters as much as peak throughput for operational use.
First-time connectivity spans a wide range. Headscale and WireGuard
are ready in under 50\,ms, while ZeroTier (8--17\,s) and VpnCloud
(10--14\,s) spend seconds negotiating with their control planes
before passing traffic.
%TODO: Maybe we want to scrap first-time connectivity
Reboot reconnection rearranges the rankings. Hyprspace, the worst
performer under sustained TCP load, recovers in just 8.7~seconds on
average, faster than any other VPN. WireGuard and Nebula follow at
10.1\,s each. Nebula's consistency is striking: 10.06, 10.06,
10.07\,s across its three nodes, pointing to a hard-coded timer
rather than topology-dependent convergence.
Mycelium sits at the opposite end, needing 76.6~seconds and showing
the same suspiciously uniform pattern (75.7, 75.7, 78.3\,s),
suggesting a fixed protocol-level wait built into the overlay.
%TODO: Hard coded timer needs to be verified
Yggdrasil produces the most lopsided result in the dataset: its yuki
node is back in 7.1~seconds while lom and luna take 94.8 and
97.3~seconds respectively. The gap likely reflects the overlay's
spanning-tree rebuild: a node near the root of the tree reconverges
quickly, while one further out has to wait for the topology to
propagate.
%TODO: Needs clarifications what is a "spanning tree build"
\begin{figure}[H]
\centering
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/reboot-reconnection-time-per-vpn.png}
\caption{Average reconnection time per VPN}
\label{fig:reboot_bar}
\end{subfigure}
\vspace{1em}
\begin{subfigure}[t]{\textwidth}
\centering
\includegraphics[width=\textwidth]{Figures/baseline/reboot-reconnection-time-heatmap.png}
\caption{Per-node reconnection time heatmap}
\label{fig:reboot_heatmap}
\end{subfigure}
\caption{Reboot reconnection time at baseline. The heatmap reveals
Yggdrasil's extreme per-node asymmetry (7\,s for yuki vs.\
95--97\,s for lom/luna) and Mycelium's uniform slowness (75--78\,s
across all nodes). Hyprspace reconnects fastest (8.7\,s average)
despite its poor sustained-load performance.}
\label{fig:reboot_reconnection}
\end{figure}
\subsection{Pathological Cases}
\label{sec:pathological}
Three VPNs exhibit behaviors that the aggregate numbers alone cannot
explain. The following subsections piece together observations from
earlier benchmarks into per-VPN diagnoses.
\paragraph{Hyprspace: Buffer Bloat.}
\label{sec:hyprspace_bloat}
Hyprspace produces the most severe performance collapse in the
dataset. At idle, its ping latency is a modest 1.79\,ms.
Under TCP load, that number balloons to roughly 2\,800\,ms, a
1\,556$\times$ increase. This is not the network becoming
congested; it is the VPN tunnel itself filling up with buffered
packets and refusing to drain.
The consequences ripple through every TCP metric. With 4\,965
retransmits per 30-second test (one in every 200~segments), TCP
spends most of its time in congestion recovery rather than
steady-state transfer, shrinking the average congestion window to
205\,KB, the smallest in the dataset. Under parallel load the
situation worsens: retransmits climb to 17\,426. The buffering even
inverts iPerf3's measurements: the receiver reports 419.8\,Mbps
while the sender sees only 367.9\,Mbps, because massive ACK delays
cause the sender-side timer to undercount the actual data rate. The
UDP test never finished at all, timing out at 120~seconds.
% Should we always use percentages for retransmits?
What prevents Hyprspace from being entirely unusable is everything
\emph{except} sustained load. It has the fastest reboot
reconnection in the dataset (8.7\,s) and delivers 100\,\% video
quality outside of its burst events. The pathology is narrow but
severe: any continuous data stream saturates the tunnel's internal
buffers.
\paragraph{Mycelium: Routing Anomaly.}
\label{sec:mycelium_routing}
Mycelium's 34.9\,ms average latency appears to be the cost of
routing through a global overlay. The per-path numbers, however,
reveal a bimodal distribution:
\begin{itemize}
\bitem{luna$\rightarrow$lom:} 1.63\,ms (direct path, comparable
to Headscale at 1.64\,ms)
\bitem{lom$\rightarrow$yuki:} 51.47\,ms (overlay-routed)
\bitem{yuki$\rightarrow$luna:} 51.60\,ms (overlay-routed)
\end{itemize}
One of the three links has found a direct route; the other two still
bounce through the overlay. All three machines sit on the same
physical network, so Mycelium's path discovery is failing
intermittently, a more specific problem than blanket overlay
overhead. Throughput mirrors the split:
yuki$\rightarrow$luna reaches 379\,Mbps while
luna$\rightarrow$lom manages only 122\,Mbps, a 3:1 gap. In
bidirectional mode, the reverse direction on that worst link drops
to 58.4\,Mbps, the lowest single-direction figure in the entire
dataset.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/Mycelium/Average
Throughput}.png}
\caption{Per-link TCP throughput for Mycelium, showing extreme
path asymmetry caused by inconsistent direct route discovery.
The 3:1 ratio between best (yuki$\rightarrow$luna, 379\,Mbps)
and worst (luna$\rightarrow$lom, 122\,Mbps) links reflects
different overlay routing paths.}
\label{fig:mycelium_paths}
\end{figure}
The overlay penalty shows up most clearly at connection setup.
Mycelium's average time-to-first-byte is 93.7\,ms (vs.\ Internal's
16.8\,ms, a 5.6$\times$ overhead), and connection establishment
alone costs 47.3\,ms (3$\times$ overhead). Every new connection
incurs that overhead, so workloads dominated by
short-lived connections accumulate it rapidly. Bulk downloads, by
contrast, amortize it: the Nix cache test finishes only 18\,\%
slower than Internal (10.07\,s vs.\ 8.53\,s) because once the
transfer phase begins, per-connection latency fades into the
background.
Mycelium is also the slowest VPN to recover from a reboot:
76.6~seconds on average, and almost suspiciously uniform across
nodes (75.7, 75.7, 78.3\,s). That kind of consistency points to a
hard-coded convergence timer in the overlay protocol rather than
anything topology-dependent. The UDP test timed out at
120~seconds, and even first-time connectivity required a
70-second wait at startup.
% Explain what topology-dependent means in this case.
\paragraph{Tinc: Userspace Processing Bottleneck.}
Tinc is a clear case of a CPU bottleneck masquerading as a network
problem. At 1.19\,ms latency, packets get through the
tunnel quickly. Yet throughput tops out at 336\,Mbps, barely a
third of the bare-metal link. The usual suspects do not apply:
Tinc's path MTU is a healthy 1\,500~bytes
(\texttt{blksize\_bytes} of 1\,353 from UDP iPerf3, comparable to
VpnCloud at 1\,375 and WireGuard at 1\,368), and its retransmit
count (240) is moderate. What limits Tinc is its single-threaded
userspace architecture: one CPU core simply cannot encrypt, copy,
and forward packets fast enough to fill the pipe.
The parallel benchmark confirms this diagnosis. Tinc scales to
563\,Mbps (1.68$\times$), beating Internal's 1.50$\times$ ratio.
Multiple TCP streams collectively keep that single core busy during
what would otherwise be idle gaps in any individual flow, squeezing
out throughput that no single stream could reach alone.
\section{Impact of Network Impairment}
This section examines how each VPN responds to the Low, Medium, and
High impairment profiles defined in Chapter~\ref{Methodology}.
\subsection{Ping}
% RTT and packet loss across impairment profiles.
\subsection{TCP Throughput}
% TCP iperf3: throughput, retransmits, congestion window.
\subsection{UDP Throughput}
% UDP iperf3: throughput, jitter, packet loss.
\subsection{Parallel TCP}
% Parallel iperf3: throughput under contention (A->B, B->C, C->A).
\subsection{QUIC Performance}
% qperf: bandwidth, TTFB, connection establishment time.
\subsection{Video Streaming}
% RIST: bitrate, dropped frames, packets recovered, quality score.
\subsection{Application-Level Download}
% Nix cache: download duration for Firefox package.
\section{Tailscale Under Degraded Conditions}
% The central finding: Tailscale outperforming the raw Linux
% networking stack under impairment.
\subsection{Observed Anomaly}
% Present the data showing Tailscale exceeding internal baseline
% throughput under Medium/High impairment.
\subsection{Congestion Control Analysis}
% Reno vs CUBIC, RACK disabled to avoid spurious retransmits
% under reordering.
\subsection{Tuned Kernel Parameters}
% Re-run results with tuned buffer sizes and congestion control
% on the internal baseline, showing the gap closes.
\section{Source Code Analysis}
\subsection{Feature Matrix Overview}
% Summary of the 131-feature matrix across all ten VPNs.
% Highlight key architectural differences that explain
% performance results.
\subsection{Security Vulnerabilities}
% Vulnerabilities discovered during source code review.
\section{Summary of Findings}
% Brief summary table or ranking of VPNs by key metrics.
% Save deeper interpretation for a Discussion chapter.

View File

@@ -0,0 +1,36 @@
%----------------------------------------------------------------------------------------
% GERMAN ABSTRACT PAGE
%----------------------------------------------------------------------------------------
\begingroup
\renewcommand{\abstractname}{Zusammenfassung}
\begin{abstract}
\addchaptertocentry{Zusammenfassung}
Diese Arbeit evaluiert zehn Peer-to-Peer-Mesh-VPN-Implementierungen
unter kontrollierten Netzwerkbedingungen mithilfe eines
reproduzierbaren, Nix-basierten Benchmark-Frameworks, das auf einem
Deployment-System namens Clan aufbaut. Die Implementierungen reichen
von Kernel-Protokollen (WireGuard, als Referenz-Baseline) bis zu
Userspace-Overlays (Tinc, Yggdrasil, Nebula, Hyprspace und
weitere). Jede wird unter vier Beeinträchtigungsprofilen mit
variierendem Paketverlust, Paketumsortierung, Latenz und Jitter
getestet, was über 300 Messungen in sieben Benchmarks ergibt, von
reinem TCP- und UDP-Durchsatz bis zu Video-Streaming und
Anwendungs-Downloads.
Ein zentrales Ergebnis ist, dass keine einzelne Metrik die
VPN-Leistung vollständig erfasst: Die Rangfolge verschiebt sich je
nachdem, ob Durchsatz, Latenz, Retransmit-Verhalten oder
Transferzeit auf Anwendungsebene gemessen wird. Unter
Netzwerkbeeinträchtigung übertrifft Tailscale (über Headscale) den
Standard-Netzwerkstack des Linux-Kernels, eine Anomalie, die wir
auf die optimierten Congestion-Control- und Pufferparameter seines
Userspace-IP-Stacks zurückführen. Eine erneute Durchführung der
internen Baseline mit entsprechend angepassten Kernel-Parametern
schließt die Lücke und bestätigt diese Erklärung. Die begleitende
Quellcodeanalyse deckte eine kritische Sicherheitslücke in einer
der evaluierten Implementierungen auf.
\end{abstract}
\endgroup

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 189 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 236 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 196 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 308 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 228 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 218 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 210 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 196 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 208 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 49 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 175 KiB

View File

@@ -250,8 +250,7 @@
\newcommand{\decoRule}{\rule{.8\textwidth}{.4pt}} % New command for a
% rule to be used under figures
\setcounter{tocdepth}{3} % The depth to which the document sections
% are printed to the table of contents
\setcounter{tocdepth}{1} % Only show chapters and sections in table of contents
\ProvideDocumentCommand{\addchaptertocentry}{ m }{%
\addcontentsline{toc}{chapter}{#1}%
}
@@ -390,8 +389,6 @@ KOMA-script documentation for details.}]{fancyhdr}
{\normalsize \degreename\par}% Degree name
\bigskip
{\normalsize\bfseries \@title \par}% Thesis title
\medskip
{\normalsize \byname{} \authorname \par}% Author name
\bigskip
\end{center}
}

View File

@@ -4,6 +4,7 @@ extend-exclude = [
"**/value",
"**.rev",
"**/facter-report.nix",
"Chapters/Zusammenfassung.tex",
"**/key.json",
"pkgs/clan-cli/clan_lib/machines/test_suggestions.py",
]
@@ -16,4 +17,4 @@ dynamicdns = "dynamicdns"
substituters = "substituters"
[default.extend-identifiers]
pn = "pn"
pn = "pn"

View File

@@ -1,62 +0,0 @@
@article{Reference1,
Abstract = {We have developed an enhanced Littrow configuration
extended cavity diode laser (ECDL) that can be tuned without
changing the direction of the output beam. The output of a
conventional Littrow ECDL is reflected from a plane mirror fixed
parallel to the tuning diffraction grating. Using a free-space
Michelson wavemeter to measure the laser wavelength, we can tune
the laser over a range greater than 10 nm without any alteration of
alignment.},
Author = {C. J. Hawthorn and K. P. Weber and R. E. Scholten},
Journal = {Review of Scientific Instruments},
Month = {12},
Number = {12},
Numpages = {3},
Pages = {4477--4479},
Title = {Littrow Configuration Tunable External Cavity Diode Laser
with Fixed Direction Output Beam},
Volume = {72},
Url = {http://link.aip.org/link/?RSI/72/4477/1},
Year = {2001}}
@article{Reference3,
Abstract = {Operating a laser diode in an extended cavity which
provides frequency-selective feedback is a very effective method of
reducing the laser's linewidth and improving its tunability. We
have developed an extremely simple laser of this type, built from
inexpensive commercial components with only a few minor
modifications. A 780~nm laser built to this design has an output
power of 80~mW, a linewidth of 350~kHz, and it has been
continuously locked to a Doppler-free rubidium transition for several days.},
Author = {A. S. Arnold and J. S. Wilson and M. G. Boshier and J. Smith},
Journal = {Review of Scientific Instruments},
Month = {3},
Number = {3},
Numpages = {4},
Pages = {1236--1239},
Title = {A Simple Extended-Cavity Diode Laser},
Volume = {69},
Url = {http://link.aip.org/link/?RSI/69/1236/1},
Year = {1998}}
@article{Reference2,
Abstract = {We present a review of the use of diode lasers in
atomic physics with an extensive list of references. We discuss the
relevant characteristics of diode lasers and explain how to
purchase and use them. We also review the various techniques that
have been used to control and narrow the spectral outputs of diode
lasers. Finally we present a number of examples illustrating the
use of diode lasers in atomic physics experiments. Review of
Scientific Instruments is copyrighted by The American Institute of Physics.},
Author = {Carl E. Wieman and Leo Hollberg},
Journal = {Review of Scientific Instruments},
Keywords = {Diode Laser},
Month = {1},
Number = {1},
Numpages = {20},
Pages = {1--20},
Title = {Using Diode Lasers for Atomic Physics},
Volume = {62},
Url = {http://link.aip.org/link/?RSI/62/1/1},
Year = {1991}}

View File

@@ -49,7 +49,10 @@
devShells.default = pkgs.mkShell {
buildInputs = [
pkgs.nodejs
pkgs.vite
texlive
pkgs.pandoc
pkgs.inkscape
pkgs.python3
];

View File

@@ -1,5 +1,5 @@
@Comment{$ biblatex control file $}
@Comment{$ biblatex bcf format version 3.11 $}
@Comment{$ biblatex bcf format version 3.10 $}
% Do not modify this file!
%
% This is an auxiliary file used by the 'biblatex' package.
@@ -7,5 +7,5 @@
% required.
@Control{biblatex-control,
options = {3.11:0:0:1:0:1:1:0:0:0:0:1:3:1:3:1:0:0:3:1:79:+:+:nty},
options = {3.10:0:0:1:0:1:1:0:0:0:0:1:3:1:3:1:0:0:3:1:79:+:+:nty},
}

154
main.tex
View File

@@ -25,10 +25,10 @@
\documentclass[
11pt, % The default document font size, options: 10pt, 11pt, 12pt
oneside, % Two side (alternating margins) for binding by default,
oneside,%twoside, % Two side (alternating margins) for binding by default,
% uncomment to switch to one side
english, % ngerman for German
singlespacing, % Single line spacing, alternatives: onehalfspacing
onehalfspacing, % Single line spacing, alternatives: onehalfspacing
% or doublespacing
%draft, % Uncomment to enable draft mode (no pictures, no links,
% overfull hboxes indicated)
@@ -38,15 +38,20 @@
% the table of contents
%toctotoc, % Uncomment to add the main table of contents to the
% table of contents
%parskip, % Uncomment to add space between paragraphs
parskip, % Add space between paragraphs and remove indentation
%nohyperref, % Uncomment to not load the hyperref package
headsepline, % Uncomment to get a line under the header
%chapterinoneline, % Uncomment to place the chapter title next to
% the number on one line
chapterinoneline, % Place the chapter title next to the number on one line
%consistentlayout, % Uncomment to change the layout of the
% declaration, abstract and acknowledgements pages to match the default layout
]{MastersDoctoralThesis} % The class file specifying the document structure
% Fix chapter prefix for chapterinoneline option
\makeatletter
\renewcommand{\mdtChapapp}{\@chapapp\space}
\renewcommand{\autodot}{:}
\makeatother
\usepackage[utf8]{inputenc} % Required for inputting international characters
\usepackage[T1]{fontenc} % Output font encoding for international characters
\usepackage{float}
@@ -54,6 +59,9 @@
\usepackage{svg}
\usepackage{acronym}
\usepackage{subcaption} % For subfigures
\usepackage{tikz}
\usetikzlibrary{shapes.geometric}
\usepackage[edges]{forest}
\usepackage[backend=bibtex,style=numeric,natbib=true]{biblatex} %
% Use the bibtex backend with the authoryear citation style (which
@@ -65,28 +73,32 @@
\usepackage[autostyle=true]{csquotes} % Required to generate
% language-dependent quotes in the bibliography
\newcommand{\bitem}[1]{
\item \textbf{#1}}
\setcounter{secnumdepth}{0} % Only number chapters, not sections or subsections
%----------------------------------------------------------------------------------------
% MARGIN SETTINGS
%----------------------------------------------------------------------------------------
\geometry{
paper=a4paper, % Change to letterpaper for US letter
inner=2.5cm, % Inner margin
outer=3.8cm, % Outer margin
bindingoffset=.5cm, % Binding offset
top=1.5cm, % Top margin
bottom=1.5cm, % Bottom margin
%showframe, % Uncomment to show how the type block is set on the page
paper=a4paper,
inner=3.0cm, % Bindungsseite braucht mehr Platz
outer=2.5cm, % Außenseite
bindingoffset=0.5cm, % Zusätzlich für Klebebindung
top=2.5cm,
bottom=2.0cm,
}
%----------------------------------------------------------------------------------------
% THESIS INFORMATION
%----------------------------------------------------------------------------------------
\thesistitle{Decrypting the Overlay: A Reproducible Analysis of P2P
VPN Implementation and Overhead} % Your thesis title, this is used in the title
\thesistitle{An Analysis of P2P VPN Implementation} % Your thesis
% title, this is used in the title
% and abstract, print it elsewhere with \ttitle
\supervisor{\textsc{Ber Lorke}} % Your supervisor's name, this is
%\supervisor{\textsc{Ber Lorke}} % Your supervisor's name, this is
% used in the title page, print it elsewhere with \supname
\examiner{Prof. Dr. \textsc{Stefan Schmid}} % Your examiner's name,
% this is not currently used anywhere in the template, print it
@@ -147,17 +159,19 @@ and Management}} % Your department's name and URL, this is used in
\begin{minipage}[t]{0.4\textwidth}
\begin{flushleft} \large
\emph{Author:}\\
\href{http://www.johnsmith.com}{\authorname} % Author name -
% remove the \href bracket to remove the link
\textcolor{mdtRed}{\authorname}
\end{flushleft}
\end{minipage}
\begin{minipage}[t]{0.4\textwidth}
\begin{flushright} \large
\emph{Supervisor:} \\
\href{http://www.jamessmith.com}{\supname} % Supervisor name
% - remove the \href bracket to remove the link
\emph{First Examiner:} \\
\textcolor{mdtRed}{Prof.\ Dr.\ \textsc{Stefan Schmid}}\\[1em]
\emph{Second Examiner:} \\
\textcolor{mdtRed}{\textsc{[TBD]}}\\[1em]
%\emph{Supervisor:} \\
%\textcolor{mdtRed}{\supname}
\end{flushright}
\end{minipage}\\[3cm]
\end{minipage}\\[2cm]
\vfill
@@ -218,44 +232,32 @@ and Management}} % Your department's name and URL, this is used in
\begin{abstract}
\addchaptertocentry{\abstractname} % Add the abstract to the table of contents
This thesis evaluates the performance and fault tolerance of
peer-to-peer mesh VPNs through an automated, reproducible
benchmarking framework
built on the Clan deployment system.
We establish a cloud APIindependent, binary-reproducible environment
for deploying and assessing various VPN implementations,
including Tailscale (via Headscale), Hyprspace, Lighthouse, Tinc,
and ZeroTier.
This thesis evaluates ten peer-to-peer mesh VPN implementations
under controlled network conditions using a reproducible, Nix-based
benchmarking framework built on a deployment system called Clan.
The implementations range from kernel-level protocols (WireGuard,
used as a reference baseline) to userspace overlays (Tinc,
Yggdrasil, Nebula, Hyprspace, and others). We test each against
four impairment profiles that vary packet loss, reordering, latency,
and jitter, producing over 300 measurements across seven benchmarks
from raw TCP and UDP throughput to video streaming and
application-level downloads.
To simulate real-world network conditions, we employ four impairment profiles
with varying degrees of packet loss, reordering, latency, and jitter.
Our benchmark suite comprises RIST video streaming, Nix cache downloads,
iperf3 throughput tests, QUIC transfers, and ping latency measurements.
The experiments run on three machines interconnected at 1\,Gbps,
each equipped with four CPU cores and eight threads.
In total, we evaluate ten VPNs across seven benchmarks and four
impairment profiles,
yielding over 300 unique measurements.
Our analysis reveals that Tailscale outperforms the Linux kernel's
default networking stack under degraded network conditions—a
counterintuitive finding
we investigate through source code analysis of packet handling,
encryption schemes, and resilience mechanisms.
This investigation also uncovered several critical security vulnerabilities
across the evaluated VPNs.
We validate our hypotheses by re-running benchmarks with tuned
Linux kernel parameters,
demonstrating measurable improvements in network throughput.
This work contributes to decentralized networking research
by providing an extensible framework for reproducible P2P benchmarks,
offering insights into overlay VPN implementation quality,
and demonstrating that default Linux kernel settings are suboptimal
for adverse network conditions.
A central finding is that no single metric captures VPN performance:
the rankings shift depending on whether one measures throughput,
latency, retransmit behavior, or application-level transfer time.
Under network impairment, Tailscale (via Headscale) outperforms the
Linux kernel's default networking stack, an anomaly we trace to its
userspace IP stack's tuned congestion-control and buffer parameters.
Re-running the internal baseline with matching kernel-side tuning
closes the gap, confirming the explanation. The accompanying source
code analysis uncovered a critical security vulnerability in one of
the evaluated implementations.
\end{abstract}
\input{Chapters/Zusammenfassung}
%----------------------------------------------------------------------------------------
% ACKNOWLEDGEMENTS
%----------------------------------------------------------------------------------------
@@ -264,7 +266,7 @@ and Management}} % Your department's name and URL, this is used in
\addchaptertocentry{\acknowledgementname} % Add the
% acknowledgements to the table of contents
I am very grateful to my work colleagues Mic92, Lassulus, W, Hsjobeki,
I am very grateful to my work colleagues Mic92, Lassulus, Hsjobeki, Enzime,
DavHau and Pinpox with whom I worked together to create the Clan framework.
As well as my supervisor, Ber Lorke, for his guidance and support
during my research.
@@ -283,18 +285,9 @@ and Management}} % Your department's name and URL, this is used in
\tableofcontents % Prints the main table of contents
\listoffigures % Prints the list of figures
%\listoffigures % Prints the list of figures
\listoftables % Prints the list of tables
%----------------------------------------------------------------------------------------
% ABBREVIATIONS
%----------------------------------------------------------------------------------------
\section*{Abbreviations}
\begin{acronym}[P2P] % [P2P] aligns entries to the longest label
\acro{P2P}{Peer to Peer}
\end{acronym}
%\listoftables % Prints the list of tables
%----------------------------------------------------------------------------------------
% PHYSICAL CONSTANTS/OTHER DEFINITIONS
@@ -315,17 +308,17 @@ and Management}} % Your department's name and URL, this is used in
% SYMBOLS
%----------------------------------------------------------------------------------------
\begin{symbols}{lll} % Include a list of Symbols (a three column table)
% \begin{symbols}{lll} % Include a list of Symbols (a three column table)
% $a$ & distance & \si{\meter} \\
% $P$ & power & \si{\watt} (\si{\joule\per\second}) \\
%Symbol & Name & Unit \\
% % $a$ & distance & \si{\meter} \\
% % $P$ & power & \si{\watt} (\si{\joule\per\second}) \\
% %Symbol & Name & Unit \\
\addlinespace % Gap to separate the Roman symbols from the Greek
% \addlinespace % Gap to separate the Roman symbols from the Greek
% $\omega$ & angular frequency & \si{\radian} \\
% % $\omega$ & angular frequency & \si{\radian} \\
\end{symbols}
% \end{symbols}
%----------------------------------------------------------------------------------------
% DEDICATION
@@ -343,10 +336,12 @@ and Management}} % Your department's name and URL, this is used in
% Include the chapters of the thesis as separate files from the Chapters folder
% Uncomment the lines as you write the chapters
\include{Chapters/Motivation}
\include{Chapters/Methodology}
\include{Chapters/Introduction}
\include{Chapters/Background}
\include{Chapters/Methodology}
\include{Chapters/Results}
\include{Chapters/Discussion}
\include{Chapters/Conclusion}
%\include{Chapters/Chapter1}
%\include{Chapters/Chapter2}
@@ -364,6 +359,11 @@ and Management}} % Your department's name and URL, this is used in
% Appendices folder
% Uncomment the lines as you write the Appendices
\chapter{Abbreviations}
\begin{acronym}[P2P] % [P2P] aligns entries to the longest label
\acro{P2P}{Peer to Peer}
\end{acronym}
%\include{Appendices/AppendixA}
%\include{Appendices/AppendixB}
%\include{Appendices/AppendixC}

View File

@@ -44,80 +44,6 @@
evaluation for a secure edgecloud continuum.pdf:application/pdf},
}
@inproceedings{hugerich_no-hop_2022,
location = {New York, {NY}, {USA}},
title = {No-hop: In-network Distributed Hash Tables},
isbn = {978-1-4503-9168-9},
url = {https://doi.org/10.1145/3493425.3502757},
doi = {10.1145/3493425.3502757},
series = {{ANCS} '21},
shorttitle = {No-hop},
abstract = {We make a case for a distributed hash table lookup in
the network data plane. We argue that the lookup time performance
of distributed hash tables can be further improved via an
in-network data plane implementation. To this end, we introduce
No-hop, an in-network distributed hash table implementation, which
leverages the data plane programmability at line rate gained from
P4. Our initial results of transporting distributed hash table
logic from hosts' user space to the fast path of switches in the
network data plane are promising. We show that No-hop improves the
performance of locating the responsible host and maintains the
properties of distributed hash tables while outperforming two baselines.},
pages = {80--87},
booktitle = {Proceedings of the Symposium on Architectures for
Networking and Communications Systems},
publisher = {Association for Computing Machinery},
author = {Hügerich, Lily and Shukla, Apoorv and Smaragdakis, Georgios},
urldate = {2024-09-23},
date = {2022-01},
file =
{Attachment:/home/lhebendanz/Zotero/storage/WCI9PCTE/inet_nohop_decen_hashtable.pdf:application/pdf},
}
@article{bakhshi_state_2017,
title = {State of the Art and Recent Research Advances in Software
Defined Networking},
volume = {2017},
rights = {Copyright © 2017 Taimur Bakhshi.},
issn = {1530-8677},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2017/7191647},
doi = {10.1155/2017/7191647},
abstract = {Emerging network services and subsequent growth in the
networking infrastructure have gained tremendous momentum in recent
years. Application performance requiring rapid real-time network
provisioning, optimized traffic management, and virtualization of
shared resources has induced the conceptualization and adoption of
new networking models. Software defined networking ({SDN}), one of
the predominant and relatively new networking paradigms, seeks to
simplify network management by decoupling network control logic
from the underlying hardware and introduces real-time network
programmability enabling innovation. The present work reviews the
state of the art in software defined networking providing a
historical perspective on complementary technologies in network
programmability and the inherent shortcomings which paved the way
for {SDN}. The {SDN} architecture is discussed along with popular
protocols, platforms, and existing simulation and debugging
solutions. Furthermore, a detailed analysis is presented around
recent {SDN} development and deployment avenues ranging from mobile
communications and data centers to campus networks and residential
environments. The review concludes by highlighting implementation
challenges and subsequent research directions being pursued in
academia and industry to address issues related to application
performance, control plane scalability and design, security, and
interdomain connectivity in the context of {SDN}.},
pages = {7191647},
number = {1},
journaltitle = {Wireless Communications and Mobile Computing},
author = {Bakhshi, Taimur},
urldate = {2024-09-23},
date = {2017},
langid = {english},
file =
{Attachment:/home/lhebendanz/Zotero/storage/TXFJ8DJB/Wireless
Communications and Mobile Computing - 2017 - Bakhshi - State of the
Art and Recent Research Advances in Software.pdf:application/pdf},
}
@online{noauthor_sci-hub_nodate,
title = {Sci-Hub},
url = {https://sci-hub.usualwant.com/},
@@ -222,78 +148,6 @@
Snapshot:/home/lhebendanz/Zotero/storage/VHPTLVMW/S0167642312000639.html:text/html},
}
@article{laddad_keep_2022,
title = {Keep {CALM} and {CRDT} On},
volume = {16},
issn = {2150-8097},
url = {https://doi.org/10.14778/3574245.3574268},
doi = {10.14778/3574245.3574268},
abstract = {Despite decades of research and practical experience,
developers have few tools for programming reliable distributed
applications without resorting to expensive coordination
techniques. Conflict-free replicated datatypes ({CRDTs}) are a
promising line of work that enable coordination-free replication
and offer certain eventual consistency guarantees in a relatively
simple object-oriented {API}. Yet {CRDT} guarantees extend only to
data updates; observations of {CRDT} state are unconstrained and
unsafe. We propose an agenda that embraces the simplicity of
{CRDTs}, but provides richer, more uniform guarantees. We extend
{CRDTs} with a query model that reasons about which queries are
safe without coordination by applying monotonicity results from the
{CALM} Theorem, and lay out a larger agenda for developing {CRDT}
data stores that let developers safely and efficiently interact
with replicated application state.},
pages = {856--863},
number = {4},
journaltitle = {Proc. {VLDB} Endow.},
author = {Laddad, Shadaj and Power, Conor and Milano, Mae and
Cheung, Alvin and Crooks, Natacha and Hellerstein, Joseph M.},
urldate = {2024-11-24},
date = {2022},
file = {PDF:/home/lhebendanz/Zotero/storage/SEGUKMKS/Laddad et al.
- 2022 - Keep CALM and CRDT On.pdf:application/pdf},
}
@inproceedings{jeffery_amc_2023,
location = {New York, {NY}, {USA}},
title = {{AMC}: Towards Trustworthy and Explorable {CRDT}
Applications with the Automerge Model Checker},
isbn = {9798400700866},
url = {https://dl.acm.org/doi/10.1145/3578358.3591326},
doi = {10.1145/3578358.3591326},
series = {{PaPoC} '23},
shorttitle = {{AMC}},
abstract = {Conflict-free Replicated Data Types ({CRDTs}) enable
local-first operations and asynchronous collaboration without the
need for always-on centralised services. {CRDTs} can have a high
overhead, so implementations need to be optimised, but this
optimisation can lead to bugs despite the use of test suites and
fuzzing. Furthermore, using {CRDTs} in applications is complex,
observing unexpected conflict resolution, issues synchronising
documents and difficulties implementing appropriate data models.
Automerge is a library, exposing a {JSON} {CRDT}, that sees users
having difficulties in modelling their problems, understanding
their edge cases and implementing applications correctly. We
introduce the Automerge Model Checker ({AMC}), empowering
application developers to check properties about their
implementations and explore them dynamically. {AMC} can check a
range of applications as well as being able to check properties
about the core of Automerge itself, helping to make more
trustworthy Automerge applications.{AMC} is available open-source
at github.com/jeffa5/automerge-model-checker.},
pages = {44--50},
booktitle = {Proceedings of the 10th Workshop on Principles and
Practice of Consistency for Distributed Data},
publisher = {Association for Computing Machinery},
author = {Jeffery, Andrew and Mortier, Richard},
urldate = {2024-11-24},
date = {2023},
file = {Full Text
PDF:/home/lhebendanz/Zotero/storage/EEMPQUIR/Jeffery and Mortier -
2023 - AMC Towards Trustworthy and Explorable CRDT Applications
with the Automerge Model Checker.pdf:application/pdf},
}
@inproceedings{dolstra_nix_2004,
location = {{USA}},
title = {Nix: A Safe and Policy-Free System for Software Deployment},
@@ -330,3 +184,436 @@
file = {ISPs - ethernodes.org - The Ethereum Network & Node
Explorer:/home/lhebendanz/Zotero/storage/BH7E2FAL/Hosting.html:text/html},
}
@article{kern_lost_2023,
title = {Lost in Simulation: Route Property in Mininet},
url =
{https://www.net.in.tum.de/fileadmin/TUM/NET/NET-2023-06-1/NET-2023-06-1_03.pdf},
doi = {10.2313/NET-2023-06-1_03},
shorttitle = {Lost in Simulation},
abstract = {The Mininet network emulator enables the comparison of
speed, delay, jitter and packet loss across different topologies.
It provides a Python {API} to instantiate almost arbitrary layouts
of networks and connections with attributes like predetermined
packet loss. We inspect linear and grid-like topologies and
discover that both share similar performance characteristics.},
author = {Kern, Philipp},
editora = {Architectures, Chair Of Network},
editoratype = {collaborator},
urldate = {2025-01-07},
date = {2023},
langid = {english},
note = {Medium: {PDF}
Publisher: Chair of Network Architectures and Services, School of
Computation, Information and Technology, Technical University of Munich},
file = {PDF:/home/lhebendanz/Zotero/storage/KSRTDLCA/Kern - 2023 -
Lost in Simulation Route Property in Mininet.pdf:application/pdf},
}
@article{savolainen_modeling_2019,
title = {Modeling the interplay of information seeking and
information sharing: A conceptual analysis},
volume = {71},
issn = {2050-3806},
url =
{https://www.emerald.com/insight/content/doi/10.1108/ajim-10-2018-0266/full/html},
doi = {10.1108/AJIM-10-2018-0266},
shorttitle = {Modeling the interplay of information seeking and
information sharing},
abstract = {The purpose of this paper is to contribute to the
creation of a holistic picture of information behavior by examining
the connections between information seeking and sharing. Conceptual
analysis is used to focus on the ways in which the researchers have
modeled the interplay of information seeking and sharing. The study
draws on conceptual analysis of 27 key studies examining the above
issue, with a focus on the scrutiny of six major models for
information behavior.,Researchers have employed three main
approaches to model the relationships between information seeking
and sharing. The indirect approach conceptualizes information
seeking and sharing as discrete activities connected by an
intermediating factor, for example, information need. The
sequential approach assumes that information seeking precedes
information sharing. From the viewpoint of the interactive
approach, information seeking and sharing appear as mutually
related activities shaping each other iteratively or in a cyclical
manner. The interactive approach provides the most sophisticated
research perspective on the relationships of information seeking
and sharing and contributes to holistic understanding of human
information behavior. As the study focuses on information seeking
and sharing, no attention is devoted to other activities
constitutive of information behavior, for example, information
use. The study pioneers by providing an in-depth analysis of the
connections of information seeking and information sharing.},
pages = {518--534},
number = {4},
journaltitle = {Aslib Journal of Information Management},
author = {Savolainen, Reijo},
urldate = {2025-01-24},
date = {2019-06-18},
langid = {english},
note = {Publisher: Emerald Publishing Limited},
file = {Full
Text:/home/lhebendanz/Zotero/storage/LK5QNLVT/Savolainen - 2019 -
Modeling the interplay of information seeking and information
sharing A conceptual
analysis.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/JMW3VDFN/html.html:text/html},
}
@online{noauthor_web_nodate,
title = {The Web of False Information: Rumors, Fake News, Hoaxes,
Clickbait, and Various Other Shenanigans: Journal of Data and
Information Quality: Vol 11, No 3},
url = {https://dl.acm.org/doi/abs/10.1145/3309699},
urldate = {2025-01-24},
file = {PDF:/home/lhebendanz/Zotero/storage/95QKTBA7/The Web of
False Information Rumors, Fake News, Hoaxes, Clickbait, and Various
Other Shenanigans J.pdf:application/pdf;The Web of False
Information\: Rumors, Fake News, Hoaxes, Clickbait, and Various
Other Shenanigans\: Journal of Data and Information Quality\: Vol
11, No 3:/home/lhebendanz/Zotero/storage/7A2CZ7A6/3309699.html:text/html},
}
@online{noauthor_netzdg_nodate,
title = {{NetzDG} - Gesetz zur Verbesserung der Rechtsdurchsetzung
in sozialen Netzwerken},
url = {https://www.gesetze-im-internet.de/netzdg/BJNR335210017.html},
urldate = {2025-02-24},
file = {NetzDG - Gesetz zur Verbesserung der Rechtsdurchsetzung in
sozialen
Netzwerken:/home/lhebendanz/Zotero/storage/VKPIEEDI/BJNR335210017.html:text/html},
}
@online{noauthor_packet_2025,
title = {Packet zum Gesetz über digitale Dienste {\textbar}
Gestaltung der digitalen Zukunft Europas},
url =
{https://digital-strategy.ec.europa.eu/de/policies/digital-services-act-package},
urldate = {2025-02-24},
date = {2025-02-21},
langid = {german},
file =
{Snapshot:/home/lhebendanz/Zotero/storage/362VSA4E/digital-services-act-package.html:text/html},
}
@article{salmi_constructing_2003,
title = {Constructing Knowledge Societies: New Challenges for
Tertiary Education},
volume = {28},
issn = {0379-7724, 1469-8358},
url = {https://www.tandfonline.com/doi/full/10.1080/0379772032000110125},
doi = {10.1080/0379772032000110125},
shorttitle = {Constructing Knowledge Societies},
pages = {65--69},
number = {1},
journaltitle = {Higher Education in Europe},
author = {Salmi, Jamil},
urldate = {2025-02-26},
date = {2003-04},
langid = {english},
file = {PDF:/home/lhebendanz/Zotero/storage/M7TSQ6XA/Salmi - 2003 -
Constructing Knowledge Societies New Challenges for Tertiary
Education.pdf:application/pdf},
}
@article{sahlberg_rethinking_2010,
title = {Rethinking accountability in a knowledge society},
volume = {11},
rights = {http://www.springer.com/tdm},
issn = {1389-2843, 1573-1812},
url = {http://link.springer.com/10.1007/s10833-008-9098-2},
doi = {10.1007/s10833-008-9098-2},
abstract = {Competition between schools combined with test-based
accountability to hold schools accountable for predetermined
knowledge standards have become a common solution in educational
change efforts to improve the performance of educational systems
around the world. This is happening as family and community social
capital declines in most parts of developed world. Increased
competition and individualism are not necessarily beneficial to
creating social capital in schools and their communities. This
article argues that: (1) the evidence remains controversial that
test-based accountability policies improve the quality and
efficiency of public education; (2) the current practice of
determining educational performance by using primarily standardized
knowledge tests as the main means of accountability is not a
necessary condition for much needed educational improvement; and
(3) there is growing evidence that increased high-stakes testing is
restricting students' conceptual learning, engaging in creative
action and understanding innovation, all of which are essential
elements of contemporary schooling in a knowledge society. Finland
is used as an example to suggest that educational change should
rather contribute to increasing networking and social capital in
schools and in their communities through building trust and
strengthening collective responsibilities within and between
schools. This would create better prospects of worthwhile lifelong
learning in and out of schools. Based on this analysis, the article
concludes that education policies should be directed at promoting
more intelligent forms of accountability to meet external
accountability demands and to encourage cooperation rather than
competition among students, teachers and schools.},
pages = {45--61},
number = {1},
journaltitle = {J Educ Change},
author = {Sahlberg, Pasi},
urldate = {2025-02-26},
date = {2010-02},
langid = {english},
file = {PDF:/home/lhebendanz/Zotero/storage/Q7WXPVKN/Sahlberg -
2010 - Rethinking accountability in a knowledge society.pdf:application/pdf},
}
@article{vanderlind_effects_2017,
title = {Effects of Mental Health on Student Learning},
volume = {22},
issn = {1087-0059},
url = {https://eric.ed.gov/?id=EJ1154566},
abstract = {Learning can be hindered by students' mental health.
Given the increased reports of mental health concerns among college
students, it is imperative that we understand how best to provide
supports to this population to help them learn and succeed. This is
particularly significant given the body of research that
demonstrates how mental illness may negatively affect student
success and degree persistence. In order to best serve this growing
population, there are possible supports that can be provided in the
classroom embedded into current practices and learning
opportunities for all students across the board. This article
addresses the connections between learning and mental health,
practical takeaways for practitioners, and directions for future research.},
pages = {39--58},
number = {2},
journaltitle = {Learning Assistance Review},
author = {{VanderLind}, Ren},
urldate = {2025-02-26},
date = {2017},
langid = {english},
note = {Publisher: National College Learning Center Association
{ERIC} Number: {EJ}1154566},
keywords = {Academic Achievement, Anxiety, College Students,
Correlation, Depression (Psychology), Gender Differences, Learning,
Learning Theories, Mental Disorders, Mental Health, Metacognition,
Personality Traits, Success},
file = {Full Text
PDF:/home/lhebendanz/Zotero/storage/SGNY7WPR/VanderLind - 2017 -
Effects of Mental Health on Student Learning.pdf:application/pdf},
}
@article{sinsebox_supervision_2020,
title = {Supervision and Evaluation Practices That Impact Teacher
Learning: A Case Study of Rural Teachers' Perspectives},
url = {https://fisherpub.sjf.edu/education_etd/474},
shorttitle = {Supervision and Evaluation Practices That Impact
Teacher Learning},
journaltitle = {Education Doctoral},
author = {Sinsebox, Jennifer},
date = {2020-12-01},
file = {"Supervision and Evaluation Practices That Impact Teacher
Learning\: A C" by Jennifer L.
Sinsebox:/home/lhebendanz/Zotero/storage/HWJMQJ9Z/474.html:text/html},
}
@inproceedings{halkes_udp_2011,
title = {{UDP} {NAT} and firewall puncturing in the wild},
volume = {6641},
isbn = {978-3-642-20797-6},
doi = {10.1007/978-3-642-20798-3_1},
abstract = {Peer-to-Peer (P2P) networks work on the presumption
that all nodes in the network are connectable. However, {NAT} boxes and
firewalls prevent connections to many nodes on the Internet. For
{UDP} based protocols, the {UDP} hole-punching technique has
been proposed to mitigate this problem.
This paper presents a study of the efficacy of {UDP} hole
punching on the Internet in the context of an actual P2P network.
To the best of our knowledge, no previous study has provided
similar measurements. Our results show that {UDP} hole punching
is an effective method to increase the connectability of peers on
the Internet: approximately 64\% of all peers are behind
a {NAT} box or firewall which should allow hole punching to work,
and more than 80\% of hole punching attempts between these
peers succeed.},
pages = {1--12},
author = {Halkes, Gertjan and Pouwelse, J.A.},
date = {2011-06-01},
file = {Full Text
PDF:/home/lhebendanz/Zotero/storage/VUJQDDIG/Halkes and Pouwelse -
2011 - UDP NAT and firewall puncturing in the wild.pdf:application/pdf},
}
@inproceedings{lackorzynski_comparative_2019,
title = {A Comparative Study on Virtual Private Networks for Future
Industrial Communication Systems},
url = {https://ieeexplore.ieee.org/document/8758010},
doi = {10.1109/WFCS.2019.8758010},
abstract = {The future industrial networks will not be created from
scratch. Rather, they will grow from existing installations without
displacing legacy components. The secure integration of these
legacy machines and networks will become an important building
block in order to realize the vision of Industry 4.0. Secure and
high performance virtual private networks ({VPNs}) will be
necessary for that purpose.Therefore, we investigated and compared
various {VPN} solutions. Their performance was tested on multiple
hardware platforms ranging from very resource constrained to very
powerful. Non-functional aspects, relating around security,
manageability and ease of use, were discussed in order to assess
their suitability for future use cases.We arrive at clear
recommendations on which software {VPN} solutions to choose for
future industrial setups.},
eventtitle = {2019 15th {IEEE} International Workshop on Factory
Communication Systems ({WFCS})},
pages = {1--8},
booktitle = {2019 15th {IEEE} International Workshop on Factory
Communication Systems ({WFCS})},
author = {Lackorzynski, Tim and Köpsell, Stefan and Strufe, Thorsten},
urldate = {2026-02-11},
date = {2019-05},
keywords = {Encryption, Hardware, industrial {IoT}, industrial
networks, Industry, Logic gates, network security, Production
facilities, secure transport, Software, tunneling, Virtual private
networks, {VPN}},
file = {PDF:/home/lhebendanz/Zotero/storage/6Q5SUJX5/Lackorzynski
et al. - 2019 - A Comparative Study on Virtual Private Networks for
Future Industrial Communication
Systems.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/PXWNAC6D/8758010.html:text/html},
}
@online{noauthor_nat_2026,
title = {{NAT} Traversal: How It Works},
url = {https://dev.to/alakkadshaw/nat-traversal-how-it-works-4dnc},
shorttitle = {{NAT} Traversal},
abstract = {{NAT} traversal is the set of techniques that solves
this problem: discovering public addresses,...},
titleaddon = {{DEV} Community},
urldate = {2026-02-11},
date = {2026-01-30},
langid = {english},
file =
{Snapshot:/home/lhebendanz/Zotero/storage/UHJQ84AV/nat-traversal-how-it-works-4dnc.html:text/html},
}
@software{krause_krausestjs-framework-benchmark_2026,
title = {krausest/js-framework-benchmark},
rights = {Apache-2.0},
url = {https://github.com/krausest/js-framework-benchmark},
abstract = {A comparison of the performance of a few popular
javascript frameworks},
author = {Krause, Stefan},
urldate = {2026-02-11},
date = {2026-02-11},
note = {original-date: 2015-12-09T20:10:53Z},
}
@article{leung_overview_2007,
title = {An Overview of Packet Reordering in Transmission Control
Protocol ({TCP}): Problems, Solutions, and Challenges},
volume = {18},
issn = {1558-2183},
url = {https://ieeexplore.ieee.org/document/4118693},
doi = {10.1109/TPDS.2007.1011},
shorttitle = {An Overview of Packet Reordering in Transmission
Control Protocol ({TCP})},
abstract = {Transmission control protocol ({TCP}) is the most
popular transport layer protocol for the Internet. Due to various
reasons, such as multipath routing, route fluttering, and
retransmissions, packets belonging to the same flow may arrive out
of order at a destination. Such packet reordering violates the
design principles of some traffic control mechanisms in {TCP} and,
thus, poses performance problems. In this paper, we provide a
comprehensive and in-depth survey on recent research on packet
reordering in {TCP}. The causes and problems for packet reordering
are discussed. Various representative algorithms are examined and
compared by computer simulations. The ported program codes and
simulation scripts are available for download. Some open questions
are discussed to stimulate further research in this area},
pages = {522--535},
number = {4},
journaltitle = {{IEEE} Transactions on Parallel and Distributed Systems},
author = {Leung, Ka-cheong and Li, Victor O.k. and Yang, Daiqin},
urldate = {2026-02-16},
date = {2007-04},
keywords = {{ARPANET}, Communication system control, Computational
modeling, Computer simulations of {TCP}, congestion control, flow
control, Internet, {IP} networks, Out of order, packet reordering,
Routing, Telecommunication network reliability, Traffic control,
Transmission Control Protocol ({TCP})., Transport protocols},
file =
{Snapshot:/home/lhebendanz/Zotero/storage/NEVVLVJL/4118693.html:text/html;Submitted
Version:/home/lhebendanz/Zotero/storage/5SQILKLX/Leung et al. -
2007 - An Overview of Packet Reordering in Transmission Control
Protocol (TCP) Problems, Solutions, and Ch.pdf:application/pdf},
}
@inproceedings{mcclellan_estimating_2013,
title = {Estimating Retransmission Timeouts in {IP}-Based Transport
Protocols},
author = {{McClellan}, Stan and Peng, Wuxu},
date = {2013-04-01},
file = {Full Text
PDF:/home/lhebendanz/Zotero/storage/9PZP9SVG/McClellan and Peng -
2013 - Estimating Retransmission Timeouts in IP-Based Transport
Protocols.pdf:application/pdf},
}
@misc{guo_implementation_2025,
title = {Implementation and Performance Evaluation of {TCP} over
{QUIC} Tunnels},
url = {http://arxiv.org/abs/2504.10054},
doi = {10.48550/arXiv.2504.10054},
abstract = {{QUIC}, a {UDP}-based transport protocol, addresses
several limitations of {TCP} by offering built-in encryption,
stream multiplexing, and improved loss recovery. To extend these
benefits to legacy {TCP}-based applications, this paper explores
the implementation and evaluation of a {TCP} over {QUIC} tunneling
approach. A lightweight, stream-based tunnel is constructed using
the Rust-based Quinn library, enabling {TCP} traffic to traverse
{QUIC} connections transparently. Performance is evaluated under
varying network conditions, including packet loss, high latency,
and out-of-order delivery. Results indicate that {TCP} over {QUIC}
maintains significantly higher throughput than native {TCP} in
lossy or unstable environments, with up to a high improvement under
20\% packet loss. However, under ideal network
conditions, tunneling introduces modest overhead due to encryption
and user-space processing. These findings provide insights into the
trade-offs of {TCP} over {QUIC} tunneling and its suitability for
deployment in dynamic or impaired networks.},
number = {{arXiv}:2504.10054},
publisher = {{arXiv}},
author = {Guo, Xuanhong and Bao, Zekun and Chen, Ying},
urldate = {2026-02-16},
date = {2025-10-07},
eprinttype = {arxiv},
eprint = {2504.10054 [cs]},
keywords = {Computer Science - Networking and Internet Architecture},
file = {Preprint PDF:/home/lhebendanz/Zotero/storage/FXJSBRXL/Guo
et al. - 2025 - Implementation and Performance Evaluation of TCP
over QUIC
Tunnels.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/LJ56UH99/2504.html:text/html},
}
@report{whitner_improved_2008,
title = {Improved Packet Reordering Metrics},
url = {https://datatracker.ietf.org/doc/rfc5236},
abstract = {This document presents two improved metrics for packet
reordering, namely, Reorder Density ({RD}) and Reorder
Buffer-occupancy Density ({RBD}). A threshold is used to clearly
define when a packet is considered lost, to bound computational
complexity at O(N), and to keep the memory requirement for
evaluation independent of N, where N is the length of the packet
sequence. {RD} is a comprehensive metric that captures the
characteristics of reordering, while {RBD} evaluates the sequences
from the point of view of recovery from reordering. These metrics
are simple to compute yet comprehensive in their characterization
of packet reordering. The measures are robust and orthogonal to
packet loss and duplication. This memo provides information for the
Internet community.},
number = {{RFC} 5236},
institution = {Internet Engineering Task Force},
type = {Request for Comments},
author = {Whitner, Rick and Banka, Tarun and Piratla, Nischal M.
and Bare, Abhijit A. and Jayasumana, Professor Anura P.},
urldate = {2026-02-16},
date = {2008-06},
doi = {10.17487/RFC5236},
note = {Num Pages: 26},
file = {Full Text
PDF:/home/lhebendanz/Zotero/storage/KM9D625Y/Whitner et al. - 2008
- Improved Packet Reordering Metrics.pdf:application/pdf},
}

View File

@@ -3,7 +3,9 @@
imports = [ inputs.treefmt-nix.flakeModule ];
perSystem =
{ ... }:
{
...
}:
{
treefmt = {
# Used to find the project root
@@ -17,6 +19,7 @@
"AI_Data/**"
"Figures/**"
];
programs.typos = {
enable = true;
threads = 4;