Compare commits
19 Commits
d396de9f62
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| 8a6d676e93 | |||
| f3cf653ab5 | |||
| 64aeeb5772 | |||
| 6b32967f32 | |||
| 910a7b2a81 | |||
| f29d810240 | |||
| 0a0ca0800a | |||
| b6ac4e20bf | |||
| c168a1b441 | |||
| ec32331bae | |||
| 841973f26f | |||
| c1c94fdf78 | |||
| c08df6b885 | |||
| 2e2d8880c0 | |||
| 6f3b6ffa07 | |||
| f1049f51f2 | |||
| 170461431b | |||
| c5ee9deeba | |||
| 200c8ba004 |
8
.gitignore
vendored
@@ -1,4 +1,6 @@
|
||||
# LaTeX intermediate and output files
|
||||
CLAUDE.md
|
||||
result
|
||||
*.aux
|
||||
**/*.bak.*
|
||||
*.bbl
|
||||
@@ -22,7 +24,11 @@
|
||||
# SyncTeX files
|
||||
*.synctex.gz
|
||||
*.synctex(busy)
|
||||
|
||||
openspec
|
||||
.claude
|
||||
**/node_modules
|
||||
**/dist
|
||||
log.txt
|
||||
# PDF files
|
||||
*.pdf
|
||||
|
||||
|
||||
44
Chapters/Background.tex
Normal file
@@ -0,0 +1,44 @@
|
||||
\chapter{Background} % Main chapter title
|
||||
|
||||
\label{Background}
|
||||
|
||||
\section{Nix: A Safe and Policy-Free System for Software Deployment}
|
||||
|
||||
Nix addresses significant issues in software deployment by utilizing
|
||||
cryptographic hashes to ensure unique paths for component instances
|
||||
\cite{dolstra_nix_2004}. Features such as concurrent installation of
|
||||
multiple versions, atomic upgrades, and safe garbage collection make
|
||||
Nix a flexible deployment system. This work uses Nix to ensure that
|
||||
all VPN builds and system configurations are deterministic.
|
||||
|
||||
\section{NixOS: A Purely Functional Linux Distribution}
|
||||
|
||||
NixOS extends Nix principles to Linux system configuration
|
||||
\cite{dolstra_nixos_2008}. System configurations are reproducible and
|
||||
isolated from stateful interactions typical in imperative package
|
||||
management. This property is essential for ensuring identical test
|
||||
environments across benchmark runs.
|
||||
|
||||
\section{UDP NAT and Firewall Puncturing in the Wild}
|
||||
|
||||
Halkes and Pouwelse~\cite{halkes_udp_2011} measure UDP hole punching
|
||||
efficacy on a live P2P network using the Tribler BitTorrent client.
|
||||
Their study finds that 79\% of peers are unreachable due to NAT or
|
||||
firewall restrictions, yet 64\% reside behind configurations amenable
|
||||
to hole punching. Among compatible peers, over 80\% of puncturing
|
||||
attempts succeed, establishing hole punching as a practical NAT
|
||||
traversal technique. Their timeout measurements further indicate that
|
||||
keep-alive messages must be sent at least every 55 seconds to maintain
|
||||
open NAT mappings.
|
||||
|
||||
These findings directly inform the evaluation criteria for this thesis.
|
||||
All mesh VPNs tested rely on UDP hole punching for NAT traversal;
|
||||
the 80\% success rate sets a baseline expectation, while the 55-second
|
||||
timeout informs analysis of each implementation's keep-alive behavior
|
||||
during source code review.
|
||||
|
||||
\section{An Overview of Packet Reordering in TCP}
|
||||
TODO \cite{leung_overview_2007}
|
||||
|
||||
\section{Performance Evaluation of TCP over QUIC Tunnels}
|
||||
TODO \cite{guo_implementation_2025}
|
||||
@@ -1,603 +0,0 @@
|
||||
% Chapter 1
|
||||
|
||||
\chapter{Chapter Title Here} % Main chapter title
|
||||
|
||||
\label{Chapter1} % For referencing the chapter elsewhere, use \ref{Chapter1}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
% Define some commands to keep the formatting separated from the content
|
||||
\newcommand{\keyword}[1]{\textbf{#1}}
|
||||
\newcommand{\tabhead}[1]{\textbf{#1}}
|
||||
\newcommand{\code}[1]{\texttt{#1}}
|
||||
\newcommand{\file}[1]{\texttt{\bfseries#1}}
|
||||
\newcommand{\option}[1]{\texttt{\itshape#1}}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Welcome and Thank You}
|
||||
Welcome to this \LaTeX{} Thesis Template, a beautiful and easy to use
|
||||
template for writing a thesis using the \LaTeX{} typesetting system.
|
||||
|
||||
If you are writing a thesis (or will be in the future) and its
|
||||
subject is technical or mathematical (though it doesn't have to be),
|
||||
then creating it in \LaTeX{} is highly recommended as a way to make
|
||||
sure you can just get down to the essential writing without having to
|
||||
worry over formatting or wasting time arguing with your word processor.
|
||||
|
||||
\LaTeX{} is easily able to professionally typeset documents that run
|
||||
to hundreds or thousands of pages long. With simple mark-up commands,
|
||||
it automatically sets out the table of contents, margins, page
|
||||
headers and footers and keeps the formatting consistent and
|
||||
beautiful. One of its main strengths is the way it can easily typeset
|
||||
mathematics, even \emph{heavy} mathematics. Even if those equations
|
||||
are the most horribly twisted and most difficult mathematical
|
||||
problems that can only be solved on a super-computer, you can at
|
||||
least count on \LaTeX{} to make them look stunning.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Learning \LaTeX{}}
|
||||
|
||||
\LaTeX{} is not a \textsc{wysiwyg} (What You See is What You Get)
|
||||
program, unlike word processors such as Microsoft Word or Apple's
|
||||
Pages. Instead, a document written for \LaTeX{} is actually a simple,
|
||||
plain text file that contains \emph{no formatting}. You tell \LaTeX{}
|
||||
how you want the formatting in the finished document by writing in
|
||||
simple commands amongst the text, for example, if I want to use
|
||||
\emph{italic text for emphasis}, I write the \verb|\emph{text}|
|
||||
command and put the text I want in italics in between the curly
|
||||
braces. This means that \LaTeX{} is a \enquote{mark-up} language,
|
||||
very much like HTML.
|
||||
|
||||
\subsection{A (not so short) Introduction to \LaTeX{}}
|
||||
|
||||
If you are new to \LaTeX{}, there is a very good eBook -- freely
|
||||
available online as a PDF file -- called, \enquote{The Not So Short
|
||||
Introduction to \LaTeX{}}. The book's title is typically shortened to
|
||||
just \emph{lshort}. You can download the latest version (as it is
|
||||
occasionally updated) from here:
|
||||
\url{http://www.ctan.org/tex-archive/info/lshort/english/lshort.pdf}
|
||||
|
||||
It is also available in several other languages. Find yours from the
|
||||
list on this page: \url{http://www.ctan.org/tex-archive/info/lshort/}
|
||||
|
||||
It is recommended to take a little time out to learn how to use
|
||||
\LaTeX{} by creating several, small `test' documents, or having a
|
||||
close look at several templates on:\\
|
||||
\url{http://www.LaTeXTemplates.com}\\
|
||||
Making the effort now means you're not stuck learning the system when
|
||||
what you \emph{really} need to be doing is writing your thesis.
|
||||
|
||||
\subsection{A Short Math Guide for \LaTeX{}}
|
||||
|
||||
If you are writing a technical or mathematical thesis, then you may
|
||||
want to read the document by the AMS (American Mathematical Society)
|
||||
called, \enquote{A Short Math Guide for \LaTeX{}}. It can be found online here:
|
||||
\url{http://www.ams.org/tex/amslatex.html}
|
||||
under the \enquote{Additional Documentation} section towards the
|
||||
bottom of the page.
|
||||
|
||||
\subsection{Common \LaTeX{} Math Symbols}
|
||||
There are a multitude of mathematical symbols available for \LaTeX{}
|
||||
and it would take a great effort to learn the commands for them all.
|
||||
The most common ones you are likely to use are shown on this page:
|
||||
\url{http://www.sunilpatel.co.uk/latex-type/latex-math-symbols/}
|
||||
|
||||
You can use this page as a reference or crib sheet, the symbols are
|
||||
rendered as large, high quality images so you can quickly find the
|
||||
\LaTeX{} command for the symbol you need.
|
||||
|
||||
\subsection{\LaTeX{} on a Mac}
|
||||
|
||||
The \LaTeX{} distribution is available for many systems including
|
||||
Windows, Linux and Mac OS X. The package for OS X is called MacTeX
|
||||
and it contains all the applications you need -- bundled together and
|
||||
pre-customized -- for a fully working \LaTeX{} environment and work flow.
|
||||
|
||||
MacTeX includes a custom dedicated \LaTeX{} editor called TeXShop for
|
||||
writing your `\file{.tex}' files and BibDesk: a program to manage
|
||||
your references and create your bibliography section just as easily
|
||||
as managing songs and creating playlists in iTunes.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Getting Started with this Template}
|
||||
|
||||
If you are familiar with \LaTeX{}, then you should explore the
|
||||
directory structure of the template and then proceed to place your
|
||||
own information into the \emph{THESIS INFORMATION} block of the
|
||||
\file{main.tex} file. You can then modify the rest of this file to
|
||||
your unique specifications based on your degree/university. Section
|
||||
\ref{FillingFile} on page \pageref{FillingFile} will help you do
|
||||
this. Make sure you also read section \ref{ThesisConventions} about
|
||||
thesis conventions to get the most out of this template.
|
||||
|
||||
If you are new to \LaTeX{} it is recommended that you carry on
|
||||
reading through the rest of the information in this document.
|
||||
|
||||
Before you begin using this template you should ensure that its style
|
||||
complies with the thesis style guidelines imposed by your
|
||||
institution. In most cases this template style and layout will be
|
||||
suitable. If it is not, it may only require a small change to bring
|
||||
the template in line with your institution's recommendations. These
|
||||
modifications will need to be done on the \file{MastersDoctoralThesis.cls} file.
|
||||
|
||||
\subsection{About this Template}
|
||||
|
||||
This \LaTeX{} Thesis Template is originally based and created around
|
||||
a \LaTeX{} style file created by Steve R.\ Gunn from the University
|
||||
of Southampton (UK), department of Electronics and Computer Science.
|
||||
You can find his original thesis style file at his site, here:
|
||||
\url{http://www.ecs.soton.ac.uk/~srg/softwaretools/document/templates/}
|
||||
|
||||
Steve's \file{ecsthesis.cls} was then taken by Sunil Patel who
|
||||
modified it by creating a skeleton framework and folder structure to
|
||||
place the thesis files in. The resulting template can be found on
|
||||
Sunil's site here:
|
||||
\url{http://www.sunilpatel.co.uk/thesis-template}
|
||||
|
||||
Sunil's template was made available through
|
||||
\url{http://www.LaTeXTemplates.com} where it was modified many times
|
||||
based on user requests and questions. Version 2.0 and onwards of this
|
||||
template represents a major modification to Sunil's template and is,
|
||||
in fact, hardly recognisable. The work to make version 2.0 possible
|
||||
was carried out by \href{mailto:vel@latextemplates.com}{Vel} and
|
||||
Johannes Böttcher.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{What this Template Includes}
|
||||
|
||||
\subsection{Folders}
|
||||
|
||||
This template comes as a single zip file that expands out to several
|
||||
files and folders. The folder names are mostly self-explanatory:
|
||||
|
||||
\keyword{Appendices} -- this is the folder where you put the
|
||||
appendices. Each appendix should go into its own separate \file{.tex}
|
||||
file. An example and template are included in the directory.
|
||||
|
||||
\keyword{Chapters} -- this is the folder where you put the thesis
|
||||
chapters. A thesis usually has about six chapters, though there is no
|
||||
hard rule on this. Each chapter should go in its own separate
|
||||
\file{.tex} file and they can be split as:
|
||||
\begin{itemize}
|
||||
\item Chapter 1: Introduction to the thesis topic
|
||||
\item Chapter 2: Background information and theory
|
||||
\item Chapter 3: (Laboratory) experimental setup
|
||||
\item Chapter 4: Details of experiment 1
|
||||
\item Chapter 5: Details of experiment 2
|
||||
\item Chapter 6: Discussion of the experimental results
|
||||
\item Chapter 7: Conclusion and future directions
|
||||
\end{itemize}
|
||||
This chapter layout is specialised for the experimental sciences,
|
||||
your discipline may be different.
|
||||
|
||||
\keyword{Figures} -- this folder contains all figures for the thesis.
|
||||
These are the final images that will go into the thesis document.
|
||||
|
||||
\subsection{Files}
|
||||
|
||||
Included are also several files, most of them are plain text and you
|
||||
can see their contents in a text editor. After initial compilation,
|
||||
you will see that more auxiliary files are created by \LaTeX{} or
|
||||
BibTeX and which you don't need to delete or worry about:
|
||||
|
||||
\keyword{example.bib} -- this is an important file that contains all
|
||||
the bibliographic information and references that you will be citing
|
||||
in the thesis for use with BibTeX. You can write it manually, but
|
||||
there are reference manager programs available that will create and
|
||||
manage it for you. Bibliographies in \LaTeX{} are a large subject and
|
||||
you may need to read about BibTeX before starting with this. Many
|
||||
modern reference managers will allow you to export your references in
|
||||
BibTeX format which greatly eases the amount of work you have to do.
|
||||
|
||||
\keyword{MastersDoctoralThesis.cls} -- this is an important file. It
|
||||
is the class file that tells \LaTeX{} how to format the thesis.
|
||||
|
||||
\keyword{main.pdf} -- this is your beautifully typeset thesis (in the
|
||||
PDF file format) created by \LaTeX{}. It is supplied in the PDF with
|
||||
the template and after you compile the template you should get an
|
||||
identical version.
|
||||
|
||||
\keyword{main.tex} -- this is an important file. This is the file
|
||||
that you tell \LaTeX{} to compile to produce your thesis as a PDF
|
||||
file. It contains the framework and constructs that tell \LaTeX{} how
|
||||
to layout the thesis. It is heavily commented so you can read exactly
|
||||
what each line of code does and why it is there. After you put your
|
||||
own information into the \emph{THESIS INFORMATION} block -- you have
|
||||
now started your thesis!
|
||||
|
||||
Files that are \emph{not} included, but are created by \LaTeX{} as
|
||||
auxiliary files include:
|
||||
|
||||
\keyword{main.aux} -- this is an auxiliary file generated by
|
||||
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
|
||||
run the main \file{.tex} file.
|
||||
|
||||
\keyword{main.bbl} -- this is an auxiliary file generated by BibTeX,
|
||||
if it is deleted, BibTeX simply regenerates it when you run the
|
||||
\file{main.aux} file. Whereas the \file{.bib} file contains all the
|
||||
references you have, this \file{.bbl} file contains the references
|
||||
you have actually cited in the thesis and is used to build the
|
||||
bibliography section of the thesis.
|
||||
|
||||
\keyword{main.blg} -- this is an auxiliary file generated by BibTeX,
|
||||
if it is deleted BibTeX simply regenerates it when you run the main
|
||||
\file{.aux} file.
|
||||
|
||||
\keyword{main.lof} -- this is an auxiliary file generated by
|
||||
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
|
||||
run the main \file{.tex} file. It tells \LaTeX{} how to build the
|
||||
\emph{List of Figures} section.
|
||||
|
||||
\keyword{main.log} -- this is an auxiliary file generated by
|
||||
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
|
||||
run the main \file{.tex} file. It contains messages from \LaTeX{}, if
|
||||
you receive errors and warnings from \LaTeX{}, they will be in this
|
||||
\file{.log} file.
|
||||
|
||||
\keyword{main.lot} -- this is an auxiliary file generated by
|
||||
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
|
||||
run the main \file{.tex} file. It tells \LaTeX{} how to build the
|
||||
\emph{List of Tables} section.
|
||||
|
||||
\keyword{main.out} -- this is an auxiliary file generated by
|
||||
\LaTeX{}, if it is deleted \LaTeX{} simply regenerates it when you
|
||||
run the main \file{.tex} file.
|
||||
|
||||
So from this long list, only the files with the \file{.bib},
|
||||
\file{.cls} and \file{.tex} extensions are the most important ones.
|
||||
The other auxiliary files can be ignored or deleted as \LaTeX{} and
|
||||
BibTeX will regenerate them.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Filling in Your Information in the \file{main.tex}
|
||||
File}\label{FillingFile}
|
||||
|
||||
You will need to personalise the thesis template and make it your own
|
||||
by filling in your own information. This is done by editing the
|
||||
\file{main.tex} file in a text editor or your favourite LaTeX environment.
|
||||
|
||||
Open the file and scroll down to the third large block titled
|
||||
\emph{THESIS INFORMATION} where you can see the entries for
|
||||
\emph{University Name}, \emph{Department Name}, etc \ldots
|
||||
|
||||
Fill out the information about yourself, your group and institution.
|
||||
You can also insert web links, if you do, make sure you use the full
|
||||
URL, including the \code{http://} for this. If you don't want these
|
||||
to be linked, simply remove the \verb|\href{url}{name}| and only leave the name.
|
||||
|
||||
When you have done this, save the file and recompile \code{main.tex}.
|
||||
All the information you filled in should now be in the PDF, complete
|
||||
with web links. You can now begin your thesis proper!
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{The \code{main.tex} File Explained}
|
||||
|
||||
The \file{main.tex} file contains the structure of the thesis. There
|
||||
are plenty of written comments that explain what pages, sections and
|
||||
formatting the \LaTeX{} code is creating. Each major document element
|
||||
is divided into commented blocks with titles in all capitals to make
|
||||
it obvious what the following bit of code is doing. Initially there
|
||||
seems to be a lot of \LaTeX{} code, but this is all formatting, and
|
||||
it has all been taken care of so you don't have to do it.
|
||||
|
||||
Begin by checking that your information on the title page is correct.
|
||||
For the thesis declaration, your institution may insist on something
|
||||
different than the text given. If this is the case, just replace what
|
||||
you see with what is required in the \emph{DECLARATION PAGE} block.
|
||||
|
||||
Then comes a page which contains a funny quote. You can put your own,
|
||||
or quote your favourite scientist, author, person, and so on. Make
|
||||
sure to put the name of the person who you took the quote from.
|
||||
|
||||
Following this is the abstract page which summarises your work in a
|
||||
condensed way and can almost be used as a standalone document to
|
||||
describe what you have done. The text you write will cause the
|
||||
heading to move up so don't worry about running out of space.
|
||||
|
||||
Next come the acknowledgements. On this page, write about all the
|
||||
people who you wish to thank (not forgetting parents, partners and
|
||||
your advisor/supervisor).
|
||||
|
||||
The contents pages, list of figures and tables are all taken care of
|
||||
for you and do not need to be manually created or edited. The next
|
||||
set of pages are more likely to be optional and can be deleted since
|
||||
they are for a more technical thesis: insert a list of abbreviations
|
||||
you have used in the thesis, then a list of the physical constants
|
||||
and numbers you refer to and finally, a list of mathematical symbols
|
||||
used in any formulae. Making the effort to fill these tables means
|
||||
the reader has a one-stop place to refer to instead of searching the
|
||||
internet and references to try and find out what you meant by certain
|
||||
abbreviations or symbols.
|
||||
|
||||
The list of symbols is split into the Roman and Greek alphabets.
|
||||
Whereas the abbreviations and symbols ought to be listed in
|
||||
alphabetical order (and this is \emph{not} done automatically for
|
||||
you) the list of physical constants should be grouped into similar themes.
|
||||
|
||||
The next page contains a one line dedication. Who will you dedicate
|
||||
your thesis to?
|
||||
|
||||
Finally, there is the block where the chapters are included.
|
||||
Uncomment the lines (delete the \code{\%} character) as you write the
|
||||
chapters. Each chapter should be written in its own file and put into
|
||||
the \emph{Chapters} folder and named \file{Chapter1},
|
||||
\file{Chapter2}, etc\ldots Similarly for the appendices, uncomment
|
||||
the lines as you need them. Each appendix should go into its own file
|
||||
and placed in the \emph{Appendices} folder.
|
||||
|
||||
After the preamble, chapters and appendices finally comes the
|
||||
bibliography. The bibliography style (called \option{authoryear}) is
|
||||
used for the bibliography and is a fully featured style that will
|
||||
even include links to where the referenced paper can be found online.
|
||||
Do not underestimate how grateful your reader will be to find that a
|
||||
reference to a paper is just a click away. Of course, this relies on
|
||||
you putting the URL information into the BibTeX file in the first place.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Thesis Features and Conventions}\label{ThesisConventions}
|
||||
|
||||
To get the best out of this template, there are a few conventions
|
||||
that you may want to follow.
|
||||
|
||||
One of the most important (and most difficult) things to keep track
|
||||
of in such a long document as a thesis is consistency. Using certain
|
||||
conventions and ways of doing things (such as using a Todo list)
|
||||
makes the job easier. Of course, all of these are optional and you
|
||||
can adopt your own method.
|
||||
|
||||
\subsection{Printing Format}
|
||||
|
||||
This thesis template is designed for double sided printing (i.e.
|
||||
content on the front and back of pages) as most theses are printed
|
||||
and bound this way. Switching to one sided printing is as simple as
|
||||
uncommenting the \option{oneside} option of the \code{documentclass}
|
||||
command at the top of the \file{main.tex} file. You may then wish to
|
||||
adjust the margins to suit specifications from your institution.
|
||||
|
||||
The headers for the pages contain the page number on the outer side
|
||||
(so it is easy to flick through to the page you want) and the chapter
|
||||
name on the inner side.
|
||||
|
||||
The text is set to 11 point by default with single line spacing,
|
||||
again, you can tune the text size and spacing should you want or need
|
||||
to using the options at the very start of \file{main.tex}. The
|
||||
spacing can be changed similarly by replacing the
|
||||
\option{singlespacing} with \option{onehalfspacing} or \option{doublespacing}.
|
||||
|
||||
\subsection{Using US Letter Paper}
|
||||
|
||||
The paper size used in the template is A4, which is the standard size
|
||||
in Europe. If you are using this thesis template elsewhere and
|
||||
particularly in the United States, then you may have to change the A4
|
||||
paper size to the US Letter size. This can be done in the margins
|
||||
settings section in \file{main.tex}.
|
||||
|
||||
Due to the differences in the paper size, the resulting margins may
|
||||
be different to what you like or require (as it is common for
|
||||
institutions to dictate certain margin sizes). If this is the case,
|
||||
then the margin sizes can be tweaked by modifying the values in the
|
||||
same block as where you set the paper size. Now your document should
|
||||
be set up for US Letter paper size with suitable margins.
|
||||
|
||||
\subsection{References}
|
||||
|
||||
The \code{biblatex} package is used to format the bibliography and
|
||||
inserts references such as this one \parencite{Reference1}. The
|
||||
options used in the \file{main.tex} file mean that the in-text
|
||||
citations of references are formatted with the author(s) listed with
|
||||
the date of the publication. Multiple references are separated by
|
||||
semicolons (e.g. \parencite{Reference2, Reference1}) and references
|
||||
with more than three authors only show the first author with \emph{et
|
||||
al.} indicating there are more authors (e.g. \parencite{Reference3}).
|
||||
This is done automatically for you. To see how you use references,
|
||||
have a look at the \file{Chapter1.tex} source file. Many reference
|
||||
managers allow you to simply drag the reference into the document as you type.
|
||||
|
||||
Scientific references should come \emph{before} the punctuation mark
|
||||
if there is one (such as a comma or period). The same goes for
|
||||
footnotes\footnote{Such as this footnote, here down at the bottom of
|
||||
the page.}. You can change this but the most important thing is to
|
||||
keep the convention consistent throughout the thesis. Footnotes
|
||||
themselves should be full, descriptive sentences (beginning with a
|
||||
capital letter and ending with a full stop). The APA6 states:
|
||||
\enquote{Footnote numbers should be superscripted, [...], following
|
||||
any punctuation mark except a dash.} The Chicago manual of style
|
||||
states: \enquote{A note number should be placed at the end of a
|
||||
sentence or clause. The number follows any punctuation mark except
|
||||
the dash, which it precedes. It follows a closing parenthesis.}
|
||||
|
||||
The bibliography is typeset with references listed in alphabetical
|
||||
order by the first author's last name. This is similar to the APA
|
||||
referencing style. To see how \LaTeX{} typesets the bibliography,
|
||||
have a look at the very end of this document (or just click on the
|
||||
reference number links in in-text citations).
|
||||
|
||||
\subsubsection{A Note on bibtex}
|
||||
|
||||
The bibtex backend used in the template by default does not correctly
|
||||
handle unicode character encoding (i.e.\ \enquote{international} characters).
|
||||
You may see a warning about this in the compilation log and, if your
|
||||
references contain unicode characters, they may not show up correctly
|
||||
or at all. The solution to this is to use the biber backend instead
|
||||
of the outdated bibtex backend. This is done by finding this in
|
||||
\file{main.tex}: \option{backend=bibtex} and changing it to
|
||||
\option{backend=biber}. You will then need to delete all auxiliary
|
||||
BibTeX files and navigate to the template directory in your terminal
|
||||
(command prompt). Once there, simply type \code{biber main} and biber
|
||||
will compile your bibliography. You can then compile \file{main.tex}
|
||||
as normal and your bibliography will be updated. An alternative is to
|
||||
set up your LaTeX editor to compile with biber instead of bibtex, see
|
||||
\href{http://tex.stackexchange.com/questions/154751/biblatex-with-biber-configuring-my-editor-to-avoid-undefined-citations/}{here}
|
||||
for how to do this for various editors.
|
||||
|
||||
\subsection{Tables}
|
||||
|
||||
Tables are an important way of displaying your results, below is an
|
||||
example table which was generated with this code:
|
||||
|
||||
{\small
|
||||
\begin{verbatim}
|
||||
\begin{table}
|
||||
\caption{The effects of treatments X and Y on the four groups studied.}
|
||||
\label{tab:treatments}
|
||||
\centering
|
||||
\begin{tabular}{l l l}
|
||||
\toprule
|
||||
\tabhead{Groups} & \tabhead{Treatment X} & \tabhead{Treatment Y} \\
|
||||
\midrule
|
||||
1 & 0.2 & 0.8\\
|
||||
2 & 0.17 & 0.7\\
|
||||
3 & 0.24 & 0.75\\
|
||||
4 & 0.68 & 0.3\\
|
||||
\bottomrule\\
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
\end{verbatim}
|
||||
}
|
||||
|
||||
\begin{table}
|
||||
\caption{The effects of treatments X and Y on the four groups studied.}
|
||||
\label{tab:treatments}
|
||||
\centering
|
||||
\begin{tabular}{l l l}
|
||||
\toprule
|
||||
\tabhead{Groups} & \tabhead{Treatment X} & \tabhead{Treatment Y} \\
|
||||
\midrule
|
||||
1 & 0.2 & 0.8\\
|
||||
2 & 0.17 & 0.7\\
|
||||
3 & 0.24 & 0.75\\
|
||||
4 & 0.68 & 0.3\\
|
||||
\bottomrule\\
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
You can reference tables with \verb|\ref{<label>}| where the label is
|
||||
defined within the table environment. See \file{Chapter1.tex} for an
|
||||
example of the label and citation (e.g. Table~\ref{tab:treatments}).
|
||||
|
||||
\subsection{Figures}
|
||||
|
||||
There will hopefully be many figures in your thesis (that should be
|
||||
placed in the \emph{Figures} folder). The way to insert figures into
|
||||
your thesis is to use a code template like this:
|
||||
\begin{verbatim}
|
||||
\begin{figure}
|
||||
\centering
|
||||
\includegraphics{Figures/Electron}
|
||||
\decoRule
|
||||
\caption[An Electron]{An electron (artist's impression).}
|
||||
\label{fig:Electron}
|
||||
\end{figure}
|
||||
\end{verbatim}
|
||||
Also look in the source file. Putting this code into the source file
|
||||
produces the picture of the electron that you can see in the figure below.
|
||||
|
||||
\begin{figure}[th]
|
||||
\centering
|
||||
\includegraphics{Figures/Electron}
|
||||
\decoRule
|
||||
\caption[An Electron]{An electron (artist's impression).}
|
||||
\label{fig:Electron}
|
||||
\end{figure}
|
||||
|
||||
Sometimes figures don't always appear where you write them in the
|
||||
source. The placement depends on how much space there is on the page
|
||||
for the figure. Sometimes there is not enough room to fit a figure
|
||||
directly where it should go (in relation to the text) and so \LaTeX{}
|
||||
puts it at the top of the next page. Positioning figures is the job
|
||||
of \LaTeX{} and so you should only worry about making them look good!
|
||||
|
||||
Figures usually should have captions just in case you need to refer
|
||||
to them (such as in Figure~\ref{fig:Electron}). The \verb|\caption|
|
||||
command contains two parts, the first part, inside the square
|
||||
brackets is the title that will appear in the \emph{List of Figures},
|
||||
and so should be short. The second part in the curly brackets should
|
||||
contain the longer and more descriptive caption text.
|
||||
|
||||
The \verb|\decoRule| command is optional and simply puts an aesthetic
|
||||
horizontal line below the image. If you do this for one image, do it
|
||||
for all of them.
|
||||
|
||||
\LaTeX{} is capable of using images in pdf, jpg and png format.
|
||||
|
||||
\subsection{Typesetting mathematics}
|
||||
|
||||
If your thesis is going to contain heavy mathematical content, be
|
||||
sure that \LaTeX{} will make it look beautiful, even though it won't
|
||||
be able to solve the equations for you.
|
||||
|
||||
The \enquote{Not So Short Introduction to \LaTeX} (available on
|
||||
\href{http://www.ctan.org/tex-archive/info/lshort/english/lshort.pdf}{CTAN})
|
||||
should tell you everything you need to know for most cases of
|
||||
typesetting mathematics. If you need more information, a much more
|
||||
thorough mathematical guide is available from the AMS called,
|
||||
\enquote{A Short Math Guide to \LaTeX} and can be downloaded from:
|
||||
\url{ftp://ftp.ams.org/pub/tex/doc/amsmath/short-math-guide.pdf}
|
||||
|
||||
There are many different \LaTeX{} symbols to remember, luckily you
|
||||
can find the most common symbols in
|
||||
\href{http://ctan.org/pkg/comprehensive}{The Comprehensive \LaTeX~Symbol List}.
|
||||
|
||||
You can write an equation, which is automatically given an equation
|
||||
number by \LaTeX{} like this:
|
||||
\begin{verbatim}
|
||||
\begin{equation}
|
||||
E = mc^{2}
|
||||
\label{eqn:Einstein}
|
||||
\end{equation}
|
||||
\end{verbatim}
|
||||
|
||||
This will produce Einstein's famous energy-matter equivalence equation:
|
||||
\begin{equation}
|
||||
E = mc^{2}
|
||||
\label{eqn:Einstein}
|
||||
\end{equation}
|
||||
|
||||
All equations you write (which are not in the middle of paragraph
|
||||
text) are automatically given equation numbers by \LaTeX{}. If you
|
||||
don't want a particular equation numbered, use the unnumbered form:
|
||||
\begin{verbatim}
|
||||
\[ a^{2}=4 \]
|
||||
\end{verbatim}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{Sectioning and Subsectioning}
|
||||
|
||||
You should break your thesis up into nice, bite-sized sections and
|
||||
subsections. \LaTeX{} automatically builds a table of Contents by
|
||||
looking at all the \verb|\chapter{}|, \verb|\section{}| and
|
||||
\verb|\subsection{}| commands you write in the source.
|
||||
|
||||
The Table of Contents should only list the sections to three (3)
|
||||
levels. A \verb|\chapter{}| is level zero (0). A \verb|\section{}| is
|
||||
level one (1) and so a \verb|\subsection{}| is level two (2). In your
|
||||
thesis it is likely that you will even use a \verb|\subsubsection{}|,
|
||||
which is level three (3). The depth to which the Table of Contents is
|
||||
formatted is set within \file{MastersDoctoralThesis.cls}. If you need
|
||||
this changed, you can do it in \file{main.tex}.
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section{In Closing}
|
||||
|
||||
You have reached the end of this mini-guide. You can now rename or
|
||||
overwrite this pdf file and begin writing your own
|
||||
\file{Chapter1.tex} and the rest of your thesis. The easy work of
|
||||
setting up the structure and framework has been taken care of for
|
||||
you. It's now your job to fill it out!
|
||||
|
||||
Good luck and have lots of fun!
|
||||
|
||||
\begin{flushright}
|
||||
Guide written by ---\\
|
||||
Sunil Patel: \href{http://www.sunilpatel.co.uk}{www.sunilpatel.co.uk}\\
|
||||
Vel: \href{http://www.LaTeXTemplates.com}{LaTeXTemplates.com}
|
||||
\end{flushright}
|
||||
4
Chapters/Conclusion.tex
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
\chapter{Conclusion} % Main chapter title
|
||||
|
||||
\label{Conclusion}
|
||||
4
Chapters/Discussion.tex
Normal file
@@ -0,0 +1,4 @@
|
||||
|
||||
\chapter{Discussion} % Main chapter title
|
||||
|
||||
\label{Discussion}
|
||||
@@ -2,6 +2,210 @@
|
||||
|
||||
\label{Introduction}
|
||||
|
||||
This chapter introduces the Clan project, articulates its fundamental
|
||||
objectives, outlines the key components, and examines the driving
|
||||
factors motivating its development.
|
||||
Peer-to-peer overlay VPNs allow nodes to connect directly regardless
|
||||
of NAT or firewall restrictions. Yet practitioners choosing among the
|
||||
growing number of mesh VPN implementations must rely largely on
|
||||
anecdotal evidence: systematic, reproducible comparisons under
|
||||
realistic conditions are scarce.
|
||||
|
||||
This thesis addresses that gap. We benchmark ten peer-to-peer VPN
|
||||
implementations across seven workloads and four network impairment
|
||||
profiles. We complement these performance benchmarks with a source
|
||||
code analysis of each implementation, verified by the respective
|
||||
maintainers. The entire
|
||||
experimental framework is built on Nix, NixOS, and the Clan deployment
|
||||
system, so every result is independently reproducible.
|
||||
|
||||
\section{Motivation}
|
||||
|
||||
Peer-to-peer architectures can provide censorship-resistant,
|
||||
fault-tolerant infrastructure because they have no single point of
|
||||
failure \cite{shukla_towards_2021}. Blockchain platforms like Ethereum
|
||||
depend on this property, as do IoT edge networks and content delivery
|
||||
systems. But these benefits only hold when nodes are spread across
|
||||
diverse hosting entities.
|
||||
|
||||
In practice, this diversity remains illusory.
|
||||
Amazon, Hetzner, and OVH collectively host 70\% of all Ethereum nodes
|
||||
(see Figure~\ref{fig:ethernodes_hosting}), so nominally decentralized
|
||||
infrastructure actually sits in a handful of cloud providers.
|
||||
More concerning, these providers operate under overlapping regulatory
|
||||
jurisdictions,
|
||||
predominantly the United States and the European Union.
|
||||
This concentration undermines technical sovereignty:
|
||||
a single governmental action could compel service termination,
|
||||
data disclosure, or traffic manipulation across a majority of the network.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=1\textwidth]{Figures/ethernodes_hosting.png}
|
||||
\caption{Distribution of Ethereum nodes hosted by various providers
|
||||
\cite{noauthor_isps_nodate}}
|
||||
\label{fig:ethernodes_hosting}
|
||||
\end{figure}
|
||||
|
||||
This centralization persists because self-hosting is hard. Cloud
|
||||
providers offer static IP addresses and publicly routable endpoints,
|
||||
which avoids the networking problems that residential and small-office
|
||||
deployments face.
|
||||
Most internet-connected devices sit behind Network Address Translation (NAT),
|
||||
which prevents incoming connections without explicit port forwarding
|
||||
or relay infrastructure.
|
||||
Combined with dynamic IP assignments from ISPs, stable peer
|
||||
connectivity from self-hosted infrastructure has traditionally
|
||||
required significant technical expertise.
|
||||
|
||||
Overlay VPNs solve this problem. They establish encrypted tunnels
|
||||
that traverse NAT boundaries, so peers can connect directly without
|
||||
static IP addresses or manual firewall configuration. Each node
|
||||
receives a stable virtual address within the overlay network,
|
||||
regardless of its physical network topology. A device behind
|
||||
consumer-grade NAT can therefore participate as a first-class peer
|
||||
in a distributed system.
|
||||
|
||||
The Clan deployment framework uses Nix and NixOS to eliminate
|
||||
configuration drift and dependency conflicts, which makes it
|
||||
practical for a single administrator to self-host distributed
|
||||
services.
|
||||
Overlay VPNs are central to Clan's architecture: they supply the
|
||||
peer connectivity that lets nodes form a network regardless of
|
||||
physical location or NAT situation.
|
||||
As illustrated in Figure~\ref{fig:vision-stages}, Clan plans to offer
|
||||
a web interface that lets users design and deploy private P2P networks
|
||||
with minimal configuration, assisted by an integrated LLM.
|
||||
|
||||
During Clan's development, a recurring problem surfaced:
|
||||
practitioners disagreed on which mesh VPN to use, each pointing to
|
||||
different edge cases where their preferred VPN failed or lacked a
|
||||
needed feature. These discussions relied on anecdotal evidence rather
|
||||
than systematic evaluation, which motivated the present work.
|
||||
|
||||
\subsection{Related Work}
|
||||
|
||||
Existing research offers only partial coverage of this space.
|
||||
Lackorzynski et al.\ \cite{lackorzynski_comparative_2019} benchmark
|
||||
OpenVPN, IPSec, Tinc, Freelan, MACsec, and WireGuard in the context
|
||||
of industrial communication systems. They measure point-to-point
|
||||
throughput, latency, and CPU overhead but do not address overlay
|
||||
network behavior such as NAT traversal or dynamic peer discovery.
|
||||
The most closely related study by Kjorveziroski et al.\
|
||||
\cite{kjorveziroski_full-mesh_2024} evaluates full-mesh VPN solutions
|
||||
for distributed systems, looking at throughput, reliability under
|
||||
packet loss, and relay behavior for VPNs including ZeroTier. However,
|
||||
it focuses primarily on solutions with a central point of failure and
|
||||
limits its workloads to synthetic iperf3 tests.
|
||||
|
||||
This thesis extends that work in several directions. It evaluates a
|
||||
broader set of VPN implementations with emphasis on fully
|
||||
decentralized architectures and tests them under application-level
|
||||
workloads such as video streaming and package downloads. It also
|
||||
applies multiple network impairment profiles and provides a
|
||||
reproducible experimental framework built on Nix, NixOS, and Clan.
|
||||
|
||||
A secondary goal was to create an automated benchmarking framework
|
||||
that generates a public leaderboard, similar in spirit to the
|
||||
js-framework-benchmark (see Figure~\ref{fig:js-framework-benchmark}).
|
||||
A web interface with regularly updated results gives VPN developers a
|
||||
concrete baseline to measure against.
|
||||
|
||||
\section{Research Contribution}
|
||||
|
||||
This thesis makes the following contributions:
|
||||
|
||||
\begin{enumerate}
|
||||
\item A benchmark of ten peer-to-peer VPN implementations across
|
||||
seven workloads and four network impairment profiles. The workloads
|
||||
include video streaming and package downloads alongside synthetic
|
||||
throughput tests.
|
||||
\item A source code analysis of all ten VPN implementations. Manual
|
||||
code review was combined with LLM-assisted analysis and the results
|
||||
were verified by the respective maintainers on GitHub.
|
||||
\item A reproducible experimental framework built on Nix, NixOS,
|
||||
and the Clan deployment system. Dependencies are pinned and system
|
||||
configuration is declarative, down to deterministic cryptographic
|
||||
material generation. Every result can be independently replicated.
|
||||
\item A performance analysis showing that Tailscale outperforms the
|
||||
Linux kernel's default networking stack under degraded conditions,
|
||||
and that kernel parameter tuning (Reno congestion control in place
|
||||
of CUBIC, with RACK disabled) yields measurable throughput
|
||||
improvements.
|
||||
\item The discovery of several security vulnerabilities across
|
||||
the evaluated VPN implementations.
|
||||
\item An automated benchmarking framework that produces a public
|
||||
leaderboard, giving VPN developers a target to optimize
|
||||
against.
|
||||
\end{enumerate}
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=1\textwidth]{Figures/krause-js-framework.png}
|
||||
\caption{js-framework-benchmark results for Chrome 144.0
|
||||
\cite{krause_krausestjs-framework-benchmark_2026}}
|
||||
\label{fig:js-framework-benchmark}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
|
||||
% Row 1
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage1.png}
|
||||
\caption{Stage 1}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage2.png}
|
||||
\caption{Stage 2}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 2
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage3.png}
|
||||
\caption{Stage 3}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage4.png}
|
||||
\caption{Stage 4}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 3
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage5.png}
|
||||
\caption{Stage 5}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage6.png}
|
||||
\caption{Stage 6}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 4
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage7.png}
|
||||
\caption{Stage 7}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage8.png}
|
||||
\caption{Stage 8}
|
||||
\end{subfigure}
|
||||
|
||||
\caption{Planned web interface for setting up a Clan family network}
|
||||
\label{fig:vision-stages}
|
||||
\end{figure}
|
||||
|
||||
|
||||
@@ -4,11 +4,12 @@
|
||||
|
||||
\label{Methodology}
|
||||
|
||||
This chapter describes the methodology used to benchmark peer-to-peer
|
||||
mesh VPN implementations. The experimental design prioritizes
|
||||
reproducibility at every layer---from dependency management to network
|
||||
conditions---enabling independent verification of results and
|
||||
facilitating future comparative studies.
|
||||
This chapter describes the methodology used to benchmark and analyze
|
||||
peer-to-peer mesh VPN implementations. The evaluation combines
|
||||
performance benchmarking under controlled network conditions with a
|
||||
structured source code analysis of each implementation. All
|
||||
dependencies, system configurations, and test procedures are pinned
|
||||
or declared so that the experiments can be independently reproduced.
|
||||
|
||||
\section{Experimental Setup}
|
||||
|
||||
@@ -18,23 +19,50 @@ All experiments were conducted on three bare-metal servers with
|
||||
identical specifications:
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{CPU:} Intel Model 94, 4 cores / 8 threads
|
||||
\item \textbf{Memory:} 64 GB RAM
|
||||
\item \textbf{Network:} 1 Gbps Ethernet (e1000e driver; one machine uses r8169)
|
||||
\item \textbf{Cryptographic acceleration:} AES-NI, AVX, AVX2, PCLMULQDQ,
|
||||
\item \textbf{CPU:} Intel Model 94, 4 cores / 8 threads
|
||||
\item \textbf{Memory:} 64 GB RAM
|
||||
\item \textbf{Network:} 1 Gbps Ethernet (e1000e driver; one machine
|
||||
uses r8169)
|
||||
\item \textbf{Cryptographic acceleration:} AES-NI, AVX, AVX2, PCLMULQDQ,
|
||||
RDRAND, SSE4.2
|
||||
\end{itemize}
|
||||
|
||||
The presence of hardware cryptographic acceleration is relevant because
|
||||
many VPN implementations leverage AES-NI for encryption, and the results
|
||||
may differ on systems without these features.
|
||||
Results may differ on systems without hardware cryptographic
|
||||
acceleration, since most of the tested VPNs offload encryption to
|
||||
AES-NI.
|
||||
|
||||
\subsection{Network Topology}
|
||||
|
||||
The three machines are connected via a direct 1 Gbps LAN on the same
|
||||
network segment. This baseline topology provides a controlled environment
|
||||
with minimal latency and no packet loss, allowing the overhead introduced
|
||||
by each VPN implementation to be measured in isolation.
|
||||
network segment. Each machine has a publicly reachable IPv4 address,
|
||||
which is used to deploy configuration changes via Clan. On this
|
||||
baseline topology, latency is sub-millisecond and there is no packet
|
||||
loss, so measured overhead can be attributed to the VPN itself.
|
||||
Figure~\ref{fig:mesh_topology} illustrates the full-mesh connectivity
|
||||
between the three machines.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{tikzpicture}[
|
||||
node/.style={
|
||||
draw, rounded corners, minimum width=2.2cm, minimum height=1cm,
|
||||
font=\ttfamily\bfseries, align=center
|
||||
},
|
||||
link/.style={thick, <->}
|
||||
]
|
||||
% Nodes in an equilateral triangle
|
||||
\node[node] (luna) at (0, 3.5) {luna};
|
||||
\node[node] (yuki) at (-3, 0) {yuki};
|
||||
\node[node] (lom) at (3, 0) {lom};
|
||||
|
||||
% Mesh links
|
||||
\draw[link] (luna) -- node[left, font=\small] {1 Gbps} (yuki);
|
||||
\draw[link] (luna) -- node[right, font=\small] {1 Gbps} (lom);
|
||||
\draw[link] (yuki) -- node[below, font=\small] {1 Gbps} (lom);
|
||||
\end{tikzpicture}
|
||||
\caption{Full-mesh network topology of the three benchmark machines}
|
||||
\label{fig:mesh_topology}
|
||||
\end{figure}
|
||||
|
||||
To simulate real-world network conditions, Linux traffic control
|
||||
(\texttt{tc netem}) is used to inject latency, jitter, packet loss,
|
||||
@@ -42,164 +70,114 @@ and reordering. These impairments are applied symmetrically on all
|
||||
machines, meaning effective round-trip impairment is approximately
|
||||
double the per-machine values.
|
||||
|
||||
\section{VPNs Under Test}
|
||||
|
||||
Ten VPN implementations were selected for evaluation, spanning a range
|
||||
of architectures from centralized coordination to fully decentralized
|
||||
mesh topologies. Table~\ref{tab:vpn_selection} summarizes the selection.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{VPN implementations included in the benchmark}
|
||||
\label{tab:vpn_selection}
|
||||
\begin{tabular}{lll}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Architecture} & \textbf{Notes} \\
|
||||
\hline
|
||||
Tailscale (Headscale) & Coordinated mesh & Open-source coordination server \\
|
||||
ZeroTier & Coordinated mesh & Global virtual Ethernet \\
|
||||
Nebula & Coordinated mesh & Slack's overlay network \\
|
||||
Tinc & Fully decentralized & Established since 1998 \\
|
||||
Yggdrasil & Fully decentralized & Spanning-tree routing \\
|
||||
Mycelium & Fully decentralized & End-to-end encrypted IPv6 overlay \\
|
||||
Hyprspace & Fully decentralized & libp2p-based, IPFS-compatible \\
|
||||
EasyTier & Fully decentralized & Rust-based, multi-protocol \\
|
||||
VpnCloud & Fully decentralized & Lightweight, kernel bypass option \\
|
||||
WireGuard & Point-to-point & Reference baseline (not a mesh VPN) \\
|
||||
\hline
|
||||
Internal (no VPN) & N/A & Baseline for raw network performance \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
WireGuard is included as a reference point despite not being a mesh VPN.
|
||||
Its minimal overhead and widespread adoption make it a useful comparison
|
||||
for understanding the cost of mesh coordination and NAT traversal logic.
|
||||
|
||||
\subsection{Selection Criteria}
|
||||
|
||||
VPNs were selected based on:
|
||||
\begin{itemize}
|
||||
\item \textbf{NAT traversal capability:} All selected VPNs can establish
|
||||
connections between peers behind NAT without manual port forwarding.
|
||||
\item \textbf{Decentralization:} Preference for solutions without mandatory
|
||||
central servers, though coordinated-mesh VPNs were included for comparison.
|
||||
\item \textbf{Active development:} Only VPNs with recent commits and
|
||||
maintained releases were considered.
|
||||
\item \textbf{Linux support:} All VPNs must run on Linux.
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Configuration Methodology}
|
||||
|
||||
Each VPN is built from source within the Nix flake, ensuring that all
|
||||
dependencies are pinned to exact versions. VPNs not packaged in nixpkgs
|
||||
(Hyprspace, EasyTier, VpnCloud, qperf) have dedicated build expressions
|
||||
Each VPN is built from source within the Nix flake, with all
|
||||
dependencies pinned to exact versions. VPNs not packaged in nixpkgs
|
||||
(Hyprspace, EasyTier, VpnCloud) have dedicated build expressions
|
||||
under \texttt{pkgs/} in the flake.
|
||||
|
||||
Cryptographic material (WireGuard keys, Nebula certificates, ZeroTier
|
||||
identities) is generated deterministically via Clan's vars generator
|
||||
system. For example, WireGuard keys are generated as:
|
||||
|
||||
\begin{verbatim}
|
||||
wg genkey > "$out/private-key"
|
||||
wg pubkey < "$out/private-key" > "$out/public-key"
|
||||
\end{verbatim}
|
||||
system.
|
||||
|
||||
Generated keys are stored in version control under
|
||||
\texttt{vars/per-machine/\{name\}/} and read at NixOS evaluation time,
|
||||
making key material part of the reproducible configuration.
|
||||
so key material is part of the reproducible configuration.
|
||||
|
||||
\section{Benchmark Suite}
|
||||
|
||||
The benchmark suite includes both synthetic throughput tests and
|
||||
real-world workloads. This combination addresses a limitation of prior
|
||||
work that relied exclusively on iperf3.
|
||||
The benchmark suite includes synthetic throughput tests and
|
||||
application-level workloads. Prior comparative work relied exclusively
|
||||
on iperf3; the additional benchmarks here capture behavior that
|
||||
iperf3 alone misses.
|
||||
Table~\ref{tab:benchmark_suite} summarizes each benchmark.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{Benchmark suite overview}
|
||||
\label{tab:benchmark_suite}
|
||||
\begin{tabular}{llll}
|
||||
\hline
|
||||
\textbf{Benchmark} & \textbf{Protocol} & \textbf{Duration} &
|
||||
\textbf{Key Metrics} \\
|
||||
\hline
|
||||
Ping & ICMP & 3 runs $\times$ 100 pkts & RTT, packet loss \\
|
||||
TCP iPerf3 & TCP & 30 s & Throughput, retransmits, CPU \\
|
||||
UDP iPerf3 & UDP & 30 s & Throughput, jitter, packet loss \\
|
||||
Parallel iPerf3 & TCP & 60 s & Throughput under contention \\
|
||||
QPerf & QUIC & 30 s & Bandwidth, TTFB, conn. time \\
|
||||
RIST Streaming & RIST & 30 s & Bitrate, dropped frames, RTT \\
|
||||
Nix Cache Download & HTTP & 2 runs & Download duration \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
The first four benchmarks use standard network testing tools;
|
||||
the remaining three test application-level workloads.
|
||||
The subsections below describe configuration details that the table
|
||||
does not capture.
|
||||
|
||||
\subsection{Ping}
|
||||
|
||||
Measures round-trip latency and packet delivery reliability.
|
||||
Sends 100 ICMP echo requests at 200\,ms intervals with a 1-second
|
||||
per-packet timeout, repeated for 3 runs.
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Method:} 100 ICMP echo requests at 200\,ms intervals,
|
||||
1-second per-packet timeout, repeated for 3 runs.
|
||||
\item \textbf{Metrics:} RTT (min, avg, max, mdev), packet loss percentage,
|
||||
per-packet RTTs.
|
||||
\end{itemize}
|
||||
\subsection{TCP and UDP iPerf3}
|
||||
|
||||
\subsection{iPerf3}
|
||||
Both tests run for 30 seconds in bidirectional mode with zero-copy
|
||||
(\texttt{-Z}) to minimize CPU overhead. The UDP variant additionally
|
||||
sets unlimited target bandwidth (\texttt{-b 0}) and enables 64-bit
|
||||
counters.
|
||||
|
||||
Measures bulk data transfer throughput.
|
||||
\subsection{Parallel iPerf3}
|
||||
|
||||
\textbf{TCP variant:} 30-second bidirectional test with RSA authentication
|
||||
and zero-copy mode (\texttt{-Z}) to minimize CPU overhead.
|
||||
Runs one bidirectional TCP stream on all three machine pairs
|
||||
simultaneously in a circular pattern (A$\rightarrow$B,
|
||||
B$\rightarrow$C, C$\rightarrow$A) for 60 seconds with zero-copy
|
||||
(\texttt{-Z}). The three concurrent bidirectional links produce six
|
||||
unidirectional flows in total. This contention stresses shared
|
||||
resources that single-stream tests leave idle.
|
||||
|
||||
\textbf{UDP variant:} Same configuration with unlimited target bandwidth
|
||||
(\texttt{-b 0}) and 64-bit counters.
|
||||
\subsection{QPerf}
|
||||
|
||||
\textbf{Parallel TCP variant:} Tests concurrent mesh traffic by running
|
||||
TCP streams on all machines simultaneously in a circular pattern
|
||||
(A$\rightarrow$B, B$\rightarrow$C, C$\rightarrow$A) for 60 seconds.
|
||||
This simulates contention across the mesh.
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Metrics:} Throughput (bits/s), retransmits, congestion window,
|
||||
jitter (UDP), packet loss (UDP).
|
||||
\end{itemize}
|
||||
|
||||
\subsection{qPerf}
|
||||
|
||||
Measures connection-level performance rather than bulk throughput.
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Method:} One qperf instance per CPU core in parallel, each
|
||||
running for 30 seconds. Bandwidth from all cores is summed per second.
|
||||
\item \textbf{Metrics:} Total bandwidth (Mbps), CPU usage, time to first
|
||||
byte (TTFB), connection establishment time.
|
||||
\end{itemize}
|
||||
Spawns one qperf process per CPU core, each running for 30 seconds.
|
||||
Per-core bandwidth is summed per second. In addition to throughput,
|
||||
QPerf reports time to first byte and connection establishment time,
|
||||
which iPerf3 does not measure.
|
||||
|
||||
\subsection{RIST Video Streaming}
|
||||
|
||||
Measures real-time multimedia streaming performance.
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Method:} The sender generates a 4K (3840$\times$2160) test
|
||||
pattern at 30 fps using ffmpeg with H.264 encoding (ultrafast preset,
|
||||
zerolatency tuning) at 25 Mbps target bitrate. The stream is transmitted
|
||||
over the RIST protocol to a receiver on the target machine for 30 seconds.
|
||||
\item \textbf{Encoding metrics:} Actual bitrate, frame rate, dropped frames.
|
||||
\item \textbf{Network metrics:} Packets dropped, packets recovered via
|
||||
RIST retransmission, RTT, quality score (0--100), received bitrate.
|
||||
\end{itemize}
|
||||
|
||||
RIST (Reliable Internet Stream Transport) is a protocol designed for
|
||||
low-latency video contribution over unreliable networks, making it a
|
||||
realistic test of VPN behavior under multimedia workloads.
|
||||
Generates a 4K ($3840\times2160$) H.264 test pattern at 30\,fps
|
||||
(ultrafast preset, zerolatency tuning, 25\,Mbps bitrate cap) with
|
||||
ffmpeg and transmits it over the RIST protocol for 30 seconds. Because
|
||||
the synthetic test pattern is highly compressible, the actual encoding
|
||||
bitrate is approximately 3.3\,Mbps, well below the configured cap. RIST
|
||||
(Reliable Internet Stream Transport) is a protocol for low-latency
|
||||
video contribution over unreliable networks. The benchmark records
|
||||
encoding-side statistics (actual bitrate, frame rate, dropped frames)
|
||||
and RIST-specific counters (packets recovered via retransmission,
|
||||
quality score).
|
||||
|
||||
\subsection{Nix Cache Download}
|
||||
|
||||
Measures sustained download performance using a real-world workload.
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Method:} A Harmonia Nix binary cache server on the target
|
||||
machine serves the Firefox package. The client downloads it via
|
||||
\texttt{nix copy} through the VPN. Benchmarked with hyperfine:
|
||||
1 warmup run followed by 2 timed runs. The local cache and Nix's
|
||||
SQLite metadata are cleared between runs.
|
||||
\item \textbf{Metrics:} Mean duration (seconds), standard deviation,
|
||||
min/max duration.
|
||||
\end{itemize}
|
||||
|
||||
This benchmark tests realistic HTTP traffic patterns and sustained
|
||||
sequential download performance, complementing the synthetic throughput
|
||||
tests.
|
||||
A Harmonia Nix binary cache server on the target machine serves the
|
||||
Firefox package. The client downloads it via \texttt{nix copy}
|
||||
through the VPN. Unlike the iPerf3 tests, this workload issues many
|
||||
short-lived HTTP requests instead of a single bulk transfer.
|
||||
Benchmarked with hyperfine (1 warmup run, 2 timed runs); the local
|
||||
Nix store and SQLite metadata are cleared between runs.
|
||||
|
||||
\section{Network Impairment Profiles}
|
||||
|
||||
Four impairment profiles simulate a range of network conditions, from
|
||||
ideal to severely degraded. Impairments are applied via Linux traffic
|
||||
control (\texttt{tc netem}) on every machine's primary interface.
|
||||
Table~\ref{tab:impairment_profiles} shows the per-machine values;
|
||||
effective round-trip impairment is approximately doubled.
|
||||
Four impairment profiles simulate progressively worse network
|
||||
conditions, from an unmodified baseline to a severely degraded link.
|
||||
All impairments are injected with Linux traffic control
|
||||
(\texttt{tc netem}) on the egress side of every machine's primary
|
||||
interface.
|
||||
Table~\ref{tab:impairment_profiles} lists the per-machine values.
|
||||
Because impairments are applied on both ends of a connection, the
|
||||
effective round-trip impact is roughly double the listed values.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
@@ -208,30 +186,50 @@ effective round-trip impairment is approximately doubled.
|
||||
\begin{tabular}{lccccc}
|
||||
\hline
|
||||
\textbf{Profile} & \textbf{Latency} & \textbf{Jitter} &
|
||||
\textbf{Loss} & \textbf{Reorder} & \textbf{Correlation} \\
|
||||
\textbf{Loss} & \textbf{Reorder} & \textbf{Correlation} \\
|
||||
\hline
|
||||
Baseline & --- & --- & --- & --- & --- \\
|
||||
Baseline & --- & --- & --- & --- & --- \\
|
||||
Low & 2 ms & 2 ms & 0.25\% & 0.5\% & 25\% \\
|
||||
Medium & 4 ms & 7 ms & 1.0\% & 2.5\% & 50\% \\
|
||||
High & 12 ms & 30 ms & 5.0\% & 10\% & 50\% \\
|
||||
High & 6 ms & 15 ms & 2.5\% & 5\% & 50\% \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
The ``Low'' profile approximates a well-provisioned continental
|
||||
connection, ``Medium'' represents intercontinental links or congested
|
||||
networks, and ``High'' simulates severely degraded conditions such as
|
||||
satellite links or highly congested mobile networks.
|
||||
Each column in Table~\ref{tab:impairment_profiles} controls one
|
||||
aspect of the simulated degradation:
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{Latency} is a constant delay added to every outgoing
|
||||
packet. For example, 2\,ms on each machine adds roughly 4\,ms to
|
||||
the round trip.
|
||||
\item \textbf{Jitter} introduces random variation on top of the
|
||||
fixed latency. A packet on the Low profile may see anywhere
|
||||
between 0 and 4\,ms of total added delay instead of exactly
|
||||
2\,ms.
|
||||
\item \textbf{Loss} is the fraction of packets that are silently
|
||||
dropped. At 0.25\,\% (Low profile), roughly 1 in 400 packets is
|
||||
discarded.
|
||||
\item \textbf{Reorder} is the fraction of packets that arrive out
|
||||
of sequence. \texttt{tc netem} achieves this by giving selected
|
||||
packets a shorter delay than their predecessors, so they overtake
|
||||
earlier packets.
|
||||
\item \textbf{Correlation} determines whether impairment events are
|
||||
independent or bursty. At 0\,\%, each packet's fate is decided
|
||||
independently. At higher values, a packet that was lost or
|
||||
reordered raises the probability that the next packet suffers the
|
||||
same fate, producing the burst patterns typical of real networks.
|
||||
\end{itemize}
|
||||
|
||||
A 30-second stabilization period follows TC application before
|
||||
measurements begin, allowing queuing disciplines to settle.
|
||||
measurements begin so that queuing disciplines can settle.
|
||||
|
||||
\section{Experimental Procedure}
|
||||
|
||||
\subsection{Automation}
|
||||
|
||||
The benchmark suite is fully automated via a Python orchestrator
|
||||
(\texttt{vpn\_bench/}). For each VPN under test, the orchestrator:
|
||||
A Python orchestrator (\texttt{vpn\_bench/}) automates the full
|
||||
benchmark suite. For each VPN under test, it:
|
||||
|
||||
\begin{enumerate}
|
||||
\item Cleans all state directories from previous VPN runs
|
||||
@@ -244,12 +242,70 @@ The benchmark suite is fully automated via a Python orchestrator
|
||||
\begin{enumerate}
|
||||
\item Applies TC rules via context manager (guarantees cleanup)
|
||||
\item Waits 30 seconds for stabilization
|
||||
\item Executes all benchmarks
|
||||
\item Executes each benchmark three times sequentially,
|
||||
once per machine pair: $A\to B$, then
|
||||
$B\to C$, lastly $C\to A$
|
||||
\item Clears TC rules
|
||||
\end{enumerate}
|
||||
\item Collects results and metadata
|
||||
\end{enumerate}
|
||||
|
||||
Figure~\ref{fig:orchestrator_flow} illustrates this procedure as a
|
||||
flowchart.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{tikzpicture}[
|
||||
box/.style={
|
||||
draw, rounded corners, minimum width=4.8cm, minimum height=0.9cm,
|
||||
font=\small, align=center, fill=white
|
||||
},
|
||||
decision/.style={
|
||||
draw, diamond, aspect=2.5, minimum width=3cm,
|
||||
font=\small, align=center, fill=white, inner sep=1pt
|
||||
},
|
||||
arr/.style={->, thick},
|
||||
every node/.style={font=\small}
|
||||
]
|
||||
% Main flow
|
||||
\node[box] (clean) at (0, 0) {Clean state directories};
|
||||
\node[box] (deploy) at (0, -1.5) {Deploy VPN via Clan};
|
||||
\node[box] (restart) at (0, -3) {Restart VPN services\\(up to 3 attempts)};
|
||||
\node[box] (verify) at (0, -4.5) {Verify connectivity\\(120\,s timeout)};
|
||||
|
||||
% Inner loop
|
||||
\node[decision] (profile) at (0, -6.3) {Next impairment\\profile?};
|
||||
\node[box] (tc) at (0, -8.3) {Apply TC rules};
|
||||
\node[box] (wait) at (0, -9.8) {Wait 30\,s};
|
||||
\node[box] (bench) at (0, -11.3) {Run benchmarks\\$A{\to}B,\;
|
||||
B{\to}C,\; C{\to}A$};
|
||||
\node[box] (clear) at (0, -12.8) {Clear TC rules};
|
||||
|
||||
% After loop
|
||||
\node[box] (collect) at (0, -14.8) {Collect results};
|
||||
|
||||
% Arrows -- main spine
|
||||
\draw[arr] (clean) -- (deploy);
|
||||
\draw[arr] (deploy) -- (restart);
|
||||
\draw[arr] (restart) -- (verify);
|
||||
\draw[arr] (verify) -- (profile);
|
||||
\draw[arr] (profile) -- node[right] {yes} (tc);
|
||||
\draw[arr] (tc) -- (wait);
|
||||
\draw[arr] (wait) -- (bench);
|
||||
\draw[arr] (bench) -- (clear);
|
||||
|
||||
% Loop back
|
||||
\draw[arr] (clear) -- ++(3.8, 0) |- (profile);
|
||||
|
||||
% Exit loop
|
||||
\draw[arr] (profile) -- ++(-3.2, 0) node[above, pos=0.3] {no}
|
||||
|- (collect);
|
||||
\end{tikzpicture}
|
||||
\caption{Flowchart of the benchmark orchestrator procedure for a
|
||||
single VPN}
|
||||
\label{fig:orchestrator_flow}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Retry Logic}
|
||||
|
||||
Tests use a retry wrapper with up to 2 retries (3 total attempts),
|
||||
@@ -262,22 +318,92 @@ be identified during analysis.
|
||||
Each metric is summarized as a statistics dictionary containing:
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{min / max:} Extreme values observed
|
||||
\item \textbf{average:} Arithmetic mean across samples
|
||||
\item \textbf{p25 / p50 / p75:} Quartiles via \texttt{statistics.quantiles()}
|
||||
\item \textbf{min / max:} Extreme values observed
|
||||
\item \textbf{average:} Arithmetic mean across samples
|
||||
\item \textbf{p25 / p50 / p75:} Quartiles via Python's
|
||||
\texttt{statistics.quantiles()} method
|
||||
\end{itemize}
|
||||
|
||||
Multi-run tests (ping, nix-cache) aggregate across runs. Per-second
|
||||
tests (qperf, RIST) aggregate across all per-second samples.
|
||||
Aggregation differs by benchmark type. Benchmarks that execute
|
||||
multiple discrete runs, ping (3 runs of 100 packets each) and
|
||||
nix-cache (2 timed runs via hyperfine), first compute statistics
|
||||
within each run, then aggregate across runs: averages and percentiles
|
||||
are averaged, while the reported minimum and maximum are the global
|
||||
extremes across all runs. Concretely, if ping produces three runs
|
||||
with mean RTTs of 5.1, 5.3, and 5.0\,ms, the reported average is
|
||||
the mean of those three values (5.13\,ms). The reported minimum is
|
||||
the single lowest RTT observed across all three runs.
|
||||
|
||||
The approach uses empirical percentiles rather than parametric
|
||||
confidence intervals, which is appropriate for benchmark data that
|
||||
may not follow a normal distribution. The nix-cache test (via hyperfine)
|
||||
additionally reports standard deviation.
|
||||
Benchmarks that produce continuous per-second samples, qperf and
|
||||
RIST streaming for example, pool all per-second measurements from a single
|
||||
execution into one series before computing statistics. For qperf,
|
||||
bandwidth is first summed across CPU cores for each second, and
|
||||
statistics are then computed over the resulting time series.
|
||||
|
||||
The analysis reports empirical percentiles (p25, p50, p75) alongside
|
||||
min/max bounds rather than parametric confidence intervals.
|
||||
Benchmark latency and throughput distributions are often skewed or
|
||||
multimodal, so parametric assumptions of normality would be
|
||||
unreliable. The interquartile range (p25--p75) conveys the spread of
|
||||
typical observations, while min and max capture outlier behavior.
|
||||
The nix-cache benchmark additionally reports standard deviation via
|
||||
hyperfine's built-in statistical output.
|
||||
|
||||
\section{Source Code Analysis}
|
||||
|
||||
We also conducted a structured source code analysis of all ten VPN
|
||||
implementations. The analysis followed three phases.
|
||||
|
||||
\subsection{Repository Collection and LLM-Assisted Overview}
|
||||
|
||||
The latest main branch of each VPN's git repository was cloned,
|
||||
together with key dependencies that implement core functionality
|
||||
outside the main repository. For example, Yggdrasil delegates its
|
||||
routing and cryptographic operations to the Ironwood library, which
|
||||
was analyzed alongside the main codebase.
|
||||
|
||||
Ten LLM agents (Claude Code) were then spawned in parallel, one per
|
||||
VPN. Each agent was instructed to read the full source tree and
|
||||
produce an \texttt{overview.md} file documenting the following
|
||||
aspects:
|
||||
|
||||
\begin{itemize}
|
||||
\item Wire protocol and message framing
|
||||
\item Encryption scheme and key exchange
|
||||
\item Packet handling and performance
|
||||
\item NAT traversal mechanism
|
||||
\item Local routing and peer discovery
|
||||
\item Security features and access control
|
||||
\item Resilience / Central Point of Failure
|
||||
\end{itemize}
|
||||
|
||||
Each agent was required to reference the specific file and line
|
||||
range supporting every claim so that outputs could be verified
|
||||
against the source.
|
||||
|
||||
\subsection{Manual Verification}
|
||||
|
||||
The LLM-generated overviews served as a navigational aid rather than
|
||||
a trusted source. The most important code paths identified in each
|
||||
overview were manually read and verified against the actual source
|
||||
code. Where the automated summaries were inaccurate or superficial,
|
||||
they were corrected and expanded.
|
||||
|
||||
\subsection{Feature Matrix and Maintainer Review}
|
||||
|
||||
The findings from both phases were consolidated into a feature matrix
|
||||
of 131 features across all ten VPN implementations, covering protocol
|
||||
characteristics, cryptographic primitives, NAT traversal strategies,
|
||||
routing behavior, and security properties.
|
||||
|
||||
The completed feature matrix was published and sent to the respective
|
||||
VPN maintainers for review. We incorporated their feedback as
|
||||
corrections and clarifications to the final classification.
|
||||
|
||||
\section{Reproducibility}
|
||||
|
||||
Reproducibility is ensured at every layer of the experimental stack.
|
||||
The experimental stack pins or declares the variables that could
|
||||
affect results.
|
||||
|
||||
\subsection{Dependency Pinning}
|
||||
|
||||
@@ -286,16 +412,17 @@ cryptographic hashes (\texttt{narHash}) and commit SHAs for each input.
|
||||
Key pinned inputs include:
|
||||
|
||||
\begin{itemize}
|
||||
\item \textbf{nixpkgs:} Follows \texttt{clan-core/nixpkgs}, ensuring a
|
||||
single version across the dependency graph
|
||||
\item \textbf{clan-core:} The Clan framework, pinned to a specific commit
|
||||
\item \textbf{VPN sources:} Hyprspace, EasyTier, Nebula locked to exact commits
|
||||
\item \textbf{Build infrastructure:} flake-parts, treefmt-nix, disko,
|
||||
\item \textbf{nixpkgs:} Follows \texttt{clan-core/nixpkgs}, so a single
|
||||
version is used across the dependency graph
|
||||
\item \textbf{clan-core:} The Clan framework, pinned to a specific commit
|
||||
\item \textbf{VPN sources:} Hyprspace, EasyTier, Nebula locked to
|
||||
exact commits
|
||||
\item \textbf{Build infrastructure:} flake-parts, treefmt-nix, disko,
|
||||
nixos-facter-modules
|
||||
\end{itemize}
|
||||
|
||||
Custom packages not in nixpkgs (qperf, VpnCloud, iperf with auth patches,
|
||||
phantun, EasyTier, Hyprspace) are built from source within the flake.
|
||||
EasyTier, Hyprspace) are built from source within the flake.
|
||||
|
||||
\subsection{Declarative System Configuration}
|
||||
|
||||
@@ -312,7 +439,7 @@ configuration entirely or rolls back.
|
||||
Clan's inventory system maps machines to service roles declaratively.
|
||||
For each VPN, the orchestrator writes an inventory entry assigning
|
||||
machines to roles (e.g., Nebula lighthouse vs.\ peer). The Clan module
|
||||
system translates this into NixOS configuration---systemd services,
|
||||
system translates this into NixOS configuration: systemd services,
|
||||
firewall rules, peer lists, and key references. The same inventory
|
||||
entry always produces the same NixOS configuration.
|
||||
|
||||
@@ -325,67 +452,110 @@ benchmark directories. This prevents cross-contamination between tests.
|
||||
|
||||
\subsection{Data Provenance}
|
||||
|
||||
Every test result includes metadata recording:
|
||||
Results are organized in the four-level directory hierarchy shown in
|
||||
Figure~\ref{fig:result-tree}. Each VPN directory stores a
|
||||
\texttt{layout.json} capturing the machine topology used for that run.
|
||||
Each impairment profile directory records the exact \texttt{tc}
|
||||
parameters in \texttt{tc\_settings.json} and per-phase durations in
|
||||
\texttt{timing\_breakdown.json}. Individual benchmark results are
|
||||
stored in one subdirectory per machine pair.
|
||||
|
||||
\begin{figure}[ht]
|
||||
\centering
|
||||
\begin{forest}
|
||||
for tree={
|
||||
font=\ttfamily\small,
|
||||
grow'=0,
|
||||
folder,
|
||||
s sep=2pt,
|
||||
inner xsep=3pt,
|
||||
inner ysep=2pt,
|
||||
}
|
||||
[date/
|
||||
[vpn/
|
||||
[layout.json]
|
||||
[profile/
|
||||
[tc\_settings.json]
|
||||
[timing\_breakdown.json]
|
||||
[parallel\_tcp\_iperf3.json]
|
||||
[\textnormal{\textit{\{pos\}\_\{peer\}}}/
|
||||
[ping.json]
|
||||
[tcp\_iperf3.json]
|
||||
[udp\_iperf3.json]
|
||||
[qperf.json]
|
||||
[rist\_stream.json]
|
||||
[nix\_cache.json]
|
||||
[connection\_timings.json]
|
||||
]
|
||||
]
|
||||
]
|
||||
[General/
|
||||
[hardware.json]
|
||||
[comparison/
|
||||
[cross\_profile\_*.json]
|
||||
[profile/
|
||||
[benchmark\_stats.json]
|
||||
[per-benchmark .json files]
|
||||
]
|
||||
]
|
||||
]
|
||||
]
|
||||
\end{forest}
|
||||
\caption{Directory hierarchy of benchmark results. Each run produces
|
||||
per-VPN and per-profile directories alongside a \texttt{General/}
|
||||
directory with cross-VPN comparison data.}
|
||||
\label{fig:result-tree}
|
||||
\end{figure}
|
||||
|
||||
Every benchmark result file uses a uniform JSON envelope with a
|
||||
\texttt{status} field, a \texttt{data} object holding the
|
||||
test-specific payload, and a \texttt{meta} object recording
|
||||
wall-clock duration, number of attempts, VPN restart count and
|
||||
duration, connectivity wait time, source and target machine names,
|
||||
and on failure, the relevant service logs.
|
||||
|
||||
\section{VPNs Under Test}
|
||||
|
||||
VPNs were selected based on:
|
||||
\begin{itemize}
|
||||
\item Wall-clock duration
|
||||
\item Number of attempts (1 = first try succeeded)
|
||||
\item VPN restart attempts and duration
|
||||
\item Connectivity wait duration
|
||||
\item Source and target machine names
|
||||
\item Service logs (on failure)
|
||||
\item \textbf{NAT traversal capability:} All selected VPNs can establish
|
||||
connections between peers behind NAT without manual port forwarding.
|
||||
\item \textbf{Decentralization:} Preference for solutions without mandatory
|
||||
central servers, though coordinated-mesh VPNs were included for comparison.
|
||||
\item \textbf{Active development:} Only VPNs with recent commits and
|
||||
maintained releases were considered (with the exception of VpnCloud).
|
||||
\item \textbf{Linux support:} All VPNs must run on Linux.
|
||||
\end{itemize}
|
||||
|
||||
Results are organized hierarchically by VPN, TC profile, and machine
|
||||
pair. Each profile directory contains a \texttt{tc\_settings.json}
|
||||
snapshot of the exact impairment parameters applied.
|
||||
Table~\ref{tab:vpn_selection} lists the ten VPN implementations
|
||||
selected for evaluation.
|
||||
|
||||
\section{Related Work}
|
||||
|
||||
\subsection{Nix: A Safe and Policy-Free System for Software Deployment}
|
||||
|
||||
Nix addresses significant issues in software deployment by utilizing
|
||||
cryptographic hashes to ensure unique paths for component instances
|
||||
\cite{dolstra_nix_2004}. Features such as concurrent installation of
|
||||
multiple versions, atomic upgrades, and safe garbage collection make
|
||||
Nix a flexible deployment system. This work uses Nix to ensure that
|
||||
all VPN builds and system configurations are deterministic.
|
||||
|
||||
\subsection{NixOS: A Purely Functional Linux Distribution}
|
||||
|
||||
NixOS extends Nix principles to Linux system configuration
|
||||
\cite{dolstra_nixos_2008}. System configurations are reproducible and
|
||||
isolated from stateful interactions typical in imperative package
|
||||
management. This property is essential for ensuring identical test
|
||||
environments across benchmark runs.
|
||||
|
||||
\subsection{A Comparative Study on Virtual Private Networks}
|
||||
|
||||
Lackorzynski et al.\ \cite{lackorzynski_comparative_2019} evaluate
|
||||
VPN protocols in the context of industrial communication systems (Industry 4.0),
|
||||
benchmarking OpenVPN, IPSec, Tinc, Freelan, MACsec, and WireGuard.
|
||||
Their analysis focuses on point-to-point protocol performance---throughput,
|
||||
latency, and CPU overhead---rather than overlay network behavior.
|
||||
In contrast, this thesis evaluates VPNs that provide a full data plane
|
||||
with peer-to-peer connectivity, NAT traversal, and dynamic peer discovery.
|
||||
|
||||
|
||||
\subsection{Full-Mesh VPN Performance Evaluation}
|
||||
|
||||
Kjorveziroski et al.\ \cite{kjorveziroski_full-mesh_2024} provide a
|
||||
comprehensive evaluation of full-mesh VPN solutions for distributed
|
||||
systems. Their benchmarks analyze throughput, reliability under packet
|
||||
loss, and relay behavior for VPNs including ZeroTier.
|
||||
|
||||
This thesis extends their work in several ways:
|
||||
\begin{itemize}
|
||||
\item Broader VPN selection with emphasis on fully decentralized
|
||||
architectures
|
||||
\item Real-world workloads (video streaming, package downloads)
|
||||
beyond synthetic iperf3 tests
|
||||
\item Multiple impairment profiles to characterize behavior under
|
||||
varying network conditions
|
||||
\item Fully reproducible experimental framework via Nix/NixOS/Clan
|
||||
\end{itemize}
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{VPN implementations included in the benchmark}
|
||||
\label{tab:vpn_selection}
|
||||
\begin{tabular}{lll}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Architecture} & \textbf{Notes} \\
|
||||
\hline
|
||||
Tailscale (Headscale) & Coordinated mesh & Open-source
|
||||
coordination server \\
|
||||
ZeroTier & Coordinated mesh & Global virtual Ethernet \\
|
||||
Nebula & Coordinated mesh & Slack's overlay network \\
|
||||
Tinc & Fully decentralized & Established since 1998 \\
|
||||
Yggdrasil & Fully decentralized & Spanning-tree routing \\
|
||||
Mycelium & Fully decentralized & End-to-end encrypted IPv6 overlay \\
|
||||
Hyprspace & Fully decentralized & libp2p-based, IPFS-compatible \\
|
||||
EasyTier & Fully decentralized & Rust-based, multi-protocol \\
|
||||
VpnCloud & Fully decentralized & Lightweight, kernel bypass option \\
|
||||
WireGuard & Point-to-point & Reference baseline (not a mesh VPN) \\
|
||||
\hline
|
||||
Internal (no VPN) & N/A & Baseline for raw network performance \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
WireGuard is not a mesh VPN but is included as a reference point.
|
||||
Comparing its overhead to the mesh VPNs isolates the cost of mesh
|
||||
coordination and NAT traversal.
|
||||
|
||||
|
||||
@@ -1,165 +0,0 @@
|
||||
\chapter{Motivation} % Main chapter title
|
||||
|
||||
\label{Motivation}
|
||||
|
||||
Peer-to-peer architectures promise censorship-resistant, fault-tolerant
|
||||
infrastructure by eliminating single points of failure \cite{shukla_towards_2021}.
|
||||
These architectures underpin a growing range of systems---from IoT edge computing
|
||||
and content delivery networks to blockchain platforms like Ethereum.
|
||||
Yet realizing these benefits requires distributing nodes across
|
||||
genuinely diverse hosting entities.
|
||||
|
||||
In practice, this diversity remains illusory.
|
||||
Amazon, Hetzner, and OVH collectively host 70\% of all Ethereum nodes
|
||||
(see Figure~\ref{fig:ethernodes_hosting}),
|
||||
concentrating nominally decentralized infrastructure
|
||||
within a handful of cloud providers.
|
||||
More concerning, these providers operate under overlapping regulatory jurisdictions,
|
||||
predominantly the United States and the European Union.
|
||||
This concentration undermines technical sovereignty:
|
||||
a single governmental action could compel service termination,
|
||||
data disclosure, or traffic manipulation across a majority of the network.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=1\textwidth]{Figures/ethernodes_hosting.png}
|
||||
\caption{Distribution of Ethereum nodes hosted by various providers
|
||||
\cite{noauthor_isps_nodate}}
|
||||
\label{fig:ethernodes_hosting}
|
||||
\end{figure}
|
||||
|
||||
Why does this centralization persist despite the explicit goals of decentralization?
|
||||
The answer lies in the practical barriers to self-hosting.
|
||||
Cloud providers offer static IP addresses and publicly routable endpoints,
|
||||
eliminating the networking complexity that plagues residential and
|
||||
small-office deployments.
|
||||
Most internet-connected devices sit behind Network Address Translation (NAT),
|
||||
which prevents incoming connections without explicit port forwarding
|
||||
or relay infrastructure.
|
||||
Combined with dynamic IP assignments from ISPs, maintaining stable peer connectivity
|
||||
from self-hosted infrastructure traditionally required significant technical expertise.
|
||||
|
||||
Overlay VPNs offer a solution to this fundamental barrier.
|
||||
By establishing encrypted tunnels that traverse NAT boundaries,
|
||||
mesh VPNs enable direct peer-to-peer connectivity without requiring
|
||||
static IP addresses or manual firewall configuration.
|
||||
Each node receives a stable virtual address within the overlay network,
|
||||
regardless of its underlying network topology.
|
||||
This capability is transformative:
|
||||
it allows a device behind consumer-grade NAT to participate
|
||||
as a first-class peer in a distributed system,
|
||||
removing the primary technical advantage that cloud providers hold.
|
||||
|
||||
The Clan deployment framework builds on this foundation.
|
||||
Clan leverages Nix and NixOS to eliminate entire classes of
|
||||
configuration errors prevalent in contemporary infrastructure deployment,
|
||||
reducing operational overhead to a degree where a single administrator
|
||||
can reliably self-host complex distributed services.
|
||||
Overlay VPNs are central to Clan's architecture,
|
||||
providing the secure peer connectivity that enables nodes
|
||||
to form cohesive networks regardless of their physical location or NAT situation.
|
||||
As illustrated in Figure~\ref{fig:vision-stages}, Clan envisions
|
||||
a web interface that enables users to design and deploy private P2P networks
|
||||
with minimal configuration, assisted by an integrated LLM
|
||||
for contextual guidance and troubleshooting.
|
||||
|
||||
During the development of Clan, a recurring challenge became apparent:
|
||||
practitioners held divergent preferences for mesh VPN solutions,
|
||||
each citing different edge cases where their chosen VPN
|
||||
proved unreliable or lacked essential features.
|
||||
These discussions were largely grounded in anecdotal evidence
|
||||
rather than systematic evaluation.
|
||||
This observation revealed a clear need for rigorous,
|
||||
evidence-based comparison of peer-to-peer overlay VPN implementations.
|
||||
|
||||
Existing research on this topic remains sparse.
|
||||
One notable work from 2024, ``Full-mesh VPN performance evaluation
|
||||
for a secure edge-cloud continuum'' \cite{kjorveziroski_full-mesh_2024},
|
||||
benchmarks a subset of mesh VPNs but focuses primarily
|
||||
on solutions with a central point of failure.
|
||||
In contrast, this thesis evaluates more widely adopted mesh VPNs
|
||||
with an emphasis on fully decentralized architectures.
|
||||
Furthermore, that study relied exclusively on iperf3 for performance measurement,
|
||||
whereas our benchmark suite includes real-world workloads
|
||||
to better reflect practical usage patterns.
|
||||
|
||||
A further motivation was to create a fully automated benchmarking framework
|
||||
capable of generating a public leaderboard,
|
||||
similar in spirit to the js-framework-benchmark
|
||||
(see Figure~\ref{fig:js-framework-benchmark}).
|
||||
By providing an accessible web interface with regularly updated results,
|
||||
we hope to encourage P2P VPN developers to optimize their implementations
|
||||
in pursuit of top rankings.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=1\textwidth]{Figures/krause-js-framework.png}
|
||||
\caption{js-framework-benchmark results for Chrome 144.0
|
||||
\cite{krause_krausestjs-framework-benchmark_2026}}
|
||||
\label{fig:js-framework-benchmark}
|
||||
\end{figure}
|
||||
|
||||
\begin{figure}[h]
|
||||
\centering
|
||||
|
||||
% Row 1
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage1.png}
|
||||
\caption{Stage 1}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage2.png}
|
||||
\caption{Stage 2}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 2
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage3.png}
|
||||
\caption{Stage 3}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage4.png}
|
||||
\caption{Stage 4}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 3
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage5.png}
|
||||
\caption{Stage 5}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage6.png}
|
||||
\caption{Stage 6}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em} % Add spacing between rows
|
||||
|
||||
% Row 4
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage7.png}
|
||||
\caption{Stage 7}
|
||||
\end{subfigure}
|
||||
\hfill
|
||||
\begin{subfigure}{0.45\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\linewidth]{Figures/vision/stage8.png}
|
||||
\caption{Stage 8}
|
||||
\end{subfigure}
|
||||
|
||||
\caption{Visionary Web Interface for Setting Up a Clan Family Network}
|
||||
\label{fig:vision-stages}
|
||||
\end{figure}
|
||||
885
Chapters/Results.tex
Normal file
@@ -0,0 +1,885 @@
|
||||
% Chapter Template
|
||||
|
||||
\chapter{Results} % Main chapter title
|
||||
|
||||
\label{Results}
|
||||
|
||||
This chapter presents the results of the benchmark suite across all
|
||||
ten VPN implementations and the internal baseline. The structure
|
||||
follows the impairment profiles from ideal to degraded:
|
||||
Section~\ref{sec:baseline} establishes overhead under ideal
|
||||
conditions, then subsequent sections examine how each VPN responds to
|
||||
increasing network impairment. The chapter concludes with findings
|
||||
from the source code analysis. A recurring theme is that no single
|
||||
metric captures VPN
|
||||
performance; the rankings shift
|
||||
depending on whether one measures throughput, latency, retransmit
|
||||
behavior, or real-world application performance.
|
||||
|
||||
\section{Baseline Performance}
|
||||
\label{sec:baseline}
|
||||
|
||||
The baseline impairment profile introduces no artificial loss or
|
||||
reordering, so any performance gap between VPNs can be attributed to
|
||||
the VPN itself. Throughout the plots in this section, the
|
||||
\emph{internal} bar marks a direct host-to-host connection with no VPN
|
||||
in the path; it represents the best the hardware can do. On its own,
|
||||
this link delivers 934\,Mbps on a single TCP stream and a round-trip
|
||||
latency of just
|
||||
0.60\,ms. WireGuard comes remarkably close to these numbers, reaching
|
||||
92.5\,\% of bare-metal throughput with only a single retransmit across
|
||||
an entire 30-second test. Mycelium sits at the other extreme, adding
|
||||
34.9\,ms of latency, roughly 58$\times$ the bare-metal figure.
|
||||
|
||||
\subsection{Test Execution Overview}
|
||||
|
||||
Running the full baseline suite across all ten VPNs and the internal
|
||||
reference took just over four hours. The bulk of that time, about
|
||||
2.6~hours (63\,\%), was spent on actual benchmark execution; VPN
|
||||
installation and deployment accounted for another 45~minutes (19\,\%),
|
||||
and roughly 21~minutes (9\,\%) went to waiting for VPN tunnels to come
|
||||
up after restarts. The remaining time was consumed by VPN service restarts
|
||||
and traffic-control (tc) stabilization.
|
||||
Figure~\ref{fig:test_duration} breaks this down per VPN.
|
||||
|
||||
Most VPNs completed every benchmark without issues, but four failed
|
||||
one test each: Nebula and Headscale timed out on the qperf
|
||||
QUIC performance benchmark after six retries, while Hyprspace and
|
||||
Mycelium failed the UDP iPerf3 test
|
||||
with a 120-second timeout. Their individual success rate is
|
||||
85.7\,\%, with all other VPNs passing the full suite
|
||||
(Figure~\ref{fig:success_rate}).
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{1.0\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/Average Test
|
||||
Duration per Machine}.png}
|
||||
\caption{Average test duration per VPN, including installation
|
||||
time and benchmark execution}
|
||||
\label{fig:test_duration}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{1.0\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/Benchmark
|
||||
Success Rate}.png}
|
||||
\caption{Benchmark success rate across all seven tests}
|
||||
\label{fig:success_rate}
|
||||
\end{subfigure}
|
||||
\caption{Test execution overview. Hyprspace has the longest average
|
||||
duration due to UDP timeouts and long VPN connectivity
|
||||
waits. WireGuard completes fastest. Nebula, Headscale,
|
||||
Hyprspace, and Mycelium each fail one benchmark.}
|
||||
\label{fig:test_overview}
|
||||
\end{figure}
|
||||
|
||||
\subsection{TCP Throughput}
|
||||
|
||||
Each VPN ran a single-stream iPerf3 session for 30~seconds on every
|
||||
link direction (lom$\rightarrow$yuki, yuki$\rightarrow$luna,
|
||||
luna$\rightarrow$lom); Table~\ref{tab:tcp_baseline} shows the
|
||||
averages. Three distinct performance tiers emerge, separated by
|
||||
natural gaps in the data.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{Single-stream TCP throughput at baseline, sorted by
|
||||
throughput. Retransmits are averaged per 30-second test across
|
||||
all three link directions. The horizontal rules separate the
|
||||
three performance tiers.}
|
||||
\label{tab:tcp_baseline}
|
||||
\begin{tabular}{lrrr}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Throughput (Mbps)} &
|
||||
\textbf{Baseline (\%)} & \textbf{Retransmits} \\
|
||||
\hline
|
||||
Internal & 934 & 100.0 & 1.7 \\
|
||||
WireGuard & 864 & 92.5 & 1 \\
|
||||
ZeroTier & 814 & 87.2 & 1163 \\
|
||||
Headscale & 800 & 85.6 & 102 \\
|
||||
Yggdrasil & 795 & 85.1 & 75 \\
|
||||
\hline
|
||||
Nebula & 706 & 75.6 & 955 \\
|
||||
EasyTier & 636 & 68.1 & 537 \\
|
||||
VpnCloud & 539 & 57.7 & 857 \\
|
||||
\hline
|
||||
Hyprspace & 368 & 39.4 & 4965 \\
|
||||
Tinc & 336 & 36.0 & 240 \\
|
||||
Mycelium & 259 & 27.7 & 710 \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
The top tier ($>$80\,\% of baseline) groups WireGuard, ZeroTier,
|
||||
Headscale, and Yggdrasil, all within 15\,\% of the bare-metal link.
|
||||
A middle tier (55--80\,\%) follows with Nebula, EasyTier, and
|
||||
VpnCloud, while Hyprspace, Tinc, and Mycelium occupy the bottom tier
|
||||
at under 40\,\% of baseline.
|
||||
Figure~\ref{fig:tcp_throughput} visualizes this hierarchy.
|
||||
|
||||
Raw throughput alone is incomplete, however. The retransmit column
|
||||
reveals that not all high-throughput VPNs get there cleanly.
|
||||
ZeroTier, for instance, reaches 814\,Mbps but accumulates
|
||||
1\,163~retransmits per test, over 1\,000$\times$ what WireGuard
|
||||
needs. ZeroTier compensates for tunnel-internal packet loss by
|
||||
repeatedly triggering TCP congestion-control recovery, whereas
|
||||
WireGuard sends data once and it arrives. Across all VPNs,
|
||||
retransmit behaviour falls into three groups: \emph{clean} ($<$110:
|
||||
WireGuard, Internal, Yggdrasil, Headscale), \emph{stressed}
|
||||
(200--900: Tinc, EasyTier, Mycelium, VpnCloud), and
|
||||
\emph{pathological} ($>$950: Nebula, ZeroTier, Hyprspace).
|
||||
|
||||
% TODO: Is this naming scheme any good?
|
||||
|
||||
% TODO: Fix TCP Throughput plot
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/TCP
|
||||
Throughput}.png}
|
||||
\caption{Average single-stream TCP throughput}
|
||||
\label{fig:tcp_throughput}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/TCP
|
||||
Retransmit Rate}.png}
|
||||
\caption{Average TCP retransmits per 30-second test (log scale)}
|
||||
\label{fig:tcp_retransmits}
|
||||
\end{subfigure}
|
||||
\caption{TCP throughput and retransmit rate at baseline. WireGuard
|
||||
leads at 864\,Mbps with 1 retransmit. Hyprspace has nearly 5000
|
||||
retransmits per test. The retransmit count does not always track
|
||||
inversely with throughput: ZeroTier achieves high throughput
|
||||
\emph{despite} high retransmits.}
|
||||
\label{fig:tcp_results}
|
||||
\end{figure}
|
||||
|
||||
Retransmits have a direct mechanical relationship with TCP congestion
|
||||
control. Each retransmit triggers a reduction in the congestion window
|
||||
(\texttt{cwnd}), throttling the sender. This relationship is visible
|
||||
in Figure~\ref{fig:retransmit_correlations}: Hyprspace, with 4965
|
||||
retransmits, maintains the smallest average congestion window in the
|
||||
dataset (205\,KB), while Yggdrasil's 75 retransmits allow a 4.3\,MB
|
||||
window, the largest of any VPN. At first glance this suggests a
|
||||
clean inverse correlation between retransmits and congestion window
|
||||
size, but the picture is misleading. Yggdrasil's outsized window is
|
||||
largely an artifact of its jumbo overlay MTU (32\,731 bytes): each
|
||||
segment carries far more data, so the window in bytes is inflated
|
||||
relative to VPNs using a standard ${\sim}$1\,400-byte MTU. Comparing
|
||||
congestion windows across different MTU sizes is not meaningful
|
||||
without normalizing for segment size. What \emph{is} clear is that
|
||||
high retransmit rates force TCP to spend more time in congestion
|
||||
recovery than in steady-state transmission, capping throughput
|
||||
regardless of available bandwidth. ZeroTier illustrates the
|
||||
opposite extreme: brute-force retransmission can still yield high
|
||||
throughput (814\,Mbps with 1\,163 retransmits), at the cost of wasted
|
||||
bandwidth and unstable flow behavior.
|
||||
|
||||
VpnCloud stands out: its sender reports 538.8\,Mbps
|
||||
but the receiver measures only 413.4\,Mbps, leaving a 23\,\% gap (the largest
|
||||
in the dataset). This suggests significant in-tunnel packet loss or
|
||||
buffering at the VpnCloud layer that the retransmit count (857)
|
||||
alone does not fully explain.
|
||||
|
||||
Run-to-run variability also differs substantially. WireGuard ranges
|
||||
from 824 to 884\,Mbps (a 60\,Mbps window), while Mycelium ranges
|
||||
from 122 to 379\,Mbps, a 3:1 ratio between worst and best runs. A
|
||||
VPN with wide variance is harder to capacity-plan around than one
|
||||
with consistent performance, even if the average is lower.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/retransmits-vs-throughput.png}
|
||||
\caption{Retransmits vs.\ throughput}
|
||||
\label{fig:retransmit_throughput}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/retransmits-vs-max-congestion-window.png}
|
||||
\caption{Retransmits vs.\ max congestion window}
|
||||
\label{fig:retransmit_cwnd}
|
||||
\end{subfigure}
|
||||
\caption{Retransmit correlations (log scale on x-axis). High
|
||||
retransmits do not always mean low throughput (ZeroTier: 1\,163
|
||||
retransmits, 814\,Mbps), but extreme retransmits do (Hyprspace:
|
||||
4\,965 retransmits, 368\,Mbps). The apparent inverse correlation
|
||||
between retransmits and congestion window size is dominated by
|
||||
Yggdrasil's outlier (4.3\,MB \texttt{cwnd}), which is inflated
|
||||
by its 32\,KB jumbo overlay MTU rather than by low retransmits
|
||||
alone.}
|
||||
\label{fig:retransmit_correlations}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Latency}
|
||||
|
||||
Sorting by latency rearranges the rankings considerably.
|
||||
Table~\ref{tab:latency_baseline} lists the average ping round-trip
|
||||
times, which cluster into three distinct ranges.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{Average ping RTT at baseline, sorted by latency}
|
||||
\label{tab:latency_baseline}
|
||||
\begin{tabular}{lr}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Avg RTT (ms)} \\
|
||||
\hline
|
||||
Internal & 0.60 \\
|
||||
VpnCloud & 1.13 \\
|
||||
Tinc & 1.19 \\
|
||||
WireGuard & 1.20 \\
|
||||
Nebula & 1.25 \\
|
||||
ZeroTier & 1.28 \\
|
||||
EasyTier & 1.33 \\
|
||||
\hline
|
||||
Headscale & 1.64 \\
|
||||
Hyprspace & 1.79 \\
|
||||
Yggdrasil & 2.20 \\
|
||||
\hline
|
||||
Mycelium & 34.9 \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
Six VPNs stay below 1.3\,ms, comfortably close to the bare-metal
|
||||
0.60\,ms. VpnCloud posts the lowest latency of any VPN (1.13\,ms), below
|
||||
WireGuard (1.20\,ms), yet its throughput tops out at only 539\,Mbps.
|
||||
Low per-packet latency does not guarantee high bulk throughput. A
|
||||
second group (Headscale,
|
||||
Hyprspace, Yggdrasil) lands in the 1.5--2.2\,ms range, representing
|
||||
moderate overhead. Then there is Mycelium at 34.9\,ms, so far
|
||||
removed from the rest that Section~\ref{sec:mycelium_routing} gives
|
||||
it a dedicated analysis.
|
||||
|
||||
ZeroTier's average of 1.28\,ms looks unremarkable, but its maximum
|
||||
RTT spikes to 8.6\,ms, a 6.8$\times$ jump and the largest for any
|
||||
sub-2\,ms VPN. These spikes point to periodic control-plane
|
||||
interference that the average hides.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/ping/Average RTT}.png}
|
||||
\caption{Average ping RTT at baseline. Mycelium (34.9\,ms) is a
|
||||
massive outlier at 58$\times$ the internal baseline. VpnCloud is
|
||||
the fastest VPN at 1.13\,ms, slightly below WireGuard (1.20\,ms).}
|
||||
\label{fig:ping_rtt}
|
||||
\end{figure}
|
||||
|
||||
Tinc presents a paradox: it has the third-lowest latency (1.19\,ms)
|
||||
but only the second-lowest throughput (336\,Mbps). Packets traverse
|
||||
the tunnel quickly, yet single-threaded userspace processing cannot
|
||||
keep up with the link speed. The qperf benchmark backs this up: Tinc
|
||||
maxes out at
|
||||
14.9\,\% CPU while delivering just 336\,Mbps, a clear sign that
|
||||
the CPU, not the network, is the bottleneck.
|
||||
Figure~\ref{fig:latency_throughput} makes this disconnect easy to
|
||||
spot.
|
||||
|
||||
The qperf measurements also reveal a wide spread in CPU usage.
|
||||
Hyprspace (55.1\,\%) and Yggdrasil
|
||||
(52.8\,\%) consume 5--6$\times$ as much CPU as Internal's
|
||||
9.7\,\%. WireGuard sits at 30.8\,\%, surprisingly high for a
|
||||
kernel-level implementation, though much of that goes to
|
||||
cryptographic processing. On the efficient end, VpnCloud
|
||||
(14.9\,\%), Tinc (14.9\,\%), and EasyTier (15.4\,\%) do the most
|
||||
with the least CPU time. Nebula and Headscale are missing from
|
||||
this comparison because qperf failed for both.
|
||||
|
||||
%TODO: Explain why they consistently failed
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/latency-vs-throughput.png}
|
||||
\caption{Latency vs.\ throughput at baseline. Each point represents
|
||||
one VPN. The quadrants reveal different bottleneck types:
|
||||
VpnCloud (low latency, moderate throughput), Tinc (low latency,
|
||||
low throughput, CPU-bound), Mycelium (high latency, low
|
||||
throughput, overlay routing overhead).}
|
||||
\label{fig:latency_throughput}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Parallel TCP Scaling}
|
||||
|
||||
The single-stream benchmark tests one link direction at a time. The
|
||||
parallel benchmark changes this setup: all three link directions
|
||||
(lom$\rightarrow$yuki, yuki$\rightarrow$luna,
|
||||
luna$\rightarrow$lom) run simultaneously in a circular pattern for
|
||||
60~seconds, each carrying one bidirectional TCP stream (six
|
||||
unidirectional flows in total). Because three independent
|
||||
link pairs now compete for shared tunnel resources at once, the
|
||||
aggregate throughput is naturally higher than any single direction
|
||||
alone, which is why even Internal reaches 1.50$\times$ its
|
||||
single-stream figure. The scaling factor (parallel throughput
|
||||
divided by single-stream throughput) captures two effects:
|
||||
the benefit of using multiple link pairs in parallel, and how
|
||||
well the VPN handles the resulting contention.
|
||||
Table~\ref{tab:parallel_scaling} lists the results.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{Parallel TCP scaling at baseline. Scaling factor is the
|
||||
ratio of parallel to single-stream throughput. Internal's
|
||||
1.50$\times$ represents the expected scaling on this hardware.}
|
||||
\label{tab:parallel_scaling}
|
||||
\begin{tabular}{lrrr}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Single (Mbps)} &
|
||||
\textbf{Parallel (Mbps)} & \textbf{Scaling} \\
|
||||
\hline
|
||||
Mycelium & 259 & 569 & 2.20$\times$ \\
|
||||
Hyprspace & 368 & 803 & 2.18$\times$ \\
|
||||
Tinc & 336 & 563 & 1.68$\times$ \\
|
||||
Yggdrasil & 795 & 1265 & 1.59$\times$ \\
|
||||
Headscale & 800 & 1228 & 1.54$\times$ \\
|
||||
Internal & 934 & 1398 & 1.50$\times$ \\
|
||||
ZeroTier & 814 & 1206 & 1.48$\times$ \\
|
||||
WireGuard & 864 & 1281 & 1.48$\times$ \\
|
||||
EasyTier & 636 & 927 & 1.46$\times$ \\
|
||||
VpnCloud & 539 & 763 & 1.42$\times$ \\
|
||||
Nebula & 706 & 648 & 0.92$\times$ \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
The VPNs that gain the most are those most constrained in
|
||||
single-stream mode. Mycelium's 34.9\,ms RTT means a lone TCP stream
|
||||
can never fill the pipe: the bandwidth-delay product demands a window
|
||||
larger than any single flow maintains, so multiple concurrent flows
|
||||
compensate for that constraint and push throughput to 2.20$\times$
|
||||
the single-stream figure. Hyprspace scales almost as well
|
||||
(2.18$\times$) but for a
|
||||
different reason: multiple streams work around the buffer bloat that
|
||||
cripples any individual flow
|
||||
(Section~\ref{sec:hyprspace_bloat}). Tinc picks up a
|
||||
1.68$\times$ boost because several streams can collectively keep its
|
||||
single-threaded CPU busy during what would otherwise be idle gaps in
|
||||
a single flow.
|
||||
|
||||
WireGuard and Internal both scale cleanly at around
|
||||
1.48--1.50$\times$ with zero retransmits, suggesting that
|
||||
WireGuard's overhead is a fixed per-packet cost that does not worsen
|
||||
under multiplexing.
|
||||
|
||||
Nebula is the only VPN that actually gets \emph{slower} with more
|
||||
streams: throughput drops from 706\,Mbps to 648\,Mbps
|
||||
(0.92$\times$) while retransmits jump from 955 to 2\,462. The six
|
||||
streams are clearly fighting each other for resources inside the
|
||||
tunnel.
|
||||
|
||||
More streams also amplify existing retransmit problems. Hyprspace
|
||||
climbs from 4\,965 to 17\,426~retransmits;
|
||||
VpnCloud from 857 to 6\,023. VPNs that were clean in single-stream
|
||||
mode stay clean under load, while the stressed ones only get worse.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/single-stream-vs-parallel-tcp-throughput.png}
|
||||
\caption{Single-stream vs.\ parallel throughput}
|
||||
\label{fig:single_vs_parallel}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/parallel-tcp-scaling-factor.png}
|
||||
\caption{Parallel TCP scaling factor}
|
||||
\label{fig:scaling_factor}
|
||||
\end{subfigure}
|
||||
\caption{Parallel TCP scaling at baseline. Nebula is the only VPN
|
||||
where parallel throughput is lower than single-stream
|
||||
(0.92$\times$). Mycelium and Hyprspace benefit most from
|
||||
parallelism ($>$2$\times$), compensating for latency and buffer
|
||||
bloat respectively. The dashed line at 1.0$\times$ marks the
|
||||
break-even point.}
|
||||
\label{fig:parallel_tcp}
|
||||
\end{figure}
|
||||
|
||||
\subsection{UDP Stress Test}
|
||||
|
||||
The UDP iPerf3 test uses unlimited sender rate (\texttt{-b 0}),
|
||||
which is a deliberate overload test rather than a realistic workload.
|
||||
The sender throughput values are artifacts: they reflect how fast the
|
||||
sender can write to the socket, not how fast data traverses the
|
||||
tunnel. Yggdrasil, for example, reports 63\,744\,Mbps sender
|
||||
throughput because it uses a 32\,731-byte block size (a jumbo-frame
|
||||
overlay MTU), inflating the apparent rate per \texttt{send()} system
|
||||
call. Only the receiver throughput is meaningful.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{UDP receiver throughput and packet loss at baseline
|
||||
(\texttt{-b 0} stress test). Hyprspace and Mycelium timed out
|
||||
at 120 seconds and are excluded.}
|
||||
\label{tab:udp_baseline}
|
||||
\begin{tabular}{lrr}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Receiver (Mbps)} &
|
||||
\textbf{Loss (\%)} \\
|
||||
\hline
|
||||
Internal & 952 & 0.0 \\
|
||||
WireGuard & 898 & 0.0 \\
|
||||
Nebula & 890 & 76.2 \\
|
||||
Headscale & 876 & 69.8 \\
|
||||
EasyTier & 865 & 78.3 \\
|
||||
Yggdrasil & 852 & 98.7 \\
|
||||
ZeroTier & 851 & 89.5 \\
|
||||
VpnCloud & 773 & 83.7 \\
|
||||
Tinc & 471 & 89.9 \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
%TODO: Explain that the UDP test also crashes often,
|
||||
% which makes the test somewhat unreliable
|
||||
% but a good indicator if the network traffic is "different" than
|
||||
% the programmer expected
|
||||
|
||||
Only Internal and WireGuard achieve 0\,\% packet loss. Both operate at
|
||||
the kernel level with proper backpressure that matches sender to
|
||||
receiver rate. Every userspace VPN shows massive loss (69--99\%)
|
||||
because the sender overwhelms the tunnel's processing capacity.
|
||||
Yggdrasil's 98.7\% loss is the most extreme: it sends the most data
|
||||
(due to its large block size) but loses almost all of it. These loss
|
||||
rates do not reflect real-world UDP behavior but reveal which VPNs
|
||||
implement effective flow control. Hyprspace and Mycelium could not
|
||||
complete the UDP test at all, timing out after 120 seconds.
|
||||
|
||||
The \texttt{blksize\_bytes} field reveals each VPN's effective path
|
||||
MTU: Yggdrasil at 32\,731 bytes (jumbo overlay), ZeroTier at 2\,728,
|
||||
Internal at 1\,448, VpnCloud at 1\,375, WireGuard at 1\,368, Tinc at 1\,353,
|
||||
EasyTier at 1\,288, Nebula at 1\,228, and Headscale at 1\,208 (the
|
||||
smallest). These differences affect fragmentation behavior under real
|
||||
workloads, particularly for protocols that send large datagrams.
|
||||
|
||||
%TODO: Mention QUIC
|
||||
%TODO: Mention again that the "default" settings of every VPN have been used
|
||||
% to better reflect real world use, as most users probably won't
|
||||
% change these defaults
|
||||
% and explain that good defaults are as much a part of good software as
|
||||
% having the features but they are hard to configure correctly
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/udp/UDP
|
||||
Throughput}.png}
|
||||
\caption{UDP receiver throughput}
|
||||
\label{fig:udp_throughput}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/udp/UDP
|
||||
Packet Loss}.png}
|
||||
\caption{UDP packet loss}
|
||||
\label{fig:udp_loss}
|
||||
\end{subfigure}
|
||||
\caption{UDP stress test results at baseline (\texttt{-b 0},
|
||||
unlimited sender rate). Internal and WireGuard are the only
|
||||
implementations with 0\% loss. Hyprspace and Mycelium are
|
||||
excluded due to 120-second timeouts.}
|
||||
\label{fig:udp_results}
|
||||
\end{figure}
|
||||
|
||||
% TODO: Compare parallel TCP retransmit rate
|
||||
% with single TCP retransmit rate and see what changed
|
||||
|
||||
\subsection{Real-World Workloads}
|
||||
|
||||
Saturating a link with iPerf3 measures peak capacity, but not how a
|
||||
VPN performs under realistic traffic. This subsection switches to
|
||||
application-level workloads: downloading packages from a Nix binary
|
||||
cache and streaming video over RIST. Both interact with the VPN
|
||||
tunnel the way real software does, through many short-lived
|
||||
connections, TLS handshakes, and latency-sensitive UDP packets.
|
||||
|
||||
\paragraph{Nix Binary Cache Downloads.}
|
||||
|
||||
This test downloads a fixed set of Nix packages through each VPN and
|
||||
measures the total transfer time. The results
|
||||
(Table~\ref{tab:nix_cache}) compress the throughput hierarchy
|
||||
considerably: even Hyprspace, the worst performer, finishes in
|
||||
11.92\,s, only 40\,\% slower than bare metal. Once connection
|
||||
setup, TLS handshakes, and HTTP round-trips enter the picture,
|
||||
throughput differences between 500 and 900\,Mbps matter far less
|
||||
than per-connection latency.
|
||||
|
||||
\begin{table}[H]
|
||||
\centering
|
||||
\caption{Nix binary cache download time at baseline, sorted by
|
||||
duration. Overhead is relative to the internal baseline (8.53\,s).}
|
||||
\label{tab:nix_cache}
|
||||
\begin{tabular}{lrr}
|
||||
\hline
|
||||
\textbf{VPN} & \textbf{Mean (s)} &
|
||||
\textbf{Overhead (\%)} \\
|
||||
\hline
|
||||
Internal & 8.53 & -- \\
|
||||
Nebula & 9.15 & +7.3 \\
|
||||
ZeroTier & 9.22 & +8.1 \\
|
||||
VpnCloud & 9.39 & +10.0 \\
|
||||
EasyTier & 9.39 & +10.1 \\
|
||||
WireGuard & 9.45 & +10.8 \\
|
||||
Headscale & 9.79 & +14.8 \\
|
||||
Tinc & 10.00 & +17.2 \\
|
||||
Mycelium & 10.07 & +18.1 \\
|
||||
Yggdrasil & 10.59 & +24.2 \\
|
||||
Hyprspace & 11.92 & +39.7 \\
|
||||
\hline
|
||||
\end{tabular}
|
||||
\end{table}
|
||||
|
||||
Several rankings invert relative to raw throughput. ZeroTier
|
||||
finishes faster than WireGuard (9.22\,s vs.\ 9.45\,s) despite
|
||||
6\,\% fewer raw Mbps (814 vs.\ 864) and far more retransmits. Yggdrasil
|
||||
is the clearest example: it has the
|
||||
third-highest throughput at 795\,Mbps, yet lands at 24\,\% overhead
|
||||
because its
|
||||
2.2\,ms latency adds up over the many small sequential HTTP requests
|
||||
that constitute a Nix cache download.
|
||||
Figure~\ref{fig:throughput_vs_download} confirms this weak link
|
||||
between raw throughput and real-world download speed.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/Nix Cache
|
||||
Mean Download Time}.png}
|
||||
\caption{Nix cache download time per VPN}
|
||||
\label{fig:nix_cache}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/raw-throughput-vs-nix-cache-download-time.png}
|
||||
\caption{Raw throughput vs.\ download time}
|
||||
\label{fig:throughput_vs_download}
|
||||
\end{subfigure}
|
||||
\caption{Application-level download performance. The throughput
|
||||
hierarchy compresses under real HTTP workloads: the worst VPN
|
||||
(Hyprspace, 11.92\,s) is only 40\% slower than bare metal.
|
||||
Throughput explains some variance but not all: Yggdrasil
|
||||
(795\,Mbps, 10.59\,s) is slower than Nebula (706\,Mbps, 9.15\,s)
|
||||
because latency matters more for HTTP workloads.}
|
||||
\label{fig:nix_download}
|
||||
\end{figure}
|
||||
|
||||
\paragraph{Video Streaming (RIST).}
|
||||
|
||||
At just 3.3\,Mbps, the RIST video stream sits comfortably within
|
||||
every VPN's throughput budget. This test therefore measures
|
||||
something different: how well the VPN handles real-time UDP packet
|
||||
delivery under steady load. Nine of the eleven VPNs pass without
|
||||
incident, delivering 100\,\% video quality. The 14--16 dropped
|
||||
frames that appear uniformly across all VPNs, including Internal,
|
||||
trace back to encoder warm-up rather than tunnel overhead.
|
||||
|
||||
Headscale is the exception. It averages just 13.1\,\% quality,
|
||||
dropping 288~packets per test interval. The degradation is not
|
||||
bursty but sustained: median quality sits at 10\,\%, and the
|
||||
interquartile range of dropped packets spans a narrow 255--330 band.
|
||||
The qperf benchmark independently corroborates this, having failed
|
||||
outright for Headscale, confirming that something beyond bulk TCP is
|
||||
broken.
|
||||
|
||||
What makes this failure unexpected is that Headscale builds on
|
||||
WireGuard, which handles video flawlessly. TCP throughput places
|
||||
Headscale squarely in Tier~1. Yet the RIST test runs over UDP, and
|
||||
qperf probes latency-sensitive paths using both TCP and UDP. The
|
||||
pattern points toward Headscale's DERP relay or NAT traversal layer
|
||||
as the source. Its effective path MTU of 1\,208~bytes, the smallest
|
||||
of any VPN, likely compounds the issue: RIST packets that exceed
|
||||
this limit must be fragmented, and reassembling fragments under
|
||||
sustained load produces exactly the kind of steady, uniform packet
|
||||
drops the data shows. For video conferencing, VoIP, or any
|
||||
real-time media workload, this is a disqualifying result regardless
|
||||
of TCP throughput.
|
||||
|
||||
Hyprspace reveals a different failure mode. Its average quality
|
||||
reads 100\,\%, but the raw numbers underneath are far from stable:
|
||||
mean packet drops of 1\,194 and a maximum spike of 55\,500, with
|
||||
the 25th, 50th, and 75th percentiles all at zero. Hyprspace
|
||||
alternates between perfect delivery and catastrophic bursts.
|
||||
RIST's forward error correction compensates for most of these
|
||||
events, but the worst spikes are severe enough to overwhelm FEC
|
||||
entirely.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/Video
|
||||
Streaming/RIST Quality}.png}
|
||||
\caption{RIST video streaming quality at baseline. Headscale at
|
||||
13.1\% average quality is the clear outlier. Every other VPN
|
||||
achieves 99.8\% or higher. Nebula is at 99.8\% (minor
|
||||
degradation). The video bitrate (3.3\,Mbps) is well within every
|
||||
VPN's throughput capacity, so this test reveals real-time UDP
|
||||
handling quality rather than bandwidth limits.}
|
||||
\label{fig:rist_quality}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Operational Resilience}
|
||||
|
||||
Sustained-load performance does not predict recovery speed. How
|
||||
quickly a tunnel comes up after a reboot, and how reliably it
|
||||
reconverges, matters as much as peak throughput for operational use.
|
||||
|
||||
First-time connectivity spans a wide range. Headscale and WireGuard
|
||||
are ready in under 50\,ms, while ZeroTier (8--17\,s) and VpnCloud
|
||||
(10--14\,s) spend seconds negotiating with their control planes
|
||||
before passing traffic.
|
||||
|
||||
%TODO: Maybe we want to scrap first-time connectivity
|
||||
|
||||
Reboot reconnection rearranges the rankings. Hyprspace, the worst
|
||||
performer under sustained TCP load, recovers in just 8.7~seconds on
|
||||
average, faster than any other VPN. WireGuard and Nebula follow at
|
||||
10.1\,s each. Nebula's consistency is striking: 10.06, 10.06,
|
||||
10.07\,s across its three nodes, pointing to a hard-coded timer
|
||||
rather than topology-dependent convergence.
|
||||
Mycelium sits at the opposite end, needing 76.6~seconds and showing
|
||||
the same suspiciously uniform pattern (75.7, 75.7, 78.3\,s),
|
||||
suggesting a fixed protocol-level wait built into the overlay.
|
||||
|
||||
%TODO: Hard coded timer needs to be verified
|
||||
|
||||
Yggdrasil produces the most lopsided result in the dataset: its yuki
|
||||
node is back in 7.1~seconds while lom and luna take 94.8 and
|
||||
97.3~seconds respectively. The gap likely reflects the overlay's
|
||||
spanning-tree rebuild: a node near the root of the tree reconverges
|
||||
quickly, while one further out has to wait for the topology to
|
||||
propagate.
|
||||
|
||||
%TODO: Needs clarifications what is a "spanning tree build"
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/reboot-reconnection-time-per-vpn.png}
|
||||
\caption{Average reconnection time per VPN}
|
||||
\label{fig:reboot_bar}
|
||||
\end{subfigure}
|
||||
|
||||
\vspace{1em}
|
||||
|
||||
\begin{subfigure}[t]{\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{Figures/baseline/reboot-reconnection-time-heatmap.png}
|
||||
\caption{Per-node reconnection time heatmap}
|
||||
\label{fig:reboot_heatmap}
|
||||
\end{subfigure}
|
||||
\caption{Reboot reconnection time at baseline. The heatmap reveals
|
||||
Yggdrasil's extreme per-node asymmetry (7\,s for yuki vs.\
|
||||
95--97\,s for lom/luna) and Mycelium's uniform slowness (75--78\,s
|
||||
across all nodes). Hyprspace reconnects fastest (8.7\,s average)
|
||||
despite its poor sustained-load performance.}
|
||||
\label{fig:reboot_reconnection}
|
||||
\end{figure}
|
||||
|
||||
\subsection{Pathological Cases}
|
||||
\label{sec:pathological}
|
||||
|
||||
Three VPNs exhibit behaviors that the aggregate numbers alone cannot
|
||||
explain. The following subsections piece together observations from
|
||||
earlier benchmarks into per-VPN diagnoses.
|
||||
|
||||
\paragraph{Hyprspace: Buffer Bloat.}
|
||||
\label{sec:hyprspace_bloat}
|
||||
|
||||
Hyprspace produces the most severe performance collapse in the
|
||||
dataset. At idle, its ping latency is a modest 1.79\,ms.
|
||||
Under TCP load, that number balloons to roughly 2\,800\,ms, a
|
||||
1\,556$\times$ increase. This is not the network becoming
|
||||
congested; it is the VPN tunnel itself filling up with buffered
|
||||
packets and refusing to drain.
|
||||
|
||||
The consequences ripple through every TCP metric. With 4\,965
|
||||
retransmits per 30-second test (one in every 200~segments), TCP
|
||||
spends most of its time in congestion recovery rather than
|
||||
steady-state transfer, shrinking the average congestion window to
|
||||
205\,KB, the smallest in the dataset. Under parallel load the
|
||||
situation worsens: retransmits climb to 17\,426. The buffering even
|
||||
inverts iPerf3's measurements: the receiver reports 419.8\,Mbps
|
||||
while the sender sees only 367.9\,Mbps, because massive ACK delays
|
||||
cause the sender-side timer to undercount the actual data rate. The
|
||||
UDP test never finished at all, timing out at 120~seconds.
|
||||
|
||||
% Should we always use percentages for retransmits?
|
||||
|
||||
What prevents Hyprspace from being entirely unusable is everything
|
||||
\emph{except} sustained load. It has the fastest reboot
|
||||
reconnection in the dataset (8.7\,s) and delivers 100\,\% video
|
||||
quality outside of its burst events. The pathology is narrow but
|
||||
severe: any continuous data stream saturates the tunnel's internal
|
||||
buffers.
|
||||
|
||||
\paragraph{Mycelium: Routing Anomaly.}
|
||||
\label{sec:mycelium_routing}
|
||||
|
||||
Mycelium's 34.9\,ms average latency appears to be the cost of
|
||||
routing through a global overlay. The per-path numbers, however,
|
||||
reveal a bimodal distribution:
|
||||
|
||||
\begin{itemize}
|
||||
\bitem{luna$\rightarrow$lom:} 1.63\,ms (direct path, comparable
|
||||
to Headscale at 1.64\,ms)
|
||||
\bitem{lom$\rightarrow$yuki:} 51.47\,ms (overlay-routed)
|
||||
\bitem{yuki$\rightarrow$luna:} 51.60\,ms (overlay-routed)
|
||||
\end{itemize}
|
||||
|
||||
One of the three links has found a direct route; the other two still
|
||||
bounce through the overlay. All three machines sit on the same
|
||||
physical network, so Mycelium's path discovery is failing
|
||||
intermittently, a more specific problem than blanket overlay
|
||||
overhead. Throughput mirrors the split:
|
||||
yuki$\rightarrow$luna reaches 379\,Mbps while
|
||||
luna$\rightarrow$lom manages only 122\,Mbps, a 3:1 gap. In
|
||||
bidirectional mode, the reverse direction on that worst link drops
|
||||
to 58.4\,Mbps, the lowest single-direction figure in the entire
|
||||
dataset.
|
||||
|
||||
\begin{figure}[H]
|
||||
\centering
|
||||
\includegraphics[width=\textwidth]{{Figures/baseline/tcp/Mycelium/Average
|
||||
Throughput}.png}
|
||||
\caption{Per-link TCP throughput for Mycelium, showing extreme
|
||||
path asymmetry caused by inconsistent direct route discovery.
|
||||
The 3:1 ratio between best (yuki$\rightarrow$luna, 379\,Mbps)
|
||||
and worst (luna$\rightarrow$lom, 122\,Mbps) links reflects
|
||||
different overlay routing paths.}
|
||||
\label{fig:mycelium_paths}
|
||||
\end{figure}
|
||||
|
||||
The overlay penalty shows up most clearly at connection setup.
|
||||
Mycelium's average time-to-first-byte is 93.7\,ms (vs.\ Internal's
|
||||
16.8\,ms, a 5.6$\times$ overhead), and connection establishment
|
||||
alone costs 47.3\,ms (3$\times$ overhead). Every new connection
|
||||
incurs that overhead, so workloads dominated by
|
||||
short-lived connections accumulate it rapidly. Bulk downloads, by
|
||||
contrast, amortize it: the Nix cache test finishes only 18\,\%
|
||||
slower than Internal (10.07\,s vs.\ 8.53\,s) because once the
|
||||
transfer phase begins, per-connection latency fades into the
|
||||
background.
|
||||
|
||||
Mycelium is also the slowest VPN to recover from a reboot:
|
||||
76.6~seconds on average, and almost suspiciously uniform across
|
||||
nodes (75.7, 75.7, 78.3\,s). That kind of consistency points to a
|
||||
hard-coded convergence timer in the overlay protocol rather than
|
||||
anything topology-dependent. The UDP test timed out at
|
||||
120~seconds, and even first-time connectivity required a
|
||||
70-second wait at startup.
|
||||
|
||||
% Explain what topology-dependent means in this case.
|
||||
|
||||
\paragraph{Tinc: Userspace Processing Bottleneck.}
|
||||
|
||||
Tinc is a clear case of a CPU bottleneck masquerading as a network
|
||||
problem. At 1.19\,ms latency, packets get through the
|
||||
tunnel quickly. Yet throughput tops out at 336\,Mbps, barely a
|
||||
third of the bare-metal link. The usual suspects do not apply:
|
||||
Tinc's path MTU is a healthy 1\,500~bytes
|
||||
(\texttt{blksize\_bytes} of 1\,353 from UDP iPerf3, comparable to
|
||||
VpnCloud at 1\,375 and WireGuard at 1\,368), and its retransmit
|
||||
count (240) is moderate. What limits Tinc is its single-threaded
|
||||
userspace architecture: one CPU core simply cannot encrypt, copy,
|
||||
and forward packets fast enough to fill the pipe.
|
||||
|
||||
The parallel benchmark confirms this diagnosis. Tinc scales to
|
||||
563\,Mbps (1.68$\times$), beating Internal's 1.50$\times$ ratio.
|
||||
Multiple TCP streams collectively keep that single core busy during
|
||||
what would otherwise be idle gaps in any individual flow, squeezing
|
||||
out throughput that no single stream could reach alone.
|
||||
|
||||
\section{Impact of Network Impairment}
|
||||
|
||||
This section examines how each VPN responds to the Low, Medium, and
|
||||
High impairment profiles defined in Chapter~\ref{Methodology}.
|
||||
|
||||
\subsection{Ping}
|
||||
|
||||
% RTT and packet loss across impairment profiles.
|
||||
|
||||
\subsection{TCP Throughput}
|
||||
|
||||
% TCP iperf3: throughput, retransmits, congestion window.
|
||||
|
||||
\subsection{UDP Throughput}
|
||||
|
||||
% UDP iperf3: throughput, jitter, packet loss.
|
||||
|
||||
\subsection{Parallel TCP}
|
||||
|
||||
% Parallel iperf3: throughput under contention (A->B, B->C, C->A).
|
||||
|
||||
\subsection{QUIC Performance}
|
||||
|
||||
% qperf: bandwidth, TTFB, connection establishment time.
|
||||
|
||||
\subsection{Video Streaming}
|
||||
|
||||
% RIST: bitrate, dropped frames, packets recovered, quality score.
|
||||
|
||||
\subsection{Application-Level Download}
|
||||
|
||||
% Nix cache: download duration for Firefox package.
|
||||
|
||||
\section{Tailscale Under Degraded Conditions}
|
||||
|
||||
% The central finding: Tailscale outperforming the raw Linux
|
||||
% networking stack under impairment.
|
||||
|
||||
\subsection{Observed Anomaly}
|
||||
|
||||
% Present the data showing Tailscale exceeding internal baseline
|
||||
% throughput under Medium/High impairment.
|
||||
|
||||
\subsection{Congestion Control Analysis}
|
||||
|
||||
% Reno vs CUBIC, RACK disabled to avoid spurious retransmits
|
||||
% under reordering.
|
||||
|
||||
\subsection{Tuned Kernel Parameters}
|
||||
|
||||
% Re-run results with tuned buffer sizes and congestion control
|
||||
% on the internal baseline, showing the gap closes.
|
||||
|
||||
\section{Source Code Analysis}
|
||||
|
||||
\subsection{Feature Matrix Overview}
|
||||
|
||||
% Summary of the 131-feature matrix across all ten VPNs.
|
||||
% Highlight key architectural differences that explain
|
||||
% performance results.
|
||||
|
||||
\subsection{Security Vulnerabilities}
|
||||
|
||||
% Vulnerabilities discovered during source code review.
|
||||
|
||||
\section{Summary of Findings}
|
||||
|
||||
% Brief summary table or ranking of VPNs by key metrics.
|
||||
% Save deeper interpretation for a Discussion chapter.
|
||||
36
Chapters/Zusammenfassung.tex
Normal file
@@ -0,0 +1,36 @@
|
||||
%----------------------------------------------------------------------------------------
|
||||
% GERMAN ABSTRACT PAGE
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\begingroup
|
||||
\renewcommand{\abstractname}{Zusammenfassung}
|
||||
\begin{abstract}
|
||||
\addchaptertocentry{Zusammenfassung}
|
||||
|
||||
Diese Arbeit evaluiert zehn Peer-to-Peer-Mesh-VPN-Implementierungen
|
||||
unter kontrollierten Netzwerkbedingungen mithilfe eines
|
||||
reproduzierbaren, Nix-basierten Benchmark-Frameworks, das auf einem
|
||||
Deployment-System namens Clan aufbaut. Die Implementierungen reichen
|
||||
von Kernel-Protokollen (WireGuard, als Referenz-Baseline) bis zu
|
||||
Userspace-Overlays (Tinc, Yggdrasil, Nebula, Hyprspace und
|
||||
weitere). Jede wird unter vier Beeinträchtigungsprofilen mit
|
||||
variierendem Paketverlust, Paketumsortierung, Latenz und Jitter
|
||||
getestet, was über 300 Messungen in sieben Benchmarks ergibt, von
|
||||
reinem TCP- und UDP-Durchsatz bis zu Video-Streaming und
|
||||
Anwendungs-Downloads.
|
||||
|
||||
Ein zentrales Ergebnis ist, dass keine einzelne Metrik die
|
||||
VPN-Leistung vollständig erfasst: Die Rangfolge verschiebt sich je
|
||||
nachdem, ob Durchsatz, Latenz, Retransmit-Verhalten oder
|
||||
Transferzeit auf Anwendungsebene gemessen wird. Unter
|
||||
Netzwerkbeeinträchtigung übertrifft Tailscale (über Headscale) den
|
||||
Standard-Netzwerkstack des Linux-Kernels, eine Anomalie, die wir
|
||||
auf die optimierten Congestion-Control- und Pufferparameter seines
|
||||
Userspace-IP-Stacks zurückführen. Eine erneute Durchführung der
|
||||
internen Baseline mit entsprechend angepassten Kernel-Parametern
|
||||
schließt die Lücke und bestätigt diese Erklärung. Die begleitende
|
||||
Quellcodeanalyse deckte eine kritische Sicherheitslücke in einer
|
||||
der evaluierten Implementierungen auf.
|
||||
|
||||
\end{abstract}
|
||||
\endgroup
|
||||
BIN
Figures/baseline/Average Test Duration per Machine.png
Normal file
|
After Width: | Height: | Size: 48 KiB |
BIN
Figures/baseline/Benchmark Success Rate.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
BIN
Figures/baseline/Nix Cache Mean Download Time.png
Normal file
|
After Width: | Height: | Size: 40 KiB |
BIN
Figures/baseline/Video Streaming/Packets Dropped.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
Figures/baseline/Video Streaming/RIST Quality.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
Figures/baseline/latency-vs-throughput.png
Normal file
|
After Width: | Height: | Size: 189 KiB |
BIN
Figures/baseline/parallel-tcp-scaling-factor.png
Normal file
|
After Width: | Height: | Size: 236 KiB |
BIN
Figures/baseline/ping/Average RTT.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
Figures/baseline/raw-throughput-vs-nix-cache-download-time.png
Normal file
|
After Width: | Height: | Size: 196 KiB |
BIN
Figures/baseline/reboot-reconnection-time-heatmap.png
Normal file
|
After Width: | Height: | Size: 308 KiB |
BIN
Figures/baseline/reboot-reconnection-time-per-vpn.png
Normal file
|
After Width: | Height: | Size: 228 KiB |
BIN
Figures/baseline/retransmits-single-stream-vs-parallel.png
Normal file
|
After Width: | Height: | Size: 218 KiB |
BIN
Figures/baseline/retransmits-vs-max-congestion-window.png
Normal file
|
After Width: | Height: | Size: 210 KiB |
BIN
Figures/baseline/retransmits-vs-throughput.png
Normal file
|
After Width: | Height: | Size: 196 KiB |
BIN
Figures/baseline/single-stream-vs-parallel-tcp-throughput.png
Normal file
|
After Width: | Height: | Size: 208 KiB |
BIN
Figures/baseline/tcp/Mycelium/Average Throughput.png
Normal file
|
After Width: | Height: | Size: 35 KiB |
BIN
Figures/baseline/tcp/TCP Retransmit Rate.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
BIN
Figures/baseline/tcp/TCP Throughput.png
Normal file
|
After Width: | Height: | Size: 49 KiB |
BIN
Figures/baseline/udp/UDP Packet Loss.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
BIN
Figures/baseline/udp/UDP Throughput.png
Normal file
|
After Width: | Height: | Size: 46 KiB |
@@ -250,8 +250,7 @@
|
||||
\newcommand{\decoRule}{\rule{.8\textwidth}{.4pt}} % New command for a
|
||||
% rule to be used under figures
|
||||
|
||||
\setcounter{tocdepth}{3} % The depth to which the document sections
|
||||
% are printed to the table of contents
|
||||
\setcounter{tocdepth}{1} % Only show chapters and sections in table of contents
|
||||
\ProvideDocumentCommand{\addchaptertocentry}{ m }{%
|
||||
\addcontentsline{toc}{chapter}{#1}%
|
||||
}
|
||||
@@ -390,8 +389,6 @@ KOMA-script documentation for details.}]{fancyhdr}
|
||||
{\normalsize \degreename\par}% Degree name
|
||||
\bigskip
|
||||
{\normalsize\bfseries \@title \par}% Thesis title
|
||||
\medskip
|
||||
{\normalsize \byname{} \authorname \par}% Author name
|
||||
\bigskip
|
||||
\end{center}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ extend-exclude = [
|
||||
"**/value",
|
||||
"**.rev",
|
||||
"**/facter-report.nix",
|
||||
"Chapters/Zusammenfassung.tex",
|
||||
"**/key.json",
|
||||
"pkgs/clan-cli/clan_lib/machines/test_suggestions.py",
|
||||
]
|
||||
@@ -16,4 +17,4 @@ dynamicdns = "dynamicdns"
|
||||
substituters = "substituters"
|
||||
|
||||
[default.extend-identifiers]
|
||||
pn = "pn"
|
||||
pn = "pn"
|
||||
|
||||
62
example.bib
@@ -1,62 +0,0 @@
|
||||
@article{Reference1,
|
||||
Abstract = {We have developed an enhanced Littrow configuration
|
||||
extended cavity diode laser (ECDL) that can be tuned without
|
||||
changing the direction of the output beam. The output of a
|
||||
conventional Littrow ECDL is reflected from a plane mirror fixed
|
||||
parallel to the tuning diffraction grating. Using a free-space
|
||||
Michelson wavemeter to measure the laser wavelength, we can tune
|
||||
the laser over a range greater than 10 nm without any alteration of
|
||||
alignment.},
|
||||
Author = {C. J. Hawthorn and K. P. Weber and R. E. Scholten},
|
||||
Journal = {Review of Scientific Instruments},
|
||||
Month = {12},
|
||||
Number = {12},
|
||||
Numpages = {3},
|
||||
Pages = {4477--4479},
|
||||
Title = {Littrow Configuration Tunable External Cavity Diode Laser
|
||||
with Fixed Direction Output Beam},
|
||||
Volume = {72},
|
||||
Url = {http://link.aip.org/link/?RSI/72/4477/1},
|
||||
Year = {2001}}
|
||||
|
||||
@article{Reference3,
|
||||
Abstract = {Operating a laser diode in an extended cavity which
|
||||
provides frequency-selective feedback is a very effective method of
|
||||
reducing the laser's linewidth and improving its tunability. We
|
||||
have developed an extremely simple laser of this type, built from
|
||||
inexpensive commercial components with only a few minor
|
||||
modifications. A 780~nm laser built to this design has an output
|
||||
power of 80~mW, a linewidth of 350~kHz, and it has been
|
||||
continuously locked to a Doppler-free rubidium transition for several days.},
|
||||
Author = {A. S. Arnold and J. S. Wilson and M. G. Boshier and J. Smith},
|
||||
Journal = {Review of Scientific Instruments},
|
||||
Month = {3},
|
||||
Number = {3},
|
||||
Numpages = {4},
|
||||
Pages = {1236--1239},
|
||||
Title = {A Simple Extended-Cavity Diode Laser},
|
||||
Volume = {69},
|
||||
Url = {http://link.aip.org/link/?RSI/69/1236/1},
|
||||
Year = {1998}}
|
||||
|
||||
@article{Reference2,
|
||||
Abstract = {We present a review of the use of diode lasers in
|
||||
atomic physics with an extensive list of references. We discuss the
|
||||
relevant characteristics of diode lasers and explain how to
|
||||
purchase and use them. We also review the various techniques that
|
||||
have been used to control and narrow the spectral outputs of diode
|
||||
lasers. Finally we present a number of examples illustrating the
|
||||
use of diode lasers in atomic physics experiments. Review of
|
||||
Scientific Instruments is copyrighted by The American Institute of Physics.},
|
||||
Author = {Carl E. Wieman and Leo Hollberg},
|
||||
Journal = {Review of Scientific Instruments},
|
||||
Keywords = {Diode Laser},
|
||||
Month = {1},
|
||||
Number = {1},
|
||||
Numpages = {20},
|
||||
Pages = {1--20},
|
||||
Title = {Using Diode Lasers for Atomic Physics},
|
||||
Volume = {62},
|
||||
Url = {http://link.aip.org/link/?RSI/62/1/1},
|
||||
Year = {1991}}
|
||||
|
||||
@@ -49,7 +49,10 @@
|
||||
|
||||
devShells.default = pkgs.mkShell {
|
||||
buildInputs = [
|
||||
pkgs.nodejs
|
||||
pkgs.vite
|
||||
texlive
|
||||
pkgs.pandoc
|
||||
pkgs.inkscape
|
||||
pkgs.python3
|
||||
];
|
||||
|
||||
125
main.tex
@@ -25,10 +25,10 @@
|
||||
|
||||
\documentclass[
|
||||
11pt, % The default document font size, options: 10pt, 11pt, 12pt
|
||||
oneside, % Two side (alternating margins) for binding by default,
|
||||
oneside,%twoside, % Two side (alternating margins) for binding by default,
|
||||
% uncomment to switch to one side
|
||||
english, % ngerman for German
|
||||
singlespacing, % Single line spacing, alternatives: onehalfspacing
|
||||
onehalfspacing, % Single line spacing, alternatives: onehalfspacing
|
||||
% or doublespacing
|
||||
%draft, % Uncomment to enable draft mode (no pictures, no links,
|
||||
% overfull hboxes indicated)
|
||||
@@ -38,7 +38,7 @@
|
||||
% the table of contents
|
||||
%toctotoc, % Uncomment to add the main table of contents to the
|
||||
% table of contents
|
||||
%parskip, % Uncomment to add space between paragraphs
|
||||
parskip, % Add space between paragraphs and remove indentation
|
||||
%nohyperref, % Uncomment to not load the hyperref package
|
||||
headsepline, % Uncomment to get a line under the header
|
||||
chapterinoneline, % Place the chapter title next to the number on one line
|
||||
@@ -59,6 +59,9 @@
|
||||
\usepackage{svg}
|
||||
\usepackage{acronym}
|
||||
\usepackage{subcaption} % For subfigures
|
||||
\usepackage{tikz}
|
||||
\usetikzlibrary{shapes.geometric}
|
||||
\usepackage[edges]{forest}
|
||||
|
||||
\usepackage[backend=bibtex,style=numeric,natbib=true]{biblatex} %
|
||||
% Use the bibtex backend with the authoryear citation style (which
|
||||
@@ -70,28 +73,32 @@
|
||||
\usepackage[autostyle=true]{csquotes} % Required to generate
|
||||
% language-dependent quotes in the bibliography
|
||||
|
||||
\newcommand{\bitem}[1]{
|
||||
\item \textbf{#1}}
|
||||
|
||||
\setcounter{secnumdepth}{0} % Only number chapters, not sections or subsections
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% MARGIN SETTINGS
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\geometry{
|
||||
paper=a4paper, % Change to letterpaper for US letter
|
||||
inner=2.5cm, % Inner margin
|
||||
outer=3.8cm, % Outer margin
|
||||
bindingoffset=.5cm, % Binding offset
|
||||
top=1.5cm, % Top margin
|
||||
bottom=1.5cm, % Bottom margin
|
||||
%showframe, % Uncomment to show how the type block is set on the page
|
||||
paper=a4paper,
|
||||
inner=3.0cm, % Bindungsseite – braucht mehr Platz
|
||||
outer=2.5cm, % Außenseite
|
||||
bindingoffset=0.5cm, % Zusätzlich für Klebebindung
|
||||
top=2.5cm,
|
||||
bottom=2.0cm,
|
||||
}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% THESIS INFORMATION
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\thesistitle{Decrypting the Overlay: A Reproducible Analysis of P2P
|
||||
VPN Implementation and Overhead} % Your thesis title, this is used in the title
|
||||
\thesistitle{An Analysis of P2P VPN Implementation} % Your thesis
|
||||
% title, this is used in the title
|
||||
% and abstract, print it elsewhere with \ttitle
|
||||
\supervisor{\textsc{Ber Lorke}} % Your supervisor's name, this is
|
||||
%\supervisor{\textsc{Ber Lorke}} % Your supervisor's name, this is
|
||||
% used in the title page, print it elsewhere with \supname
|
||||
\examiner{Prof. Dr. \textsc{Stefan Schmid}} % Your examiner's name,
|
||||
% this is not currently used anywhere in the template, print it
|
||||
@@ -152,17 +159,19 @@ and Management}} % Your department's name and URL, this is used in
|
||||
\begin{minipage}[t]{0.4\textwidth}
|
||||
\begin{flushleft} \large
|
||||
\emph{Author:}\\
|
||||
\href{http://www.johnsmith.com}{\authorname} % Author name -
|
||||
% remove the \href bracket to remove the link
|
||||
\textcolor{mdtRed}{\authorname}
|
||||
\end{flushleft}
|
||||
\end{minipage}
|
||||
\begin{minipage}[t]{0.4\textwidth}
|
||||
\begin{flushright} \large
|
||||
\emph{Supervisor:} \\
|
||||
\href{http://www.jamessmith.com}{\supname} % Supervisor name
|
||||
% - remove the \href bracket to remove the link
|
||||
\emph{First Examiner:} \\
|
||||
\textcolor{mdtRed}{Prof.\ Dr.\ \textsc{Stefan Schmid}}\\[1em]
|
||||
\emph{Second Examiner:} \\
|
||||
\textcolor{mdtRed}{\textsc{[TBD]}}\\[1em]
|
||||
%\emph{Supervisor:} \\
|
||||
%\textcolor{mdtRed}{\supname}
|
||||
\end{flushright}
|
||||
\end{minipage}\\[3cm]
|
||||
\end{minipage}\\[2cm]
|
||||
|
||||
\vfill
|
||||
|
||||
@@ -223,44 +232,32 @@ and Management}} % Your department's name and URL, this is used in
|
||||
\begin{abstract}
|
||||
\addchaptertocentry{\abstractname} % Add the abstract to the table of contents
|
||||
|
||||
This thesis evaluates the performance and fault tolerance of
|
||||
peer-to-peer mesh VPNs through an automated, reproducible
|
||||
benchmarking framework
|
||||
built on the Clan deployment system.
|
||||
We establish a cloud API–independent, binary-reproducible environment
|
||||
for deploying and assessing various VPN implementations,
|
||||
including Tailscale (via Headscale), Hyprspace, Lighthouse, Tinc,
|
||||
and ZeroTier.
|
||||
This thesis evaluates ten peer-to-peer mesh VPN implementations
|
||||
under controlled network conditions using a reproducible, Nix-based
|
||||
benchmarking framework built on a deployment system called Clan.
|
||||
The implementations range from kernel-level protocols (WireGuard,
|
||||
used as a reference baseline) to userspace overlays (Tinc,
|
||||
Yggdrasil, Nebula, Hyprspace, and others). We test each against
|
||||
four impairment profiles that vary packet loss, reordering, latency,
|
||||
and jitter, producing over 300 measurements across seven benchmarks
|
||||
from raw TCP and UDP throughput to video streaming and
|
||||
application-level downloads.
|
||||
|
||||
To simulate real-world network conditions, we employ four impairment profiles
|
||||
with varying degrees of packet loss, reordering, latency, and jitter.
|
||||
Our benchmark suite comprises RIST video streaming, Nix cache downloads,
|
||||
iperf3 throughput tests, QUIC transfers, and ping latency measurements.
|
||||
The experiments run on three machines interconnected at 1\,Gbps,
|
||||
each equipped with four CPU cores and eight threads.
|
||||
In total, we evaluate ten VPNs across seven benchmarks and four
|
||||
impairment profiles,
|
||||
yielding over 300 unique measurements.
|
||||
|
||||
Our analysis reveals that Tailscale outperforms the Linux kernel's
|
||||
default networking stack under degraded network conditions—a
|
||||
counterintuitive finding
|
||||
we investigate through source code analysis of packet handling,
|
||||
encryption schemes, and resilience mechanisms.
|
||||
This investigation also uncovered several critical security vulnerabilities
|
||||
across the evaluated VPNs.
|
||||
|
||||
We validate our hypotheses by re-running benchmarks with tuned
|
||||
Linux kernel parameters,
|
||||
demonstrating measurable improvements in network throughput.
|
||||
This work contributes to decentralized networking research
|
||||
by providing an extensible framework for reproducible P2P benchmarks,
|
||||
offering insights into mesh VPN implementation quality,
|
||||
and demonstrating that default Linux kernel settings are suboptimal
|
||||
for adverse network conditions.
|
||||
A central finding is that no single metric captures VPN performance:
|
||||
the rankings shift depending on whether one measures throughput,
|
||||
latency, retransmit behavior, or application-level transfer time.
|
||||
Under network impairment, Tailscale (via Headscale) outperforms the
|
||||
Linux kernel's default networking stack, an anomaly we trace to its
|
||||
userspace IP stack's tuned congestion-control and buffer parameters.
|
||||
Re-running the internal baseline with matching kernel-side tuning
|
||||
closes the gap, confirming the explanation. The accompanying source
|
||||
code analysis uncovered a critical security vulnerability in one of
|
||||
the evaluated implementations.
|
||||
|
||||
\end{abstract}
|
||||
|
||||
\input{Chapters/Zusammenfassung}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% ACKNOWLEDGEMENTS
|
||||
%----------------------------------------------------------------------------------------
|
||||
@@ -292,15 +289,6 @@ and Management}} % Your department's name and URL, this is used in
|
||||
|
||||
%\listoftables % Prints the list of tables
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% ABBREVIATIONS
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\section*{Abbreviations}
|
||||
\begin{acronym}[P2P] % [P2P] aligns entries to the longest label
|
||||
\acro{P2P}{Peer to Peer}
|
||||
\end{acronym}
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% PHYSICAL CONSTANTS/OTHER DEFINITIONS
|
||||
%----------------------------------------------------------------------------------------
|
||||
@@ -348,10 +336,12 @@ and Management}} % Your department's name and URL, this is used in
|
||||
|
||||
% Include the chapters of the thesis as separate files from the Chapters folder
|
||||
% Uncomment the lines as you write the chapters
|
||||
\include{Chapters/Motivation}
|
||||
\include{Chapters/Methodology}
|
||||
|
||||
\include{Chapters/Introduction}
|
||||
\include{Chapters/Background}
|
||||
\include{Chapters/Methodology}
|
||||
\include{Chapters/Results}
|
||||
\include{Chapters/Discussion}
|
||||
\include{Chapters/Conclusion}
|
||||
|
||||
%\include{Chapters/Chapter1}
|
||||
%\include{Chapters/Chapter2}
|
||||
@@ -369,6 +359,11 @@ and Management}} % Your department's name and URL, this is used in
|
||||
% Appendices folder
|
||||
% Uncomment the lines as you write the Appendices
|
||||
|
||||
\chapter{Abbreviations}
|
||||
\begin{acronym}[P2P] % [P2P] aligns entries to the longest label
|
||||
\acro{P2P}{Peer to Peer}
|
||||
\end{acronym}
|
||||
|
||||
%\include{Appendices/AppendixA}
|
||||
%\include{Appendices/AppendixB}
|
||||
%\include{Appendices/AppendixC}
|
||||
|
||||
@@ -44,80 +44,6 @@
|
||||
evaluation for a secure edge‐cloud continuum.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@inproceedings{hugerich_no-hop_2022,
|
||||
location = {New York, {NY}, {USA}},
|
||||
title = {No-hop: In-network Distributed Hash Tables},
|
||||
isbn = {978-1-4503-9168-9},
|
||||
url = {https://doi.org/10.1145/3493425.3502757},
|
||||
doi = {10.1145/3493425.3502757},
|
||||
series = {{ANCS} '21},
|
||||
shorttitle = {No-hop},
|
||||
abstract = {We make a case for a distributed hash table lookup in
|
||||
the network data plane. We argue that the lookup time performance
|
||||
of distributed hash tables can be further improved via an
|
||||
in-network data plane implementation. To this end, we introduce
|
||||
No-hop, an in-network distributed hash table implementation, which
|
||||
leverages the data plane programmability at line rate gained from
|
||||
P4. Our initial results of transporting distributed hash table
|
||||
logic from hosts' user space to the fast path of switches in the
|
||||
network data plane are promising. We show that No-hop improves the
|
||||
performance of locating the responsible host and maintains the
|
||||
properties of distributed hash tables while outperforming two baselines.},
|
||||
pages = {80--87},
|
||||
booktitle = {Proceedings of the Symposium on Architectures for
|
||||
Networking and Communications Systems},
|
||||
publisher = {Association for Computing Machinery},
|
||||
author = {Hügerich, Lily and Shukla, Apoorv and Smaragdakis, Georgios},
|
||||
urldate = {2024-09-23},
|
||||
date = {2022-01},
|
||||
file =
|
||||
{Attachment:/home/lhebendanz/Zotero/storage/WCI9PCTE/inet_nohop_decen_hashtable.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@article{bakhshi_state_2017,
|
||||
title = {State of the Art and Recent Research Advances in Software
|
||||
Defined Networking},
|
||||
volume = {2017},
|
||||
rights = {Copyright © 2017 Taimur Bakhshi.},
|
||||
issn = {1530-8677},
|
||||
url = {https://onlinelibrary.wiley.com/doi/abs/10.1155/2017/7191647},
|
||||
doi = {10.1155/2017/7191647},
|
||||
abstract = {Emerging network services and subsequent growth in the
|
||||
networking infrastructure have gained tremendous momentum in recent
|
||||
years. Application performance requiring rapid real-time network
|
||||
provisioning, optimized traffic management, and virtualization of
|
||||
shared resources has induced the conceptualization and adoption of
|
||||
new networking models. Software defined networking ({SDN}), one of
|
||||
the predominant and relatively new networking paradigms, seeks to
|
||||
simplify network management by decoupling network control logic
|
||||
from the underlying hardware and introduces real-time network
|
||||
programmability enabling innovation. The present work reviews the
|
||||
state of the art in software defined networking providing a
|
||||
historical perspective on complementary technologies in network
|
||||
programmability and the inherent shortcomings which paved the way
|
||||
for {SDN}. The {SDN} architecture is discussed along with popular
|
||||
protocols, platforms, and existing simulation and debugging
|
||||
solutions. Furthermore, a detailed analysis is presented around
|
||||
recent {SDN} development and deployment avenues ranging from mobile
|
||||
communications and data centers to campus networks and residential
|
||||
environments. The review concludes by highlighting implementation
|
||||
challenges and subsequent research directions being pursued in
|
||||
academia and industry to address issues related to application
|
||||
performance, control plane scalability and design, security, and
|
||||
interdomain connectivity in the context of {SDN}.},
|
||||
pages = {7191647},
|
||||
number = {1},
|
||||
journaltitle = {Wireless Communications and Mobile Computing},
|
||||
author = {Bakhshi, Taimur},
|
||||
urldate = {2024-09-23},
|
||||
date = {2017},
|
||||
langid = {english},
|
||||
file =
|
||||
{Attachment:/home/lhebendanz/Zotero/storage/TXFJ8DJB/Wireless
|
||||
Communications and Mobile Computing - 2017 - Bakhshi - State of the
|
||||
Art and Recent Research Advances in Software.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@online{noauthor_sci-hub_nodate,
|
||||
title = {Sci-Hub},
|
||||
url = {https://sci-hub.usualwant.com/},
|
||||
@@ -222,38 +148,6 @@
|
||||
Snapshot:/home/lhebendanz/Zotero/storage/VHPTLVMW/S0167642312000639.html:text/html},
|
||||
}
|
||||
|
||||
@article{laddad_keep_2022,
|
||||
title = {Keep {CALM} and {CRDT} On},
|
||||
volume = {16},
|
||||
issn = {2150-8097},
|
||||
url = {https://doi.org/10.14778/3574245.3574268},
|
||||
doi = {10.14778/3574245.3574268},
|
||||
abstract = {Despite decades of research and practical experience,
|
||||
developers have few tools for programming reliable distributed
|
||||
applications without resorting to expensive coordination
|
||||
techniques. Conflict-free replicated datatypes ({CRDTs}) are a
|
||||
promising line of work that enable coordination-free replication
|
||||
and offer certain eventual consistency guarantees in a relatively
|
||||
simple object-oriented {API}. Yet {CRDT} guarantees extend only to
|
||||
data updates; observations of {CRDT} state are unconstrained and
|
||||
unsafe. We propose an agenda that embraces the simplicity of
|
||||
{CRDTs}, but provides richer, more uniform guarantees. We extend
|
||||
{CRDTs} with a query model that reasons about which queries are
|
||||
safe without coordination by applying monotonicity results from the
|
||||
{CALM} Theorem, and lay out a larger agenda for developing {CRDT}
|
||||
data stores that let developers safely and efficiently interact
|
||||
with replicated application state.},
|
||||
pages = {856--863},
|
||||
number = {4},
|
||||
journaltitle = {Proc. {VLDB} Endow.},
|
||||
author = {Laddad, Shadaj and Power, Conor and Milano, Mae and
|
||||
Cheung, Alvin and Crooks, Natacha and Hellerstein, Joseph M.},
|
||||
urldate = {2024-11-24},
|
||||
date = {2022},
|
||||
file = {PDF:/home/lhebendanz/Zotero/storage/SEGUKMKS/Laddad et al.
|
||||
- 2022 - Keep CALM and CRDT On.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@inproceedings{dolstra_nix_2004,
|
||||
location = {{USA}},
|
||||
title = {Nix: A Safe and Policy-Free System for Software Deployment},
|
||||
@@ -379,28 +273,6 @@
|
||||
11, No 3:/home/lhebendanz/Zotero/storage/7A2CZ7A6/3309699.html:text/html},
|
||||
}
|
||||
|
||||
@article{noauthor_systematic_2024,
|
||||
title = {A Systematic Approach to Deal with Noisy Neighbour in
|
||||
Cloud Infrastructure {\textbar} Request {PDF}},
|
||||
url =
|
||||
{https://www.researchgate.net/publication/303741535_A_Systematic_Approach_to_Deal_with_Noisy_Neighbour_in_Cloud_Infrastructure},
|
||||
doi = {10.17485/ijst/2016/v9i19/89211},
|
||||
abstract = {Request {PDF} {\textbar} A Systematic Approach to Deal
|
||||
with Noisy Neighbour in Cloud Infrastructure {\textbar}
|
||||
Background/Objectives: One of the major challenges of the
|
||||
multitenant cloud model is performance unpredictability because of
|
||||
resource contention.... {\textbar} Find, read and cite all the
|
||||
research you need on {ResearchGate}},
|
||||
journaltitle = {{ResearchGate}},
|
||||
urldate = {2025-02-19},
|
||||
date = {2024-10-22},
|
||||
langid = {english},
|
||||
file = {Full Text PDF:/home/lhebendanz/Zotero/storage/3HXVA58J/2024
|
||||
- A Systematic Approach to Deal with Noisy Neighbour in Cloud
|
||||
Infrastructure Request
|
||||
PDF.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/8KU7F7XX/303741535_A_Systematic_Approach_to_Deal_with_Noisy_Neighbour_in_Cloud_Infrastructure.html:text/html},
|
||||
}
|
||||
|
||||
@online{noauthor_netzdg_nodate,
|
||||
title = {{NetzDG} - Gesetz zur Verbesserung der Rechtsdurchsetzung
|
||||
in sozialen Netzwerken},
|
||||
@@ -566,43 +438,6 @@
|
||||
2011 - UDP NAT and firewall puncturing in the wild.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@article{mehrab_new_2025,
|
||||
title = {A New Approach to Peer-to-Peer {VPN} Connectivity:
|
||||
Achieving Seamless Communication Without Firewalls},
|
||||
shorttitle = {A New Approach to Peer-to-Peer {VPN} Connectivity},
|
||||
abstract = {This study presents a novel approach to peer-to-peer
|
||||
Virtual Private Network ({VPN}) connectivity that eliminates
|
||||
traditional firewall dependencies. As remote work and distributed
|
||||
systems become increasingly prevalent, the limitations of
|
||||
conventional {VPN} architectures-including performance bottlenecks,
|
||||
complex configurations, and centralized points of failure-have
|
||||
become more apparent. The research evaluates {ZeroTier}, a modern
|
||||
network virtualization solution, against established {VPN}
|
||||
technologies (Wireguard, Tailscale, and {OpenVPN}) across a
|
||||
simulated environment of 100 globally distributed virtual nodes
|
||||
over a 30-day period. Results demonstrate {ZeroTier}'s superior
|
||||
performance across key metrics: 57\% faster connection
|
||||
establishment than Wireguard, 45\% lower latency compared to
|
||||
traditional solutions, higher throughput (875 Mbps for P2P
|
||||
connections), reduced resource utilization, and a 94.8\%
|
||||
first-attempt connection success rate. {ZeroTier} successfully
|
||||
established direct P2P connections in 85\% of cases, significantly
|
||||
outperforming competitors. The study details {ZeroTier}'s "Virtual
|
||||
Layer 2 Ethernet" architecture, which combines centralized
|
||||
coordination with decentralized communication and employs
|
||||
sophisticated {NAT} traversal techniques to enable direct
|
||||
device-to-device connections without complex firewall
|
||||
configurations. This approach creates a flat, software-defined
|
||||
network that spans multiple physical locations while maintaining
|
||||
enterprise-grade security through end-to-end encryption.},
|
||||
author = {Mehrab, Abu},
|
||||
date = {2025-04-11},
|
||||
file = {Full Text
|
||||
PDF:/home/lhebendanz/Zotero/storage/8T5JMBVF/Mehrab - 2025 - A New
|
||||
Approach to Peer-to-Peer VPN Connectivity Achieving Seamless
|
||||
Communication Without Firewalls.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@inproceedings{lackorzynski_comparative_2019,
|
||||
title = {A Comparative Study on Virtual Private Networks for Future
|
||||
Industrial Communication Systems},
|
||||
@@ -634,7 +469,7 @@
|
||||
networks, Industry, Logic gates, network security, Production
|
||||
facilities, secure transport, Software, tunneling, Virtual private
|
||||
networks, {VPN}},
|
||||
file = {PDF:/home/lhebendanz/Zotero/storage/DWSF7ERP/Lackorzynski
|
||||
file = {PDF:/home/lhebendanz/Zotero/storage/6Q5SUJX5/Lackorzynski
|
||||
et al. - 2019 - A Comparative Study on Virtual Private Networks for
|
||||
Future Industrial Communication
|
||||
Systems.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/PXWNAC6D/8758010.html:text/html},
|
||||
@@ -665,3 +500,120 @@
|
||||
date = {2026-02-11},
|
||||
note = {original-date: 2015-12-09T20:10:53Z},
|
||||
}
|
||||
|
||||
@article{leung_overview_2007,
|
||||
title = {An Overview of Packet Reordering in Transmission Control
|
||||
Protocol ({TCP}): Problems, Solutions, and Challenges},
|
||||
volume = {18},
|
||||
issn = {1558-2183},
|
||||
url = {https://ieeexplore.ieee.org/document/4118693},
|
||||
doi = {10.1109/TPDS.2007.1011},
|
||||
shorttitle = {An Overview of Packet Reordering in Transmission
|
||||
Control Protocol ({TCP})},
|
||||
abstract = {Transmission control protocol ({TCP}) is the most
|
||||
popular transport layer protocol for the Internet. Due to various
|
||||
reasons, such as multipath routing, route fluttering, and
|
||||
retransmissions, packets belonging to the same flow may arrive out
|
||||
of order at a destination. Such packet reordering violates the
|
||||
design principles of some traffic control mechanisms in {TCP} and,
|
||||
thus, poses performance problems. In this paper, we provide a
|
||||
comprehensive and in-depth survey on recent research on packet
|
||||
reordering in {TCP}. The causes and problems for packet reordering
|
||||
are discussed. Various representative algorithms are examined and
|
||||
compared by computer simulations. The ported program codes and
|
||||
simulation scripts are available for download. Some open questions
|
||||
are discussed to stimulate further research in this area},
|
||||
pages = {522--535},
|
||||
number = {4},
|
||||
journaltitle = {{IEEE} Transactions on Parallel and Distributed Systems},
|
||||
author = {Leung, Ka-cheong and Li, Victor O.k. and Yang, Daiqin},
|
||||
urldate = {2026-02-16},
|
||||
date = {2007-04},
|
||||
keywords = {{ARPANET}, Communication system control, Computational
|
||||
modeling, Computer simulations of {TCP}, congestion control, flow
|
||||
control, Internet, {IP} networks, Out of order, packet reordering,
|
||||
Routing, Telecommunication network reliability, Traffic control,
|
||||
Transmission Control Protocol ({TCP})., Transport protocols},
|
||||
file =
|
||||
{Snapshot:/home/lhebendanz/Zotero/storage/NEVVLVJL/4118693.html:text/html;Submitted
|
||||
Version:/home/lhebendanz/Zotero/storage/5SQILKLX/Leung et al. -
|
||||
2007 - An Overview of Packet Reordering in Transmission Control
|
||||
Protocol (TCP) Problems, Solutions, and Ch.pdf:application/pdf},
|
||||
}
|
||||
|
||||
@inproceedings{mcclellan_estimating_2013,
  title = {Estimating Retransmission Timeouts in {IP}-Based Transport Protocols},
  author = {{McClellan}, Stan and Peng, Wuxu},
  date = {2013-04-01},
  file = {Full Text PDF:/home/lhebendanz/Zotero/storage/9PZP9SVG/McClellan and Peng - 2013 - Estimating Retransmission Timeouts in IP-Based Transport Protocols.pdf:application/pdf},
}

@misc{guo_implementation_2025,
  title = {Implementation and Performance Evaluation of {TCP} over {QUIC} Tunnels},
  url = {http://arxiv.org/abs/2504.10054},
  doi = {10.48550/arXiv.2504.10054},
  abstract = {{QUIC}, a {UDP}-based transport protocol, addresses several limitations of {TCP} by offering built-in encryption, stream multiplexing, and improved loss recovery. To extend these benefits to legacy {TCP}-based applications, this paper explores the implementation and evaluation of a {TCP} over {QUIC} tunneling approach. A lightweight, stream-based tunnel is constructed using the Rust-based Quinn library, enabling {TCP} traffic to traverse {QUIC} connections transparently. Performance is evaluated under varying network conditions, including packet loss, high latency, and out-of-order delivery. Results indicate that {TCP} over {QUIC} maintains significantly higher throughput than native {TCP} in lossy or unstable environments, with up to a high improvement under 20\% packet loss. However, under ideal network conditions, tunneling introduces modest overhead due to encryption and user-space processing. These findings provide insights into the trade-offs of {TCP} over {QUIC} tunneling and its suitability for deployment in dynamic or impaired networks.},
  number = {{arXiv}:2504.10054},
  publisher = {{arXiv}},
  author = {Guo, Xuanhong and Bao, Zekun and Chen, Ying},
  urldate = {2026-02-16},
  date = {2025-10-07},
  eprinttype = {arxiv},
  eprint = {2504.10054 [cs]},
  keywords = {Computer Science - Networking and Internet Architecture},
  file = {Preprint PDF:/home/lhebendanz/Zotero/storage/FXJSBRXL/Guo et al. - 2025 - Implementation and Performance Evaluation of TCP over QUIC Tunnels.pdf:application/pdf;Snapshot:/home/lhebendanz/Zotero/storage/LJ56UH99/2504.html:text/html},
}

@report{whitner_improved_2008,
  title = {Improved Packet Reordering Metrics},
  url = {https://datatracker.ietf.org/doc/rfc5236},
  abstract = {This document presents two improved metrics for packet reordering, namely, Reorder Density ({RD}) and Reorder Buffer-occupancy Density ({RBD}). A threshold is used to clearly define when a packet is considered lost, to bound computational complexity at O(N), and to keep the memory requirement for evaluation independent of N, where N is the length of the packet sequence. {RD} is a comprehensive metric that captures the characteristics of reordering, while {RBD} evaluates the sequences from the point of view of recovery from reordering. These metrics are simple to compute yet comprehensive in their characterization of packet reordering. The measures are robust and orthogonal to packet loss and duplication. This memo provides information for the Internet community.},
  number = {{RFC} 5236},
  institution = {Internet Engineering Task Force},
  type = {Request for Comments},
  author = {Whitner, Rick and Banka, Tarun and Piratla, Nischal M. and Bare, Abhijit A. and Jayasumana, Professor Anura P.},
  urldate = {2026-02-16},
  date = {2008-06},
  doi = {10.17487/RFC5236},
  note = {Num Pages: 26},
  file = {Full Text PDF:/home/lhebendanz/Zotero/storage/KM9D625Y/Whitner et al. - 2008 - Improved Packet Reordering Metrics.pdf:application/pdf},
}

@@ -3,7 +3,9 @@
|
||||
imports = [ inputs.treefmt-nix.flakeModule ];
|
||||
|
||||
perSystem =
|
||||
{ ... }:
|
||||
{
|
||||
...
|
||||
}:
|
||||
{
|
||||
treefmt = {
|
||||
# Used to find the project root
|
||||
@@ -17,6 +19,7 @@
|
||||
"AI_Data/**"
|
||||
"Figures/**"
|
||||
];
|
||||
|
||||
programs.typos = {
|
||||
enable = true;
|
||||
threads = 4;
|
||||
|
||||