Compare commits

...

63 Commits

Author SHA1 Message Date
fa30099991 update ui-assets.nix
All checks were successful
checks / test (push) Has been skipped
assets1 / test (push) Has been skipped
checks-impure / test (push) Has been skipped
2023-11-08 21:02:45 +00:00
b4c657501e Merge pull request 'README: Fixed missing direnv install step' (#13) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 25s
checks / test (push) Successful in 1m32s
assets1 / test (push) Successful in 49s
Reviewed-on: #13
2023-11-08 21:47:57 +01:00
c4388733e5 Fix forgetting to delete api folder in UI 2023-11-08 21:47:57 +01:00
105b6f0b35 Merge pull request 'Improved README' (#12) from Luis-main into main
All checks were successful
assets1 / test (push) Successful in 22s
checks-impure / test (push) Successful in 24s
checks / test (push) Successful in 1m49s
Reviewed-on: #12
2023-11-01 16:11:31 +01:00
1f895a0668 README: Fixed missing direnv install step 2023-11-01 16:11:31 +01:00
adcca39dc9 Merge pull request 'Improved README' (#11) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 26s
checks / test (push) Successful in 1m38s
assets1 / test (push) Successful in 24s
Reviewed-on: #11
2023-10-31 13:32:24 +01:00
637921e722 nix fmt
All checks were successful
checks-impure / test (pull_request) Successful in 26s
checks / test (pull_request) Successful in 1m31s
2023-10-31 13:24:10 +01:00
44f12945b8 Improved README 2023-10-31 13:24:10 +01:00
c0a5743502 update ui-assets.nix
All checks were successful
checks-impure / test (push) Has been skipped
checks / test (push) Has been skipped
assets1 / test (push) Has been skipped
2023-10-30 16:41:11 +00:00
48bc94a5de Merge pull request 'nix fmt' (#9) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 24s
checks / test (push) Successful in 2m54s
assets1 / test (push) Successful in 48s
Reviewed-on: #9
2023-10-30 17:32:34 +01:00
1621b22c1c Fixed missing gnused in ui-assets.sh 2023-10-30 17:32:34 +01:00
1e9817dea2 Merge pull request 'nix fmt' (#8) from Luis-main into main
Some checks failed
checks-impure / test (push) Successful in 25s
checks / test (push) Successful in 2m43s
assets1 / test (push) Failing after 48s
Reviewed-on: #8
2023-10-30 17:26:29 +01:00
494067899e Generate ui assets package_name on the fly 2023-10-30 17:26:29 +01:00
b95194890d nix fmt 2023-10-30 17:26:29 +01:00
8c1c050ba3 Merge pull request 'Improved README' (#7) from Luis-main into main
Some checks failed
checks-impure / test (push) Successful in 26s
checks / test (push) Failing after 3m19s
assets1 / test (push) Successful in 22s
Reviewed-on: #7
2023-10-30 17:14:37 +01:00
1eff969fbf Improved README
Some checks failed
checks / test (pull_request) Failing after 1m29s
checks-impure / test (pull_request) Successful in 26s
2023-10-30 17:08:41 +01:00
55f252af92 update ui-assets.nix
All checks were successful
checks / test (push) Has been skipped
assets1 / test (push) Has been skipped
checks-impure / test (push) Has been skipped
2023-10-30 16:01:42 +00:00
81553a3bc6 Merge pull request 'Improved README and ui-asset workflow' (#6) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 25s
checks / test (push) Successful in 1m28s
assets1 / test (push) Successful in 49s
Reviewed-on: #6
2023-10-30 16:59:11 +01:00
84c5b0477e Improved README and ui-asset workflow
All checks were successful
checks-impure / test (pull_request) Successful in 25s
checks / test (pull_request) Successful in 1m24s
2023-10-30 16:51:39 +01:00
5273eee89f Merge pull request 'Added Getting Started to README' (#5) from Luis-main into main
Some checks failed
checks / test (push) Failing after 3m15s
assets1 / test (push) Successful in 22s
checks-impure / test (push) Successful in 26s
Reviewed-on: Luis/consulting-website#5
2023-10-30 16:00:49 +01:00
f714682948 update ui-assets.nix
All checks were successful
checks-impure / test (push) Has been skipped
checks / test (push) Has been skipped
assets1 / test (push) Has been skipped
2023-10-30 14:59:42 +00:00
51754676bc Merge pull request 'Added Getting Started to README' (#4) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 25s
checks / test (push) Successful in 1m50s
assets1 / test (push) Successful in 51s
Reviewed-on: Luis/consulting-website#4
2023-10-30 15:56:46 +01:00
627fd5e76d Added Getting Started to README
Some checks failed
checks-impure / test (pull_request) Successful in 24s
checks / test (pull_request) Failing after 1m40s
2023-10-30 13:37:03 +01:00
7a54c87fde Added Getting Started to README
All checks were successful
checks-impure / test (pull_request) Successful in 26s
checks / test (pull_request) Successful in 1m52s
2023-10-30 13:26:09 +01:00
ui-asset-bot
217f465dc7 update ui-assets.nix
All checks were successful
checks-impure / test (push) Has been skipped
checks / test (push) Has been skipped
assets1 / test (push) Has been skipped
2023-10-27 22:52:40 +00:00
81cf1e2f81 Merge pull request 'Added correct owner to update-ui-assets.sh' (#3) from Luis-main into main
All checks were successful
checks-impure / test (push) Successful in 25s
checks / test (push) Successful in 1m59s
assets1 / test (push) Successful in 48s
Reviewed-on: Luis/consulting-website#3
2023-10-28 00:48:34 +02:00
27c9146ef6 Merge branch 'main' into Luis-main
All checks were successful
checks-impure / test (pull_request) Successful in 25s
checks / test (pull_request) Successful in 2m2s
2023-10-28 00:47:10 +02:00
16d7947701 Added correct owner to update-ui-assets.sh
All checks were successful
checks-impure / test (pull_request) Successful in 24s
checks / test (pull_request) Successful in 2m2s
2023-10-28 00:29:27 +02:00
778130d00d Merge pull request 'Fully working ui and cli' (#1) from Luis-main into main
Some checks failed
checks-impure / test (push) Successful in 24s
checks / test (push) Successful in 1m24s
assets1 / test (push) Failing after 51s
Reviewed-on: Luis/consulting-website#1
2023-10-23 22:37:35 +02:00
d053d4fba4 Fixing broken CI
All checks were successful
checks-impure / test (pull_request) Successful in 35s
checks / test (pull_request) Successful in 6m23s
2023-10-23 03:23:57 +02:00
a659800cb8 Fixing broken CI
Some checks failed
checks-impure / test (pull_request) Successful in 41s
checks / test (pull_request) Failing after 7m10s
2023-10-23 03:17:57 +02:00
1f70b42401 Fixing broken CI
Some checks failed
checks-impure / test (pull_request) Failing after 40s
checks / test (pull_request) Successful in 4m31s
2023-10-23 03:08:27 +02:00
112f281fd9 Fixing broken CI 2023-10-23 02:50:45 +02:00
9238225556 Fixing merge-after-ci
Some checks failed
checks-impure / test (pull_request) Failing after 3m11s
checks / test (pull_request) Failing after 24m9s
2023-10-23 02:12:42 +02:00
f1b66d7996 Nix fmt doesn't complain anymore 2023-10-23 01:30:47 +02:00
7a354875c9 Fully working ui and cli 2023-10-23 01:23:06 +02:00
805efb7ec7 Working base cli webui 2023-10-23 01:18:58 +02:00
e5c0bc7fd4 Fixed AnyURL problem 2023-10-22 22:38:04 +02:00
d78a3bc684 Fixed linting problem 2023-10-22 22:20:23 +02:00
aaeccfbec5 Fixed linting problem 2023-10-22 22:17:10 +02:00
e69cc4940d After fixing problem 2023-10-22 21:24:02 +02:00
c7c47b6527 Befor fixing linting problem 2023-10-22 21:03:06 +02:00
545d389df0 Initial commit 2023-10-15 16:41:25 +02:00
6def19b4c8 Added new type FlakeName 2023-10-14 15:17:58 +02:00
718f647774 Added flake_name:str argument everywhere, nix fmt doesn't complain anymore 2023-10-14 14:57:36 +02:00
06d6edbfa7 API|CLI: Added argument 'flake_name' to all CLI and API endpoints. Tests missing. 2023-10-13 22:29:55 +02:00
740e5e2ebc Added state directory. 2023-10-13 19:56:10 +02:00
4ab4832d41 API: Added Path validators. api/flake/create inits git repo. Fixed vscode interpreter problem 2023-10-12 22:46:32 +02:00
DavHau
2f9ec882b2 Merge pull request 'vms.create: don't generate secrets if clan is remote' (#424) from lassulus-vm_generate into main
Reviewed-on: https://git.clan.lol/clan/clan-core/pulls/424
2023-10-11 10:10:26 +00:00
lassulus
3dad667f97 vms.create: don't generate secrets if clan is remote 2023-10-11 12:01:42 +02:00
clan-bot
1cca1faedd Merge pull request 'API: Added /api/flake/create. Fixed vscode search settings. Moved clan create to clan flake create' (#423) from Qubasa-main into main 2023-10-09 12:04:02 +00:00
b49433958b API: Added /api/flake/create. Fixed vscode search settings. Moved clan create to clan flake create 2023-10-09 14:01:34 +02:00
clan-bot
3650ab491d Merge pull request 'Automatic flake update - 2023-10-09T00:00+00:00' (#422) from flake-update-2023-10-09 into main 2023-10-09 00:04:23 +00:00
Clan Merge Bot
603b48a0fe update flake lock - 2023-10-09T00:00+00:00
Flake lock file updates:

• Updated input 'disko':
    'github:nix-community/disko/646ee25c25fffee122a66282861f5f56ad3e0fd9' (2023-10-02)
  → 'github:nix-community/disko/cde886a1c97ef2399b4f91409db045785020291f' (2023-10-05)
• Updated input 'flake-parts':
    'github:hercules-ci/flake-parts/7f53fdb7bdc5bb237da7fefef12d099e4fd611ca' (2023-09-01)
  → 'github:hercules-ci/flake-parts/c9afaba3dfa4085dbd2ccb38dfade5141e33d9d4' (2023-10-03)
• Updated input 'nixos-generators':
    'github:nix-community/nixos-generators/8ee78470029e641cddbd8721496da1316b47d3b4' (2023-09-04)
  → 'github:nix-community/nixos-generators/150f38bd1e09e20987feacb1b0d5991357532fb5' (2023-09-30)
• Updated input 'nixpkgs':
    'github:Mic92/nixpkgs/bc160df717ed1e9defe6044092ea66950976e3ed' (2023-09-26)
  → 'github:Mic92/nixpkgs/c3bd4f19ef0062d4462444aa413e26c917187ae9' (2023-09-30)
• Updated input 'sops-nix':
    'github:Mic92/sops-nix/2f375ed8702b0d8ee2430885059d5e7975e38f78' (2023-09-21)
  → 'github:Mic92/sops-nix/d7380c38d407eaf06d111832f4368ba3486b800e' (2023-10-08)
• Updated input 'treefmt-nix':
    'github:numtide/treefmt-nix/e951529be2e7c669487de78f5aef8597bbae5fca' (2023-09-21)
  → 'github:numtide/treefmt-nix/720bd006d855b08e60664e4683ccddb7a9ff614a' (2023-09-27)
2023-10-09 00:00:16 +00:00
ui-asset-bot
78758319f3 update ui-assets.nix 2023-10-08 15:27:26 +00:00
clan-bot
4fa2056834 Merge pull request 'cleanup work' (#421) from chore/fixes into main 2023-10-08 15:26:08 +00:00
ui-asset-bot
2874cf3bdb update ui-assets.nix 2023-10-08 15:24:32 +00:00
clan-bot
ded13b2da5 Merge pull request 'add coporate theme color variables' (#419) from feat/theme into main 2023-10-08 15:23:54 +00:00
Johannes Kirschbauer
5483018783 cleanup work 2023-10-08 17:20:43 +02:00
clan-bot
26b7effe99 Merge pull request 'CLI: Fixed bug in firefox opening addon page because of new profile' (#420) from Qubasa-main into main 2023-10-08 14:42:49 +00:00
Johannes Kirschbauer
6312f47545 remove: tailwindcss/plugin until we find solution 2023-10-08 16:31:16 +02:00
Johannes Kirschbauer
9ea71c90a6 resolve conflicts 2023-10-08 15:53:43 +02:00
Johannes Kirschbauer
d3310f861b add coporate theme color variables 2023-10-08 15:46:33 +02:00
260 changed files with 18803 additions and 15500 deletions

.envrc (2 changes)

@@ -3,3 +3,5 @@ if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then
fi
use flake


@@ -15,7 +15,7 @@ jobs:
id: changed-files
uses: tj-actions/changed-files@v32
with:
fetch-depth: 2
fetch-depth: 0
- name: Check if UI files are in the list of modified files
run: |
@@ -35,8 +35,8 @@ jobs:
export PATH=$PATH:$DEPS
# Setup git config
git config --global user.email "ui-asset-bot@clan.lol"
git config --global user.name "ui-asset-bot"
git config --global user.email "$BOT_EMAIL"
git config --global user.name "$BOT_NAME"
################################################
# #
@@ -66,3 +66,5 @@ jobs:
env:
MODIFIED_FILES: ${{ steps.changed-files.outputs.modified_files }}
GITEA_TOKEN: ${{ secrets.BOT_ACCESS_TOKEN }}
BOT_NAME: "ui-asset-bot"
BOT_EMAIL: "ui-asset-bot@gchq.icu"

.gitignore (vendored, 3 changes)

@@ -1,4 +1,7 @@
.direnv
.coverage.*
**/qubeclan
**/testdir
democlan
result*
/pkgs/clan-cli/clan_cli/nixpkgs

README.md (214 changes)

@@ -1,9 +1,211 @@
# clan.lol core
# Website Template
This is the monorepo of the clan.lol project
In here are all the packages we use, all the nixosModules we use/expose, the CLI and tests for everything.
Welcome to our website template repository! This template is designed to help you and your team build high-quality websites efficiently. We've carefully chosen the technologies to make development smooth and enjoyable. Here's what you can expect from this template:
## cLAN config tool
**Frontend**: Our frontend is powered by [React NextJS](https://nextjs.org/), a popular and versatile framework for building web applications.
- The quickstart guide can be found here: [here](/clan/clan-core/src/branch/main/docs/quickstart.md)
- Find the docs [here](/clan/clan-core/src/branch/main/docs/clan-config.md)
**Backend**: For the backend, we use Python along with the [FastAPI framework](https://fastapi.tiangolo.com/). To ensure seamless communication between the frontend and backend, we generate an `openapi.json` file from the Python code, which defines the REST API. This file is then used with [Orval](https://orval.dev/) to generate TypeScript bindings for the REST API. We're committed to code correctness, so we use [mypy](https://mypy-lang.org/) to ensure that our Python code is statically typed correctly. For backend testing, we rely on [pytest](https://docs.pytest.org/en/7.4.x/).
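To make the FastAPI → `openapi.json` → Orval pipeline concrete, here is a minimal, hypothetical sketch (the route, model, and output path are illustrative assumptions, not this repository's actual API):
```python
import json

from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class HealthResponse(BaseModel):
    status: str


@app.get("/api/health", response_model=HealthResponse)
def health() -> HealthResponse:
    # mypy checks this return type statically; FastAPI validates it at runtime
    return HealthResponse(status="ok")


if __name__ == "__main__":
    # Dump the generated OpenAPI document; a generator like Orval consumes
    # a file like this to emit TypeScript bindings for the frontend.
    with open("openapi.json", "w") as f:
        json.dump(app.openapi(), f, indent=2)
```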
**Continuous Integration (CI)**: We've set up a CI bot that rigorously checks your code using the quality assurance (QA) tools mentioned above. If any errors are detected, it will block pull requests until they're resolved.
**Dependency Management**: We use the [Nix package manager](https://nixos.org/) to manage dependencies and ensure reproducibility, making your development process more robust.
## Supported Operating Systems
- Linux
- macOS
# Getting Started with the Development Environment
Let's get your development environment up and running:
1. **Install Nix Package Manager**:
- You can install the Nix package manager by either [downloading the Nix installer](https://github.com/DeterminateSystems/nix-installer/releases) or running this command:
```bash
curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install
```
2. **Install direnv**:
- Download the direnv package from [here](https://direnv.net/docs/installation.html) or run the following command:
```bash
curl -sfL https://direnv.net/install.sh | bash
```
3. **Add direnv to your shell**:
- Direnv needs to [hook into your shell](https://direnv.net/docs/hook.html) to work.
You can do this by executing the following command:
```bash
echo 'eval "$(direnv hook zsh)"' >> ~/.zshrc && echo 'eval "$(direnv hook bash)"' >> ~/.bashrc && eval "$SHELL"
```
4. **Clone the Repository and Navigate**:
- Clone this repository and navigate to it.
5. **Allow .envrc**:
- When you enter the directory, you'll receive an error message like this:
```bash
direnv: error .envrc is blocked. Run `direnv allow` to approve its content
```
- Execute `direnv allow` to automatically execute the shell script `.envrc` when entering the directory.
6. **Build the Backend**:
- Go to the `pkgs/clan-cli` directory and execute:
```bash
direnv allow
```
- Wait for the backend to build.
7. **Start the Backend Server**:
- To start the backend server, execute:
```bash
clan webui --reload --no-open --log-level debug
```
- The server will automatically restart if any Python files change.
8. **Build the Frontend**:
- In a different shell, navigate to the `pkgs/ui` directory and execute:
```bash
direnv allow
```
- Wait for the frontend to build.
9. **Start the Frontend**:
- To start the frontend, execute:
```bash
npm run dev
```
- Access the website by going to [http://localhost:3000](http://localhost:3000).
# Setting Up Your Git Workflow
Let's set up your Git workflow to collaborate effectively:
1. **Register Your Gitea Account Locally**:
- Execute the following command to add your Gitea account locally:
```bash
tea login add
```
- Fill out the prompt as follows:
- URL of Gitea instance: `https://gitea.gchq.icu`
- Name of new Login [gitea.gchq.icu]: `gitea.gchq.icu:7171`
- Do you have an access token? No
- Username: YourUsername
- Password: YourPassword
- Set Optional settings: No
2. **Git Workflow**:
1. Add your changes to Git using `git add <file1> <file2>`.
2. Run `nix fmt` to lint your files.
3. Commit your changes with a descriptive message: `git commit -a -m "My descriptive commit message"`.
4. Make sure your branch has the latest changes from upstream by executing:
```bash
git fetch && git rebase origin/main --autostash
```
5. Use `git status` to check for merge conflicts.
6. If conflicts exist, resolve them. Here's a tutorial for resolving conflicts in [VSCode](https://code.visualstudio.com/docs/sourcecontrol/overview#_merge-conflicts).
7. After resolving conflicts, execute `git rebase --continue` and repeat step 5 until there are no conflicts.
3. **Create a Pull Request**:
- To automatically open a pull request that gets merged if all tests pass, execute:
```bash
merge-after-ci
```
4. **Review Your Pull Request**:
- Visit https://gitea.gchq.icu and go to the project page. Check under "Pull Requests" for any issues with your pull request.
5. **Push Your Changes**:
- If there are issues, fix them and redo step 2. Afterward, execute:
```bash
git push origin HEAD:YourUsername-main
```
- This will directly push to your open pull request.
# Debugging
When working on the backend of your project, debugging is an essential part of the development process. Here are some methods for debugging and testing the backend of your application:
## Test Backend Locally in Devshell with Breakpoints
To test the backend locally in a development environment and set breakpoints for debugging, follow these steps:
1. Run the following command to execute your tests and allow for debugging with breakpoints:
```bash
pytest -n0 -s --maxfail=1
```
You can place `breakpoint()` in your Python code where you want to trigger a breakpoint for debugging.
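As a minimal illustration (the test body is hypothetical), a test like the following drops into the `pdb` debugger when run with `pytest -n0 -s`:
```python
# Hypothetical test: `-s` keeps stdin/stdout attached and `-n0` disables
# xdist workers, so breakpoint() opens an interactive pdb prompt.
def test_example_debugging() -> None:
    value = 21 * 2
    breakpoint()  # inspect `value` here, then type `c` to continue
    assert value == 42
```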
## Test Backend Locally in a Nix Sandbox
To run your backend tests in a Nix sandbox, you have two options depending on whether your test functions have been marked as impure or not:
### Running Tests Marked as Impure
If your test functions need to execute `nix build` and have been marked as impure because you can't execute `nix build` inside a Nix sandbox, use the following command:
```bash
nix run .#impure-checks
```
This command will run the impure test functions.
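Roughly, such a test might look like this sketch (the `impure` marker name and the build target are assumptions for illustration, not taken from this repository):
```python
import subprocess

import pytest


# The marker name "impure" is an assumption; the point is that the test
# shells out to `nix build`, which cannot run inside the Nix sandbox.
@pytest.mark.impure
def test_flake_builds() -> None:
    # ".#some-attribute" is a placeholder flake output, not a real target.
    subprocess.run(["nix", "build", "--no-link", ".#some-attribute"], check=True)
```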
### Running Pure Tests
For test functions that have not been marked as impure and don't require executing `nix build`, you can use the following command:
```bash
nix build .#checks.x86_64-linux.clan-pytest --rebuild
```
This command will run all pure test functions.
### Inspecting the Nix Sandbox
If you need to inspect the Nix sandbox while running tests, follow these steps:
1. Insert an endless sleep into your test code where you want to pause the execution. For example:
```python
import time
time.sleep(3600) # Sleep for one hour
```
2. Use `cntr` and `psgrep` to attach to the Nix sandbox. This allows you to interactively debug your code while it's paused. For example:
```bash
cntr exec -w your_sandbox_name
psgrep -a -x your_python_process_name
```
These debugging and testing methods will help you identify and fix issues in your backend code efficiently, ensuring the reliability and robustness of your application.
# Using this Template
To make the most of this template:
1. Set up a new Gitea account named `ui-asset-bot`. Generate an access token with all access permissions and set it under `settings/actions/secrets` as a secret called `BOT_ACCESS_TOKEN`.
- Also, edit the file `.gitea/workflows/ui_assets.yaml` and change the `BOT_EMAIL` variable to match the email you set for that account. Gitea matches commits to accounts by their email address, so this step is essential.
2. Create a second Gitea account named `merge-bot`. Edit the file `pkgs/merge-after-ci/default.nix` if the name should be different. Under "Branches," set the main branch to be protected and add `merge-bot` to the whitelisted users for pushing. Set the unprotected file pattern to `**/ui-assets.nix`.
- Enable the status check for "build / test (pull_request)."
3. Add both `merge-bot` and `ui-asset-bot` as collaborators.
- Set the option to "Delete pull request branch after merge by default."
- Also, set the default merge style to "Rebase then create merge commit."
With this template, you're well-equipped to build and collaborate on high-quality websites efficiently. Happy coding!


@@ -2,28 +2,16 @@
imports = [
./impure/flake-module.nix
];
perSystem = { pkgs, lib, self', ... }: {
perSystem = { lib, self', ... }: {
checks =
let
nixosTestArgs = {
# reference to nixpkgs for the current system
inherit pkgs;
# this gives us a reference to our flake but also all flake inputs
inherit self;
};
nixosTests = lib.optionalAttrs (pkgs.stdenv.isLinux) {
# import our test
secrets = import ./secrets nixosTestArgs;
};
schemaTests = pkgs.callPackages ./schemas.nix {
inherit self;
};
flakeOutputs = lib.mapAttrs' (name: config: lib.nameValuePair "nixos-${name}" config.config.system.build.toplevel) self.nixosConfigurations
// lib.mapAttrs' (n: lib.nameValuePair "package-${n}") self'.packages
// lib.mapAttrs' (n: lib.nameValuePair "devShell-${n}") self'.devShells
// lib.mapAttrs' (name: config: lib.nameValuePair "home-manager-${name}" config.activation-script) (self'.legacyPackages.homeConfigurations or { });
in
nixosTests // schemaTests // flakeOutputs;
flakeOutputs;
};
}


@@ -1,54 +0,0 @@
{ self, lib, inputs, ... }:
let
inherit (builtins)
mapAttrs
toJSON
toFile
;
inherit (lib)
mapAttrs'
;
clanLib = self.lib;
clanModules = self.clanModules;
in
{
perSystem = { pkgs, ... }:
let
baseModule = {
imports =
(import (inputs.nixpkgs + "/nixos/modules/module-list.nix"))
++ [{
nixpkgs.hostPlatform = pkgs.system;
}];
};
optionsFromModule = module:
let
evaled = lib.evalModules {
modules = [ module baseModule ];
};
in
evaled.options.clan.networking;
clanModuleSchemas =
mapAttrs
(_: module: clanLib.jsonschema.parseOptions (optionsFromModule module))
clanModules;
mkTest = name: schema: pkgs.runCommand "schema-${name}" { } ''
${pkgs.check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${toFile "schema-${name}" (toJSON schema)}
touch $out
'';
in
{
checks = mapAttrs'
(name: schema: {
name = "schema-${name}";
value = mkTest name schema;
})
clanModuleSchemas;
};
}


@@ -1,34 +0,0 @@
{ self, runCommand, check-jsonschema, pkgs, lib, ... }:
let
clanModules.clanCore = self.nixosModules.clanCore;
baseModule = {
imports =
(import (pkgs.path + "/nixos/modules/module-list.nix"))
++ [{
nixpkgs.hostPlatform = "x86_64-linux";
}];
};
optionsFromModule = module:
let
evaled = lib.evalModules {
modules = [ module baseModule ];
};
in
evaled.options.clan;
clanModuleSchemas = lib.mapAttrs (_: module: self.lib.jsonschema.parseOptions (optionsFromModule module)) clanModules;
mkTest = name: schema: runCommand "schema-${name}" { } ''
${check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${builtins.toFile "schema-${name}" (builtins.toJSON schema)}
touch $out
'';
in
lib.mapAttrs'
(name: schema: {
name = "schema-${name}";
value = mkTest name schema;
})
clanModuleSchemas


@@ -1,6 +0,0 @@
#!/usr/bin/env bash
set -eux -o pipefail
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
export SOPS_AGE_KEY_FILE="${SCRIPT_DIR}/key.age"
nix run .# -- secrets "$@"


@@ -1,21 +0,0 @@
(import ../lib/test-base.nix) {
name = "secrets";
nodes.machine = { self, config, ... }: {
imports = [
(self.nixosModules.clanCore)
];
environment.etc."secret".source = config.sops.secrets.secret.path;
environment.etc."group-secret".source = config.sops.secrets.group-secret.path;
sops.age.keyFile = ./key.age;
clanCore.clanDir = "${./.}";
clanCore.machineName = "machine";
networking.hostName = "machine";
};
testScript = ''
machine.succeed("cat /etc/secret >&2")
machine.succeed("cat /etc/group-secret >&2")
'';
}


@@ -1 +0,0 @@
AGE-SECRET-KEY-1UCXEUJH6JXF8LFKWFHDM4N9AQE2CCGQZGXLUNV4TKR5KY0KC8FDQ2TY4NX


@@ -1 +0,0 @@
../../../machines/machine


@@ -1,4 +0,0 @@
{
"publickey": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"type": "age"
}


@@ -1 +0,0 @@
../../../groups/group


@@ -1,20 +0,0 @@
{
"data": "ENC[AES256_GCM,data:FgF3,iv:QBbnqZ6405qmwGKhbolPr9iobngXt8rtfUwCBOnmwRA=,tag:7gqI1zLVnTkZ0xrNn/LEkA==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSArMHcxKzhUZzNHQmQrb28x\nRC9UMlZMeDN3S1l1eHdUWmV4VUVReHhhQ0RnCjAyUXVlY1FmclVmL2lEdFZuTmll\nVENpa3AwbjlDck5zdGdHUTRnNEdEOUkKLS0tIER3ZlNMSVFnRElkRDcxajZnVmFl\nZThyYzcvYUUvaWJYUmlwQ3dsSDdjSjgK+tj34yBzrsIjm6V+T9wTgz5FdNGOR7I/\nVB4fh8meW0vi/PCK/rajC8NbqmK8qq/lwsF/JwfZKDSdG0FOJUB1AA==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2023-09-03T12:44:56Z",
"mac": "ENC[AES256_GCM,data:d5a0WfE5ZRLKF1NZkBfOl+cVI8ZZHd2rC+qX/giALjyrzk09rLxBeY4lO827GFfMmVy/oC7ceH9pjv2O7ibUiQtcbGIQVBg/WP+dVn8fRMWtF0jpv9BhYTutkVk3kiddqPGhp3mpwvls2ot5jtCRczTPk3JSxN3B1JSJCmj9GfQ=,iv:YmlkTYFNUaFRWozO8+OpEVKaSQmh+N9zpatwUNMPNyw=,tag:mEGQ4tdo82qlhKWalQuufg==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.7.3"
}
}


@@ -1 +0,0 @@
../../../machines/machine


@@ -1,20 +0,0 @@
{
"data": "ENC[AES256_GCM,data:bhxF,iv:iNs+IfSU/7EwssZ0GVTF2raxJkVlddfQEPGIBeUYAy8=,tag:JMOKTMW3/ic3UTj9eT9YFQ==,type:str]",
"sops": {
"kms": null,
"gcp_kms": null,
"azure_kv": null,
"hc_vault": null,
"age": [
{
"recipient": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"enc": "-----BEGIN AGE ENCRYPTED FILE-----\nYWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxS0g4TEt4S09LQnFKdCtk\nZTlUQWhNUHZmcmZqdGtuZkhhTkMzZDVaWWdNCi9vNnZQeklNaFBBU2x0ditlUDR0\nNGJlRmFFb09WSUFGdEh5TGViTWtacFEKLS0tIE1OMWdQMHhGeFBwSlVEamtHUkcy\ndzI1VHRkZ1o4SStpekVNZmpQSnRkeUkKYmPS9sR6U0NHxd55DjRk29LNFINysOl6\nEM2MTrntLxOHFWZ1QgNx34l4rYIIXx97ONvR0SRpxN0ECL9VonQeZg==\n-----END AGE ENCRYPTED FILE-----\n"
}
],
"lastmodified": "2023-08-23T09:11:08Z",
"mac": "ENC[AES256_GCM,data:8z819mP4FJXE/ExWM1+/dhaXIXzCglhBuZwE6ikl/jNLUAnv3jYL9c9vPrPFl2by3wXSNzqB4AOiTKDQoxDx2SBQKxeWaUnOajD6hbzskoLqCCBfVx7qOHrk/BULcBvMSxBca4RnzXXoMFTwKs2A1fXqAPvSQd1X4gX6Xm9VXWM=,iv:3YxZX+gaEcRKDN0Kuf9y1oWL+sT/J5B/5CtCf4iur9Y=,tag:0dwyjpvjCqbm9vIrz6WSWQ==,type:str]",
"pgp": null,
"unencrypted_suffix": "_unencrypted",
"version": "3.7.3"
}
}


@@ -1 +0,0 @@
../../../users/admin


@@ -1,4 +0,0 @@
{
"publickey": "age15x8u838dwqflr3t6csf4tlghxm4tx77y379ncqxav7y2n8qp7yzqgrwt00",
"type": "age"
}


@@ -1,44 +0,0 @@
{ config, lib, ... }:
{
options.clan.diskLayouts.singleDiskExt4 = {
device = lib.mkOption {
type = lib.types.str;
example = "/dev/disk/by-id/ata-Samsung_SSD_850_EVO_250GB_S21PNXAGB12345";
};
};
config.disko.devices = {
disk = {
main = {
type = "disk";
device = config.clan.diskLayouts.singleDiskExt4.device;
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}


@@ -1,12 +0,0 @@
{ self, lib, ... }: {
flake.clanModules = {
diskLayouts = lib.mapAttrs'
(name: _: lib.nameValuePair (lib.removeSuffix ".nix" name) {
imports = [
self.inputs.disko.nixosModules.disko
./diskLayouts/${name}
];
})
(builtins.readDir ./diskLayouts);
};
}


@@ -1,69 +0,0 @@
# cLAN config
`clan config` allows you to manage your nixos configuration via the terminal.
Similar to how `git config` reads and sets git options, `clan config` does the same with your NixOS options.
It also supports auto-completion, making it easy to find the right options.
## Set up clan-config
Add the clan tool to your flake inputs:
```
clan.url = "git+https://git.clan.lol/clan/clan-core";
```
and inside the mkFlake:
```
imports = [
inputs.clan.flakeModules.clan-config
];
```
Add an empty config file and add it to git
```command
echo "{}" > ./clan-settings.json
git add ./clan-settings.json
```
Import the clan-config module into your nixos configuration:
```nix
{
imports = [
# clan-settings.json is located in the same directory as your flake.
# Adapt the path if necessary.
(builtins.fromJSON (builtins.readFile ./clan-settings.json))
];
}
```
Make sure your NixOS configuration is set as the default:
```nix
{self, ...}: {
flake.nixosConfigurations.default = self.nixosConfigurations.my-machine;
}
```
Use all inputs provided by the clan-config devShell in your own devShell:
```nix
{ ... }: {
perSystem = { pkgs, self', ... }: {
devShells.default = pkgs.mkShell {
inputsFrom = [ self'.devShells.clan-config ];
# ...
};
};
}
```
Re-load your dev shell to make the `clan` tool available:
```command
clan config --help
```


@@ -1,155 +0,0 @@
# Initializing a New Clan Project
## Create a new Clan flake
1. To start a new project, execute the following command to add the clan cli to your shell:
```shellSession
$ nix shell git+https://git.clan.lol/clan/clan-core
```
2. Then use the following commands to initialize a new clan-flake:
```shellSession
$ mkdir ./my-flake
$ cd ./my-flake
$ clan create
```
This action will generate two primary files: `flake.nix` and `.clan-flake`.
```shellSession
$ ls -la
drwx------ joerg users 5 B a minute ago ./
drwxrwxrwt root root 139 B 12 seconds ago ../
.rw-r--r-- joerg users 77 B a minute ago .clan-flake
.rw-r--r-- joerg users 4.8 KB a minute ago flake.lock
.rw-r--r-- joerg users 242 B a minute ago flake.nix
```
### Understanding the .clan-flake Marker File
The `.clan-flake` marker file serves an optional purpose: it helps the `clan-cli` utility locate the project's root directory.
If `.clan-flake` is missing, `clan-cli` will instead search for other indicators like `.git`, `.hg`, `.svn`, or `flake.nix` to identify the project root.
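As an illustration of that lookup order (a sketch only, not the actual `clan-cli` implementation):
```python
from pathlib import Path

# Lookup order described above: the optional .clan-flake marker first,
# then common repository indicators.
MARKERS = [".clan-flake", ".git", ".hg", ".svn", "flake.nix"]


def find_project_root(start: Path | None = None) -> Path:
    current = (start or Path.cwd()).resolve()
    for directory in [current, *current.parents]:
        if any((directory / marker).exists() for marker in MARKERS):
            return directory
    raise RuntimeError("no project root found")
```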
## Add your first machine
```shellSession
$ clan machines create my-machine
$ clan machines list
my-machine
```
## Configure your machine
In this example we create a user named `my-user` that is allowed to log in to the machine:
```shellSession
# create a new user
$ clan config --machine my-machine users.users.my-user.isNormalUser true
# set some password
$ clan config --machine my-machine users.users.my-user.hashedPassword $(mkpasswd)
```
## Test your machine config inside a VM
```shellSession
$ nix build .#nixosConfigurations.my-machine.config.system.build.vm
...
$ ./result/bin/run-nixos-vm
```
---
# Migrating Existing NixOS Configuration Flake
Let's break down the migration step by step, explaining each action in detail:
#### Before You Begin
1. **Backup Your Current Configuration**: Always start by making a backup of your current NixOS configuration to ensure you can revert if needed.
```shellSession
$ cp -r /etc/nixos ~/nixos-backup
```
2. **Update Flake Inputs**: Add a new input for the `clan-core` dependency:
```nix
inputs.clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
- `url`: Specifies the Git repository URL for Clan Core.
- `inputs.nixpkgs.follows`: Tells Nix to use the same `nixpkgs` input as your main input (in this case, it follows `nixpkgs`).
3. **Update Outputs**: Then modify the `outputs` section of your `flake.nix` to adapt to Clan Core's new provisioning method. The key changes are as follows:
Add `clan-core` to the output
```diff
- outputs = { self, nixpkgs, }:
+ outputs = { self, nixpkgs, clan-core }:
```
Previous configuration:
```nix
{
nixosConfigurations.example-desktop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./configuration.nix
];
[...]
};
}
```
After change:
```nix
let clan = clan-core.lib.buildClan {
# this needs to point at the repository root
directory = self;
specialArgs = {};
machines = {
example-desktop = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [
./configuration.nix
];
};
};
};
in { inherit (clan) nixosConfigurations clanInternals; }
```
- `nixosConfigurations`: Defines NixOS configurations, using Clan Core's `buildClan` function to manage the machines.
- Inside `machines`, a new machine configuration is defined (in this case, `example-desktop`).
- Inside `example-desktop` which is the target machine hostname, `nixpkgs.hostPlatform` specifies the host platform as `x86_64-linux`.
- `clanInternals`: Is required to enable evaluation of the secret generation/upload script on every architecture
4. **Rebuild and Switch**: Rebuild your NixOS configuration using the updated flake:
```shellSession
$ sudo nixos-rebuild switch --flake .
```
- This command rebuilds and switches to the new configuration. Make sure to include the `--flake .` argument to use the current directory as the flake source.
5. **Test Configuration**: Before rebooting, verify that your new configuration builds without errors or warnings.
6. **Reboot**: If everything is fine, you can reboot your system to apply the changes:
```shellSession
$ sudo reboot
```
7. **Verify**: After the reboot, confirm that your system is running with the new configuration, and all services and applications are functioning as expected.
By following these steps, you've successfully migrated your NixOS Flake configuration to include the `clan-core` input and adapted the `outputs` section to work with Clan Core's new machine provisioning method.


@@ -1,173 +0,0 @@
# Managing Secrets with Clan
Clan enables encryption of secrets within a Clan flake, ensuring secure sharing among users.
This documentation will guide you through managing secrets with the Clan CLI,
which utilizes the [sops](https://github.com/getsops/sops) format and
integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
## 1. Generating Keys and Creating Secrets
To begin, generate a key pair:
```shellSession
$ clan secrets key generate
```
**Output**:
```
Public key: age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user.
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user. Please back it up on a secure location or you will lose access to your secrets.
Also add your age public key to the repository with 'clan secrets users add youruser age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7' (replace you
user with your user name)
```
⚠️ **Important**: Back up the generated private key securely, or you risk losing access to your secrets.
Next, add your public key to the Clan flake repository:
```shellSession
$ clan secrets users add <your_username> <your_public_key>
```
Doing so creates this structure in your Clan flake:
```
sops/
└── users/
└── <your_username>/
└── key.json
```
Now, to set your first secret:
```shellSession
$ clan secrets set mysecret
Paste your secret:
```
Note: As you type your secret, keypresses won't be displayed. Press Enter to save the secret.
Retrieve the stored secret:
```shellSession
$ clan secrets get mysecret
```
And list all secrets like this:
```shellSession
$ clan secrets list
```
Secrets in the repository follow this structure:
```
sops/
├── secrets/
│ └── <secret_name>/
│ ├── secret
│ └── users/
│ └── <your_username>/
```
The content of the secret is stored encrypted inside the `secret` file under `mysecret`.
By default, secrets are encrypted with your key to ensure readability.
## 2. Adding Machine Keys
New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:
```shellSession
$ clan secrets machines list
```
For existing machines, add their keys:
```shellSession
$ clan secrets machines add <machine_name> <age_key>
```
To fetch an age key from an SSH host key:
```shellSession
$ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
```
## 3. Assigning Access
By default, secrets are encrypted for your key. To specify which users and machines can access a secret:
```shellSession
$ clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
```
You can add machines/users to existing secrets without modifying the secret:
```shellSession
$ clan secrets machines add-secret <machine_name> <secret_name>
```
## 4. Utilizing Groups
For convenience, Clan CLI allows group creation to simplify access management. Here's how:
1. **Creating Groups**:
Assign users to a new group, e.g., `admins`:
```shellSession
$ clan secrets groups add admins <username>
```
2. **Listing Groups**:
```shellSession
$ clan secrets groups list
```
3. **Assigning Secrets to Groups**:
```shellSession
$ clan secrets groups add-secret <group_name> <secret_name>
```
# NixOS integration
A NixOS machine will automatically import all secrets that are encrypted for the
current machine. At runtime it will use the host key to decrypt all secrets into
an in-memory, non-persistent filesystem using
[sops-nix](https://github.com/Mic92/sops-nix). In your nixos configuration you
can get a path to secrets like this `config.sops.secrets.<name>.path`. Example:
```nix
{ config, ...}: {
sops.secrets.my-password.neededForUsers = true;
users.users.mic92 = {
isNormalUser = true;
passwordFile = config.sops.secrets.my-password.path;
};
}
```
See the [readme](https://github.com/Mic92/sops-nix) of sops-nix for more
examples.
# Importing existing sops-based keys / sops-nix
`clan secrets` stores each secret in a single file, whereas [sops](https://github.com/Mic92/sops-nix)
commonly puts all secrets in a single YAML or JSON document.
If you already happen to use sops-nix, you can migrate by importing these documents with the `clan secrets import-sops` command:
```shellSession
% clan secrets import-sops --prefix matchbox- --group admins --machine matchbox nixos/matchbox/secrets/secrets.yaml
```
This will create secrets for each secret found in `nixos/matchbox/secrets/secrets.yaml` in a ./sops folder of your repository.
Each member of the group `admins` will be able to decrypt these secrets.
Since our clan secret module will auto-import secrets that are encrypted for a particular NixOS machine,
you can now remove `sops.secrets.<secrets> = { };` unless you need to specify more options for the secret, such as the owner/group of the secret file.


@@ -1,43 +0,0 @@
# Self Hosting
## General Description
Self-hosting refers to the practice of hosting and maintaining servers, networks, storage, services, and other types of infrastructure by oneself rather than relying on a third-party vendor. This could involve running a server from a home or business location, or leasing a dedicated server at a data center.
There are several reasons for choosing to self-host. These can include:
1. Cost savings: Over time, self-hosting can be more cost-effective, especially for businesses with large scale needs.
1. Control: Self-hosting provides a greater level of control over the infrastructure and services. It allows the owner to customize the system to their specific needs.
1. Privacy and security: Self-hosting can offer improved privacy and security because data remains under the control of the host rather than being stored on third-party servers.
1. Independence: Being independent of third-party services can ensure that one's websites, applications, or services remain up even if a third-party service goes down.
## Stories
### Story 1: Private mumble server hosted at home
Alice wants to self-host a mumble server for her family.
- She visits the cLAN website and follows the instructions on how to install cLAN-OS on her server.
- Alice logs into a terminal on her server via SSH (or alternatively uses the cLAN GUI app).
- Using the cLAN CLI or GUI tool, Alice creates a new private network for her family (VPN).
- Alice now browses a list of curated cLAN modules and finds a module for mumble.
- She adds this module to her network using the cLAN tool.
- After that, she uses the clan tool to invite her family members to her network
- Other family members join the private network via the invitation.
- By accepting the invitation, other members automatically install all required software to interact with the network on their machine.
### Story 2: Adding a service to an existing network
Alice wants to add a photos app to her private network
- She uses the clan CLI or GUI tool to manage her existing private cLAN family network
- She discovers a module for photoprism, and adds it to her server using the tool
- Other members who are already part of her network, will receive a notification that an update is required to their environment
- After accepting, all new software and services to interact with the new photoprism service will be installed automatically.
## Challenges
...


@@ -1,37 +0,0 @@
# Joining a cLAN network
## General Description
Joining a self-hosted infrastructure involves connecting to a network, server, or system that is privately owned and managed, instead of being hosted by a third-party service provider. This could be a business's internal server, a private cloud setup, or any other private IT infrastructure that is not publicly accessible or controlled by outside entities.
## Stories
### Story 1: Joining a private network
Alice's son Bob has never heard of cLAN, but receives an invitation URL from Alice, who has already set up a private cLAN network for her family.
Bob opens the invitation link and lands on the cLAN website. He quickly learns about what cLAN is and can see that the invitation is for a private network of his family that hosts a number of services, like a private voice chat and a photo sharing platform.
Bob decides to join the network and follows the instructions to install the cLAN tool on his computer.
Feeding the invitation link to the cLAN tool, Bob registers his machine with the network.
All programs required to interact with the network will be installed and configured automatically and securely.
Optionally, Bob can customize the configuration of these programs through a simplified configuration interface.
### Story 2: Receiving breaking changes
The cLAN family network which Bob is part of received an update.
The existing photo sharing service has been removed and replaced with another alternative service. The new photo sharing service requires a different client app to view and upload photos.
Bob accepts the update. Now his environment will be updated. The old client software will be removed and the new one installed.
Because Bob has customized the previous photo viewing app, he is notified that this customization is no longer valid, as the software has been removed (deprecation message).
Optionally, Bob can now customize the new photo viewing software through his cLAN configuration app or via a config file.
## Challenges
...


@@ -1,25 +0,0 @@
# cLAN module maintaining
## General Description
cLAN modules are pieces of software that can be used by admins to build a private or public infrastructure.
cLAN modules should have the following properties:
1. Documented: It should be clear what the module does and how to use it.
1. Self contained: A module should be usable as is. If it requires any other software or settings, those should be delivered with the module itself.
1. Simple to deploy and use: Modules should have opinionated defaults that just work. Any customization should be optional
## Stories
### Story 1: Maintaining a shared folder module
Alice maintains a module for a shared folder service that she uses in her own infra, but also publishes for the community.
By following clan module standards (Backups, Interfaces, Output schema, etc), other community members have an easy time re-using the module within their own infra.
She benefits from publishing the module, because other community members start using it and help to maintain it.
## Challenges
...


@@ -1,17 +0,0 @@
# (TITLE)
## General Description
## Stories
### Story 1: Some Description
Alice...
### Story 2: Some Description
Bob...
## Challenges
...


@@ -1,69 +0,0 @@
# ZeroTier Configuration with NixOS in Clan
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
include a new machine into the VPN.
## 1. Setting Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. After the address allocation, the controller's continuous operation is not
crucial.
### Instructions:
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Input the below configuration to the NixOS
configuration of the controller machine:
```nix
clan.networking.zerotier.controller = {
enable = true;
public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
```console
$ clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Integrating a New Machine to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
### Instructions:
1. **Update Configuration**: On the new machine, incorporate the below to its
configuration, substituting `<CONTROLLER>` with the controller machine name:
```nix
{ config, ... }: {
clan.networking.zerotier.networkId = builtins.readFile (config.clanCore.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
2. **Update the New Machine**: Execute:
```console
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.
3. **Retrieve the ZeroTier ID**: On the `new_machine`, execute:
```console
$ sudo zerotier-cli info
```
Example Output: `200 info d2c71971db 1.12.1 OFFLINE`, where `d2c71971db` is
the ZeroTier ID.
4. **Authorize the New Machine on Controller**: On the controller machine,
execute:
```console
$ sudo zerotier-members allow <ID>
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
5. **Verify Connection**: On the `new_machine`, re-execute:
```console
$ sudo zerotier-cli info
```
The status should now be "ONLINE" e.g., `200 info 47303517ef 1.12.1 ONLINE`.
Congratulations! The new machine is now part of the VPN, and the ZeroTier
configuration on NixOS within the Clan project is complete.

flake.lock (generated, 104 changes)

@@ -1,25 +1,5 @@
{
"nodes": {
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1696266752,
"narHash": "sha256-wJnMDFM21+xXdsXSs6pXMElbv4YfqmQslcPApRuaYKs=",
"owner": "nix-community",
"repo": "disko",
"rev": "646ee25c25fffee122a66282861f5f56ad3e0fd9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
@@ -27,11 +7,11 @@
]
},
"locked": {
"lastModified": 1693611461,
"narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=",
"lastModified": 1696343447,
"narHash": "sha256-B2xAZKLkkeRFG5XcHHSXXcP7To9Xzr59KXeZiRf4vdQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca",
"rev": "c9afaba3dfa4085dbd2ccb38dfade5141e33d9d4",
"type": "github"
},
"original": {
@@ -60,90 +40,30 @@
"type": "github"
}
},
"nixlib": {
"locked": {
"lastModified": 1693701915,
"narHash": "sha256-waHPLdDYUOHSEtMKKabcKIMhlUOHPOOPQ9UyFeEoovs=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "f5af57d3ef9947a70ac86e42695231ac1ad00c25",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixos-generators": {
"inputs": {
"nixlib": "nixlib",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1693791338,
"narHash": "sha256-wHmtB5H8AJTUaeGHw+0hsQ6nU4VyvVrP2P4NeCocRzY=",
"owner": "nix-community",
"repo": "nixos-generators",
"rev": "8ee78470029e641cddbd8721496da1316b47d3b4",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixos-generators",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1695741452,
"narHash": "sha256-pDIQmCR0fyb6FKjvURaD6yC5YnE/+rxs5iFQQGgcoNE=",
"owner": "Mic92",
"lastModified": 1697059129,
"narHash": "sha256-9NJcFF9CEYPvHJ5ckE8kvINvI84SZZ87PvqMbH6pro0=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "bc160df717ed1e9defe6044092ea66950976e3ed",
"rev": "5e4c2ada4fcd54b99d56d7bd62f384511a7e2593",
"type": "github"
},
"original": {
"owner": "Mic92",
"ref": "fakeroot",
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"disko": "disko",
"flake-parts": "flake-parts",
"floco": "floco",
"nixos-generators": "nixos-generators",
"nixpkgs": "nixpkgs",
"sops-nix": "sops-nix",
"treefmt-nix": "treefmt-nix"
}
},
"sops-nix": {
"inputs": {
"nixpkgs": [
"sops-nix"
],
"nixpkgs-stable": []
},
"locked": {
"lastModified": 1695284550,
"narHash": "sha256-z9fz/wz9qo9XePEvdduf+sBNeoI9QG8NJKl5ssA8Xl4=",
"owner": "Mic92",
"repo": "sops-nix",
"rev": "2f375ed8702b0d8ee2430885059d5e7975e38f78",
"type": "github"
},
"original": {
"owner": "Mic92",
"repo": "sops-nix",
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
@@ -151,11 +71,11 @@
]
},
"locked": {
"lastModified": 1695290086,
"narHash": "sha256-ol6licpIAzc9oMsEai/9YZhgSMcrnlnD/3ulMLGNKL0=",
"lastModified": 1695822946,
"narHash": "sha256-IQU3fYo0H+oGlqX5YrgZU3VRhbt2Oqe6KmslQKUO4II=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "e951529be2e7c669487de78f5aef8597bbae5fca",
"rev": "720bd006d855b08e60664e4683ccddb7a9ff614a",
"type": "github"
},
"original": {


@@ -1,22 +1,12 @@
{
description = "clan.lol base operating system";
nixConfig.extra-substituters = [ "https://cache.clan.lol" ];
nixConfig.extra-trusted-public-keys = [ "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" ];
description = "Consulting Website";
inputs = {
#nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# https://github.com/NixOS/nixpkgs/pull/257462
nixpkgs.url = "github:Mic92/nixpkgs/fakeroot";
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
floco.url = "github:aakropotkin/floco";
floco.inputs.nixpkgs.follows = "nixpkgs";
disko.url = "github:nix-community/disko";
disko.inputs.nixpkgs.follows = "nixpkgs";
sops-nix.url = "github:Mic92/sops-nix";
sops-nix.inputs.nixpkgs.follows = "sops-nix";
sops-nix.inputs.nixpkgs-stable.follows = "";
nixos-generators.url = "github:nix-community/nixos-generators";
nixos-generators.inputs.nixpkgs.follows = "nixpkgs";
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
treefmt-nix.url = "github:numtide/treefmt-nix";
@@ -34,14 +24,7 @@
./checks/flake-module.nix
./devShell.nix
./formatter.nix
./templates/flake-module.nix
./clanModules/flake-module.nix
./pkgs/flake-module.nix
./lib/flake-module.nix
./nixosModules/flake-module.nix
./nixosModules/clanCore/flake-module.nix
];
});
}


@@ -11,9 +11,10 @@
treefmt.flakeFormatter = true;
treefmt.programs.shellcheck.enable = true;
treefmt.programs.prettier.enable = true;
treefmt.programs.prettier.settings.plugins = [
"${self'.packages.prettier-plugin-tailwindcss}/lib/node_modules/prettier-plugin-tailwindcss/dist/index.mjs"
];
# TODO: add custom prettier package, that uses our ui/node_modules
# treefmt.programs.prettier.settings.plugins = [
# "${self'.packages.prettier-plugin-tailwindcss}/lib/node_modules/prettier-plugin-tailwindcss/dist/index.mjs"
# ];
treefmt.settings.formatter.prettier.excludes = [
"secrets.yaml"
"key.json"


@@ -1,59 +0,0 @@
{ nixpkgs, self, lib }:
{ directory # The directory containing the machines subdirectory
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
}:
let
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
machineSettings = machineName:
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json")
(builtins.fromJSON
(builtins.readFile (directory + /machines/${machineName}/settings.json)));
# TODO: remove default system once we have a hardware-config mechanism
nixosConfiguration = { system ? "x86_64-linux", name }: nixpkgs.lib.nixosSystem {
modules = [
self.nixosModules.clanCore
(machineSettings name)
(machines.${name} or { })
{
clanCore.machineName = name;
clanCore.clanDir = directory;
nixpkgs.hostPlatform = lib.mkForce system;
}
];
inherit specialArgs;
};
allMachines = machinesDirs // machines;
supportedSystems = [
"x86_64-linux"
"aarch64-linux"
"riscv64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
nixosConfigurations = lib.mapAttrs (name: _: nixosConfiguration { inherit name; }) allMachines;
# This instantiates nixos for each system that we support:
# configPerSystem = <system>.<machine>.nixosConfiguration
# We need this to build nixos secret generators for each system
configsPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; }) allMachines))
supportedSystems);
in
{
inherit nixosConfigurations;
clanInternals = {
machines = configsPerSystem;
all-machines-json = lib.mapAttrs
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
configsPerSystem;
};
}


@@ -1,6 +0,0 @@
{ lib, self, nixpkgs, ... }:
{
jsonschema = import ./jsonschema { inherit lib; };
buildClan = import ./build-clan { inherit lib self nixpkgs; };
}


@@ -1,14 +0,0 @@
{ lib
, inputs
, self
, ...
}: {
imports = [
./jsonschema/flake-module.nix
];
flake.lib = import ./default.nix {
inherit lib;
inherit self;
inherit (inputs) nixpkgs;
};
}


@@ -1,167 +0,0 @@
{ lib ? import <nixpkgs/lib> }:
let
# from nixos type to jsonschema type
typeMap = {
bool = "boolean";
float = "number";
int = "integer";
str = "string";
path = "string"; # TODO add prober path checks
};
# remove _module attribute from options
clean = opts: builtins.removeAttrs opts [ "_module" ];
# throw error if option type is not supported
notSupported = option: throw
"option type '${option.type.name}' ('${option.type.description}') not supported by jsonschema converter";
in
rec {
# parses a nixos module to a jsonschema
parseModule = module:
let
evaled = lib.evalModules {
modules = [ module ];
};
in
parseOptions evaled.options;
# parses a set of evaluated nixos options to a jsonschema
parseOptions = options':
let
options = clean options';
# parse options to jsonschema properties
properties = lib.mapAttrs (_name: option: parseOption option) options;
isRequired = prop: ! (prop ? default || prop.type == "object");
requiredProps = lib.filterAttrs (_: prop: isRequired prop) properties;
required = lib.optionalAttrs (requiredProps != { }) {
required = lib.attrNames requiredProps;
};
in
# return jsonschema
required // {
type = "object";
inherit properties;
};
# parses an evaluated nixos option to a jsonschema property definition
parseOption = option:
let
default = lib.optionalAttrs (option ? default) {
inherit (option) default;
};
description = lib.optionalAttrs (option ? description) {
inherit (option) description;
};
in
# handle nested options (not a submodule)
if ! option ? _type
then parseOptions option
# throw if not an option
else if option._type != "option"
then throw "parseOption: not an option"
# parse nullOr
else if option.type.name == "nullOr"
# return jsonschema property definition for nullOr
then default // description // {
type = [
"null"
(typeMap.${option.type.functor.wrapped.name} or (notSupported option))
];
}
# parse bool
else if option.type.name == "bool"
# return jsonschema property definition for bool
then default // description // {
type = "boolean";
}
# parse float
else if option.type.name == "float"
# return jsonschema property definition for float
then default // description // {
type = "number";
}
# parse int
else if (option.type.name == "int" || option.type.name == "positiveInt")
# return jsonschema property definition for int
then default // description // {
type = "integer";
}
# parse string
else if option.type.name == "str"
# return jsonschema property definition for string
then default // description // {
type = "string";
}
# parse path
else if option.type.name == "path"
# return jsonschema property definition for path
then default // description // {
type = "string";
}
# parse enum
else if option.type.name == "enum"
# return jsonschema property definition for enum
then default // description // {
enum = option.type.functor.payload;
}
# parse listOf submodule
else if option.type.name == "listOf" && option.type.functor.wrapped.name == "submodule"
# return jsonschema property definition for listOf submodule
then default // description // {
type = "array";
items = parseOptions (option.type.functor.wrapped.getSubOptions option.loc);
}
# parse list
else if
(option.type.name == "listOf")
&& (typeMap ? "${option.type.functor.wrapped.name}")
# return jsonschema property definition for list
then default // description // {
type = "array";
items = {
type = typeMap.${option.type.functor.wrapped.name};
};
}
# parse attrsOf submodule
else if option.type.name == "attrsOf" && option.type.nestedTypes.elemType.name == "submodule"
# return jsonschema property definition for attrsOf submodule
then default // description // {
type = "object";
additionalProperties = parseOptions (option.type.nestedTypes.elemType.getSubOptions option.loc);
}
# parse attrs
else if option.type.name == "attrsOf"
# return jsonschema property definition for attrs
then default // description // {
type = "object";
additionalProperties = {
type = typeMap.${option.type.nestedTypes.elemType.name} or (notSupported option);
};
}
# parse submodule
else if option.type.name == "submodule"
# return jsonschema property definition for submodule
# then (lib.attrNames (option.type.getSubOptions option.loc).opt)
then parseOptions (option.type.getSubOptions option.loc)
# throw error if option type is not supported
else notSupported option;
}

View File

@@ -1,14 +0,0 @@
{
"name": "John Doe",
"age": 42,
"isAdmin": false,
"kernelModules": ["usbhid", "usb_storage"],
"userIds": {
"mic92": 1,
"lassulus": 2,
"davhau": 3
},
"services": {
"opt": "this option doesn't make sense"
}
}

View File

@@ -1,51 +0,0 @@
/*
An example nixos module declaring an interface.
*/
{ lib, ... }: {
options = {
# str
name = lib.mkOption {
type = lib.types.str;
default = "John Doe";
description = "The name of the user";
};
# int
age = lib.mkOption {
type = lib.types.int;
default = 42;
description = "The age of the user";
};
# bool
isAdmin = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Is the user an admin?";
};
# a submodule option
services = lib.mkOption {
type = lib.types.submodule {
options.opt = lib.mkOption {
type = lib.types.str;
default = "foo";
description = "A submodule option";
};
};
};
# attrs of int
userIds = lib.mkOption {
type = lib.types.attrsOf lib.types.int;
description = "Some attributes";
default = {
horst = 1;
peter = 2;
albrecht = 3;
};
};
# list of str
kernelModules = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "nvme" "xhci_pci" "ahci" ];
description = "A list of enabled kernel modules";
};
};
}

View File

@@ -1,50 +0,0 @@
{
"type": "object",
"properties": {
"name": {
"type": "string",
"default": "John Doe",
"description": "The name of the user"
},
"age": {
"type": "integer",
"default": 42,
"description": "The age of the user"
},
"isAdmin": {
"type": "boolean",
"default": false,
"description": "Is the user an admin?"
},
"kernelModules": {
"type": "array",
"items": {
"type": "string"
},
"default": ["nvme", "xhci_pci", "ahci"],
"description": "A list of enabled kernel modules"
},
"userIds": {
"type": "object",
"default": {
"horst": 1,
"peter": 2,
"albrecht": 3
},
"additionalProperties": {
"type": "integer"
},
"description": "Some attributes"
},
"services": {
"type": "object",
"properties": {
"opt": {
"type": "string",
"default": "foo",
"description": "A submodule option"
}
}
}
}
}

View File

@@ -1,29 +0,0 @@
{
perSystem = { pkgs, self', ... }: {
checks = {
# check if the `clan config` example jsonschema and data is valid
lib-jsonschema-example-valid = pkgs.runCommand "lib-jsonschema-example-valid" { } ''
echo "Checking that example-schema.json is valid"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${./.}/example-schema.json
echo "Checking that example-data.json is valid according to example-schema.json"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--schemafile ${./.}/example-schema.json \
${./.}/example-data.json
touch $out
'';
# check if the `clan config` nix jsonschema converter unit tests succeed
lib-jsonschema-nix-unit-tests = pkgs.runCommand "lib-jsonschema-nix-unit-tests" { } ''
export NIX_PATH=nixpkgs=${pkgs.path}
${self'.packages.nix-unit}/bin/nix-unit \
${./.}/test.nix \
--eval-store $(realpath .)
touch $out
'';
};
};
}

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
expr='let pkgs = import <nixpkgs> {}; lib = pkgs.lib; in (pkgs.nixosOptionsDoc {options = (lib.evalModules {modules=[./example-interface.nix];}).options;}).optionsJSON.options'
jq < "$(nix eval --impure --raw --expr "$expr")" > options.json

View File

@@ -1,89 +0,0 @@
{
"age": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "42"
},
"description": "The age of the user",
"loc": ["age"],
"readOnly": false,
"type": "signed integer"
},
"isAdmin": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "false"
},
"description": "Is the user an admin?",
"loc": ["isAdmin"],
"readOnly": false,
"type": "boolean"
},
"kernelModules": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "[\n \"nvme\"\n \"xhci_pci\"\n \"ahci\"\n]"
},
"description": "A list of enabled kernel modules",
"loc": ["kernelModules"],
"readOnly": false,
"type": "list of string"
},
"name": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "\"John Doe\""
},
"description": "The name of the user",
"loc": ["name"],
"readOnly": false,
"type": "string"
},
"services": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"description": null,
"loc": ["services"],
"readOnly": false,
"type": "submodule"
},
"services.opt": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "\"foo\""
},
"description": "A submodule option",
"loc": ["services", "opt"],
"readOnly": false,
"type": "string"
},
"userIds": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "{\n albrecht = 3;\n horst = 1;\n peter = 2;\n}"
},
"description": "Some attributes",
"loc": ["userIds"],
"readOnly": false,
"type": "attribute set of signed integer"
}
}

View File

@@ -1,8 +0,0 @@
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
{
parseOption = import ./test_parseOption.nix { inherit lib slib; };
parseOptions = import ./test_parseOptions.nix { inherit lib slib; };
}

View File

@@ -1,249 +0,0 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
let
description = "Test Description";
evalType = type: default:
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
inherit type;
inherit default;
inherit description;
};
}];
};
in
evaledConfig.options.opt;
in
{
testNoDefaultNoDescription =
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
type = lib.types.bool;
};
}];
};
in
{
expr = slib.parseOption evaledConfig.options.opt;
expected = {
type = "boolean";
};
};
testBool =
let
default = false;
in
{
expr = slib.parseOption (evalType lib.types.bool default);
expected = {
type = "boolean";
inherit default description;
};
};
testString =
let
default = "hello";
in
{
expr = slib.parseOption (evalType lib.types.str default);
expected = {
type = "string";
inherit default description;
};
};
testInteger =
let
default = 42;
in
{
expr = slib.parseOption (evalType lib.types.int default);
expected = {
type = "integer";
inherit default description;
};
};
testFloat =
let
default = 42.42;
in
{
expr = slib.parseOption (evalType lib.types.float default);
expected = {
type = "number";
inherit default description;
};
};
testEnum =
let
default = "foo";
values = [ "foo" "bar" "baz" ];
in
{
expr = slib.parseOption (evalType (lib.types.enum values) default);
expected = {
enum = values;
inherit default description;
};
};
testListOfInt =
let
default = [ 1 2 3 ];
in
{
expr = slib.parseOption (evalType (lib.types.listOf lib.types.int) default);
expected = {
type = "array";
items = {
type = "integer";
};
inherit default description;
};
};
testAttrsOfInt =
let
default = { foo = 1; bar = 2; baz = 3; };
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf lib.types.int) default);
expected = {
type = "object";
additionalProperties = {
type = "integer";
};
inherit default description;
};
};
testNullOrBool =
let
default = null; # null is a valid value for this type
in
{
expr = slib.parseOption (evalType (lib.types.nullOr lib.types.bool) default);
expected = {
type = [ "null" "boolean" ];
inherit default description;
};
};
testSubmoduleOption =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
in
{
expr = slib.parseOption (evalType (lib.types.submodule subModule) { });
expected = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
};
testSubmoduleOptionWithoutDefault =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
inherit description;
};
};
in
{
expr = slib.parseOption (evalType (lib.types.submodule subModule) { });
expected = {
type = "object";
properties = {
opt = {
type = "boolean";
inherit description;
};
};
required = [ "opt" ];
};
};
testAttrsOfSubmodule =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
default = { foo.opt = false; bar.opt = true; };
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf (lib.types.submodule subModule)) default);
expected = {
type = "object";
additionalProperties = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
inherit default description;
};
};
testListOfSubmodule =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
default = [{ opt = false; } { opt = true; }];
in
{
expr = slib.parseOption (evalType (lib.types.listOf (lib.types.submodule subModule)) default);
expected = {
type = "array";
items = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
inherit default description;
};
};
}

View File

@@ -1,46 +0,0 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
let
evaledOptions =
let
evaledConfig = lib.evalModules {
modules = [ ./example-interface.nix ];
};
in
evaledConfig.options;
in
{
testParseOptions = {
expr = slib.parseOptions evaledOptions;
expected = builtins.fromJSON (builtins.readFile ./example-schema.json);
};
testParseNestedOptions =
let
evaled = lib.evalModules {
modules = [{
options.foo.bar = lib.mkOption {
type = lib.types.bool;
};
}];
};
in
{
expr = slib.parseOptions evaled.options;
expected = {
properties = {
foo = {
properties = {
bar = { type = "boolean"; };
};
required = [ "bar" ];
type = "object";
};
};
type = "object";
};
};
}

View File

@@ -1,9 +0,0 @@
{ lib, ... }: {
options.clan.bloatware = lib.mkOption {
type = lib.types.submodule {
imports = [
../../../lib/jsonschema/example-interface.nix
];
};
};
}

View File

@@ -1,106 +0,0 @@
{ self, inputs, lib, ... }: {
flake.nixosModules.clanCore = { config, pkgs, options, ... }: {
imports = [
./secrets
./zerotier
./networking.nix
inputs.sops-nix.nixosModules.sops
# just some example options. Can be removed later
./bloatware
./vm.nix
./options.nix
];
options.clanSchema = lib.mkOption {
type = lib.types.attrs;
description = "The json schema for the .clan options namespace";
default = self.lib.jsonschema.parseOptions options.clan;
};
options.clanCore = {
clanDir = lib.mkOption {
type = lib.types.either lib.types.path lib.types.str;
description = ''
the location of the flake repo, used to calculate the location of facts and secrets
'';
};
machineName = lib.mkOption {
type = lib.types.str;
description = ''
the name of the machine
'';
};
clanPkgs = lib.mkOption {
default = self.packages.${pkgs.system};
defaultText = "self.packages.${pkgs.system}";
internal = true;
};
};
options.system.clan = lib.mkOption {
type = lib.types.submodule {
options = {
deployment.data = lib.mkOption {
type = lib.types.attrs;
description = ''
the data to be written to the deployment.json file
'';
};
deployment.file = lib.mkOption {
type = lib.types.path;
description = ''
the location of the deployment.json file
'';
};
deploymentAddress = lib.mkOption {
type = lib.types.str;
description = ''
the address of the deployment server
'';
};
secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
the directory on the deployment server where secrets are uploaded
'';
};
uploadSecrets = lib.mkOption {
type = lib.types.path;
description = ''
script to upload secrets to the deployment server
'';
default = "${pkgs.coreutils}/bin/true";
};
generateSecrets = lib.mkOption {
type = lib.types.path;
description = ''
script to generate secrets
'';
default = "${pkgs.coreutils}/bin/true";
};
vm.config = lib.mkOption {
type = lib.types.attrs;
description = ''
the vm config
'';
};
vm.create = lib.mkOption {
type = lib.types.path;
description = ''
json metadata about the vm
'';
};
};
};
description = ''
utility outputs for clan management of this machine
'';
};
# optimization for faster secret generate/upload and machines update
config = {
system.clan.deployment.data = {
inherit (config.system.clan) uploadSecrets generateSecrets;
inherit (config.clan.networking) deploymentAddress;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
};
};
}

View File

@@ -1,21 +0,0 @@
{ config, lib, ... }:
{
options.clan.networking = {
deploymentAddress = lib.mkOption {
description = ''
The target SSH node for deployment.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = "root@${config.networking.hostName}";
};
};
}

View File

@@ -1,12 +0,0 @@
{ pkgs, options, lib, ... }: {
options.clanCore.optionsNix = lib.mkOption {
type = lib.types.raw;
internal = true;
readOnly = true;
default = (pkgs.nixosOptionsDoc { inherit options; }).optionsNix;
defaultText = "optionsNix";
description = ''
This is to export nixos options used for `clan config`
'';
};
}

View File

@@ -1,121 +0,0 @@
{ config, lib, ... }:
{
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [ "sops" "password-store" "custom" ];
default = "sops";
description = ''
method to store secrets
custom can be used to define a custom secret store;
in that case system.clan.generateSecrets and system.clan.uploadSecrets have to be defined.
'';
};
options.clanCore.secretsDirectory = lib.mkOption {
type = lib.types.path;
description = ''
The directory where secrets are installed to. This is backend specific.
'';
};
options.clanCore.secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
The directory where secrets are uploaded to. This is backend specific.
'';
};
options.clanCore.secretsPrefix = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
Prefix for secrets. This is backend specific.
'';
};
options.clanCore.secrets = lib.mkOption {
default = { };
type = lib.types.attrsOf
(lib.types.submodule (secret: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = secret.config._module.args.name;
description = ''
Namespace of the secret
'';
};
generator = lib.mkOption {
type = lib.types.str;
description = ''
Script to generate the secret.
The script will be called with the following variables:
- facts: path to a directory where facts can be stored
- secrets: path to a directory where secrets can be stored
The script is expected to generate all secrets and facts defined in the module.
'';
};
secrets =
let
config' = config;
in
lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ config, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = config._module.args.name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a secret which is generated by the generator
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
};
}));
description = ''
path where the secret is located in the filesystem
'';
};
facts = lib.mkOption {
default = { };
type = lib.types.attrsOf (lib.types.submodule (fact: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the fact
'';
default = fact.config._module.args.name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a fact which is generated by the generator
'';
default = "machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
};
value = lib.mkOption {
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
type = lib.types.nullOr lib.types.str;
default =
if builtins.pathExists "${config.clanCore.clanDir}/${fact.config.path}" then
builtins.readFile "${config.clanCore.clanDir}/${fact.config.path}"
else
null;
};
};
}));
};
};
}));
};
imports = [
./sops.nix
./password-store.nix
];
}

View File

@@ -1,116 +0,0 @@
{ config, lib, pkgs, ... }:
let
passwordstoreDir = "\${PASSWORD_STORE_DIR:-$HOME/.password-store}";
in
{
options.clan.password-store.targetDirectory = lib.mkOption {
type = lib.types.path;
default = "/etc/secrets";
description = ''
The directory where the password store is uploaded to.
'';
};
config = lib.mkIf (config.clanCore.secretStore == "password-store") {
clanCore.secretsDirectory = config.clan.password-store.targetDirectory;
clanCore.secretsUploadDirectory = config.clan.password-store.targetDirectory;
system.clan.generateSecrets = lib.mkIf (config.clanCore.secrets != { }) (
pkgs.writeScript "generate-secrets" ''
#!/bin/sh
set -efu
test -d "$CLAN_DIR"
PATH=${lib.makeBinPath [
pkgs.pass
]}:$PATH
# TODO maybe initialize password store if it doesn't exist yet
${lib.foldlAttrs (acc: n: v: ''
${acc}
# ${n}
# if any of the secrets are missing, we regenerate all connected facts/secrets
(if ! (${lib.concatMapStringsSep " && " (x: "test -e ${passwordstoreDir}/machines/${config.clanCore.machineName}/${x.name}.gpg >/dev/null") (lib.attrValues v.secrets)}); then
tmpdir=$(mktemp -d)
trap "rm -rf $tmpdir" EXIT
cd $tmpdir
facts=$(mktemp -d)
trap "rm -rf $facts" EXIT
secrets=$(mktemp -d)
trap "rm -rf $secrets" EXIT
( ${v.generator} )
${lib.concatMapStrings (fact: ''
mkdir -p "$CLAN_DIR"/"$(dirname ${fact.path})"
cp "$facts"/${fact.name} "$CLAN_DIR"/${fact.path}
'') (lib.attrValues v.facts)}
${lib.concatMapStrings (secret: ''
cat "$secrets"/${secret.name} | pass insert -m machines/${config.clanCore.machineName}/${secret.name}
'') (lib.attrValues v.secrets)}
fi)
'') "" config.clanCore.secrets}
''
);
system.clan.uploadSecrets = pkgs.writeScript "upload-secrets" ''
#!/bin/sh
set -efu
umask 0077
PATH=${lib.makeBinPath [
pkgs.pass
pkgs.git
pkgs.findutils
pkgs.rsync
]}:$PATH:${lib.getBin pkgs.openssh}
if test -e ${passwordstoreDir}/.git; then
local_pass_info=$(
git -C ${passwordstoreDir} log -1 --format=%H machines/${config.clanCore.machineName}
# we append a hash for every symlink, otherwise we would miss updates to
# the files the symlinks point to
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type l \
-exec realpath {} + |
sort |
xargs -r -n 1 git -C ${passwordstoreDir} log -1 --format=%H
)
remote_pass_info=$(ssh ${config.clan.networking.deploymentAddress} -- ${lib.escapeShellArg ''
cat ${config.clan.password-store.targetDirectory}/.pass_info || :
''} || :)
if test "$local_pass_info" = "$remote_pass_info"; then
echo secrets already match
exit 23
fi
fi
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type f -follow ! -name .gpg-id |
while read -r gpg_path; do
rel_name=''${gpg_path#${passwordstoreDir}}
rel_name=''${rel_name%.gpg}
pass_date=$(
if test -e ${passwordstoreDir}/.git; then
git -C ${passwordstoreDir} log -1 --format=%aI "$gpg_path"
fi
)
pass_name=$rel_name
tmp_path="$SECRETS_DIR"/$(basename $rel_name)
mkdir -p "$(dirname "$tmp_path")"
pass show "$pass_name" > "$tmp_path"
if [ -n "$pass_date" ]; then
touch -d "$pass_date" "$tmp_path"
fi
done
if test -n "''${local_pass_info-}"; then
echo "$local_pass_info" > "$SECRETS_DIR"/.pass_info
fi
'';
};
}

View File

@@ -1,59 +0,0 @@
{ config, lib, pkgs, ... }:
let
secretsDir = config.clanCore.clanDir + "/sops/secrets";
groupsDir = config.clanCore.clanDir + "/sops/groups";
# In the nixos module my symlink is detected as a directory, although it works in the repl. Is this because of pure evaluation?
containsSymlink = path:
builtins.pathExists path && (builtins.readFileType path == "directory" || builtins.readFileType path == "symlink");
containsMachine = parent: name: type:
type == "directory" && containsSymlink "${parent}/${name}/machines/${config.clanCore.machineName}";
containsMachineOrGroups = name: type:
(containsMachine secretsDir name type) || lib.any (group: type == "directory" && containsSymlink "${secretsDir}/${name}/groups/${group}") groups;
filterDir = filter: dir:
lib.optionalAttrs (builtins.pathExists dir)
(lib.filterAttrs filter (builtins.readDir dir));
groups = builtins.attrNames (filterDir (containsMachine groupsDir) groupsDir);
secrets = filterDir containsMachineOrGroups secretsDir;
in
{
config = lib.mkIf (config.clanCore.secretStore == "sops") {
clanCore.secretsDirectory = "/run/secrets";
clanCore.secretsPrefix = config.clanCore.machineName + "-";
system.clan = lib.mkIf (config.clanCore.secrets != { }) {
generateSecrets = pkgs.writeScript "generate-secrets" ''
#!${pkgs.python3}/bin/python
import json
from clan_cli.secrets.sops_generate import generate_secrets_from_nix
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; secret_submodules = config.clanCore.secrets; })})
generate_secrets_from_nix(**args)
'';
uploadSecrets = pkgs.writeScript "upload-secrets" ''
#!${pkgs.python3}/bin/python
import json
from clan_cli.secrets.sops_generate import upload_age_key_from_nix
# the second toJSON is needed to escape the string for the python script
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; })})
upload_age_key_from_nix(**args)
'';
};
sops.secrets = builtins.mapAttrs
(name: _: {
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
format = "binary";
})
secrets;
# To get proper error messages about missing secrets we need a dummy secret file that is always present
sops.defaultSopsFile = lib.mkIf config.sops.validateSopsFiles (lib.mkDefault (builtins.toString (pkgs.writeText "dummy.yaml" "")));
sops.age.keyFile = lib.mkIf (builtins.pathExists (config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"))
(lib.mkDefault "/var/lib/sops-nix/key.txt");
clanCore.secretsUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
};
}

View File

@@ -1,74 +0,0 @@
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
let
vmConfig = extendModules {
modules = [
(modulesPath + "/virtualisation/qemu-vm.nix")
{
virtualisation.fileSystems.${config.clanCore.secretsUploadDirectory} = lib.mkForce {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
}
];
};
in
{
options = {
clan.virtualisation = {
cores = lib.mkOption {
type = lib.types.ints.positive;
default = 1;
description = lib.mdDoc ''
Specify the number of cores the guest is permitted to use.
The number can be higher than the available cores on the
host system.
'';
};
memorySize = lib.mkOption {
type = lib.types.ints.positive;
default = 1024;
description = lib.mdDoc ''
The memory size in megabytes of the virtual machine.
'';
};
graphics = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Whether to run QEMU with a graphics window, or in nographic mode.
Serial console will be enabled on both settings, but this will
change the preferred console.
'';
};
};
};
config = {
system.clan.vm = {
# for clan vm inspect
config = {
inherit (config.clan.virtualisation) cores graphics;
memory_size = config.clan.virtualisation.memorySize;
};
# for clan vm create
create = pkgs.writeText "vm.json" (builtins.toJSON {
initrd = "${vmConfig.config.system.build.initialRamdisk}/${vmConfig.config.system.boot.loader.initrdFile}";
toplevel = vmConfig.config.system.build.toplevel;
regInfo = (pkgs.closureInfo { rootPaths = vmConfig.config.virtualisation.additionalPaths; });
inherit (config.clan.virtualisation) memorySize cores graphics;
generateSecrets = config.system.clan.generateSecrets;
uploadSecrets = config.system.clan.uploadSecrets;
});
};
virtualisation = lib.optionalAttrs (options.virtualisation ? cores) {
memorySize = lib.mkDefault config.clan.virtualisation.memorySize;
graphics = lib.mkDefault config.clan.virtualisation.graphics;
cores = lib.mkDefault config.clan.virtualisation.cores;
};
};
}

View File

@@ -1,122 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.clan.networking.zerotier;
facts = config.clanCore.secrets.zerotier.facts;
networkConfig = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = [ ];
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = "";
nwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
in
{
options.clan.networking.zerotier = {
networkId = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
zerotier network id
'';
};
controller = {
enable = lib.mkEnableOption "turn this machine into the network controller";
public = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
everyone can join a public network without the administrator having to accept them
'';
};
};
};
config = lib.mkMerge [
({
# Override license so that we can build zerotierone without
# having to re-import nixpkgs.
services.zerotierone.package = lib.mkDefault (pkgs.zerotierone.overrideAttrs (_old: { meta = { }; }));
})
(lib.mkIf (cfg.networkId != null) {
systemd.network.networks.zerotier = {
matchConfig.Name = "zt*";
networkConfig = {
LLMNR = true;
LLDP = true;
MulticastDNS = true;
KeepConfiguration = "static";
};
};
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ]; # mdns
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 5353 ]; # mdns
networking.networkmanager.unmanaged = [ "interface-name:zt*" ];
services.zerotierone = {
enable = true;
joinNetworks = [ cfg.networkId ];
};
})
(lib.mkIf cfg.controller.enable {
# only the controller needs to have the key in the repo, the other clients can be dynamic
# we generate the zerotier code manually for the controller, since it's part of the bootstrap command
clanCore.secrets.zerotier = {
facts.zerotier-network-id = { };
secrets.zerotier-identity-secret = { };
generator = ''
export PATH=${lib.makeBinPath [ config.services.zerotierone.package pkgs.fakeroot ]}
${pkgs.python3.interpreter} ${./generate-network.py} "$facts/zerotier-network-id" "$secrets/zerotier-identity-secret"
'';
};
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
})
(lib.mkIf ((config.clanCore.secrets ? zerotier) && (facts.zerotier-network-id.value != null)) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier" ''
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON networkConfig)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}"
];
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''
${config.clanCore.clanPkgs.zerotier-members}/bin/zerotier-members allow ${builtins.substring 0 10 cfg.networkId}
''}"
];
})
];
}

View File

@@ -1,143 +0,0 @@
import argparse
import contextlib
import json
import socket
import subprocess
import time
import urllib.request
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Iterator, Optional
class ClanError(Exception):
pass
def try_bind_port(port: int) -> bool:
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with tcp, udp:
try:
tcp.bind(("127.0.0.1", port))
udp.bind(("127.0.0.1", port))
return True
except OSError:
return False
def try_connect_port(port: int) -> bool:
sock = socket.socket(socket.AF_INET)
result = sock.connect_ex(("127.0.0.1", port))
sock.close()
return result == 0
def find_free_port() -> Optional[int]:
"""Find an unused localhost port from 1024-65535 and return it."""
with contextlib.closing(socket.socket(type=socket.SOCK_STREAM)) as sock:
sock.bind(("127.0.0.1", 0))
return sock.getsockname()[1]
class ZerotierController:
def __init__(self, port: int, home: Path) -> None:
self.port = port
self.home = home
self.authtoken = (home / "authtoken.secret").read_text()
self.secret = (home / "identity.secret").read_text()
def _http_request(
self,
path: str,
method: str = "GET",
headers: dict[str, str] = {},
data: Optional[dict[str, Any]] = None,
) -> dict[str, Any]:
body = None
headers = headers.copy()
if data is not None:
body = json.dumps(data).encode("ascii")
headers["Content-Type"] = "application/json"
headers["X-ZT1-AUTH"] = self.authtoken
url = f"http://127.0.0.1:{self.port}{path}"
req = urllib.request.Request(url, headers=headers, method=method, data=body)
resp = urllib.request.urlopen(req)
return json.load(resp)
def status(self) -> dict[str, Any]:
return self._http_request("/status")
def create_network(self, data: dict[str, Any] = {}) -> dict[str, Any]:
identity = (self.home / "identity.public").read_text()
node_id = identity.split(":")[0]
return self._http_request(
f"/controller/network/{node_id}______", method="POST", data=data
)
def get_network(self, id: str) -> dict[str, Any]:
return self._http_request(f"/controller/network/{id}")
@contextmanager
def zerotier_controller() -> Iterator[ZerotierController]:
# This check could be racy but it's unlikely in practice
controller_port = find_free_port()
if controller_port is None:
raise ClanError("cannot find a free port for zerotier controller")
with TemporaryDirectory() as d:
tempdir = Path(d)
home = tempdir / "zerotier-one"
home.mkdir()
cmd = [
"fakeroot",
"--",
"zerotier-one",
f"-p{controller_port}",
str(home),
]
with subprocess.Popen(cmd) as p:
try:
print(
f"wait for controller to be started on 127.0.0.1:{controller_port}...",
)
while not try_connect_port(controller_port):
status = p.poll()
if status is not None:
raise ClanError(
f"zerotier-one has been terminated unexpected with {status}"
)
time.sleep(0.1)
print()
yield ZerotierController(controller_port, home)
finally:
p.terminate()
p.wait()
# TODO: allow merging more network configuration here
def create_network() -> dict:
with zerotier_controller() as controller:
network = controller.create_network()
return {
"secret": controller.secret,
"networkid": network["nwid"],
}
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("network_id")
parser.add_argument("identity_secret")
args = parser.parse_args()
zerotier = create_network()
Path(args.network_id).write_text(zerotier["networkid"])
Path(args.identity_secret).write_text(zerotier["secret"])
if __name__ == "__main__":
main()

View File

@@ -1,6 +0,0 @@
{ ... }: {
flake.nixosModules = {
hidden-ssh-announce.imports = [ ./hidden-ssh-announce.nix ];
installer.imports = [ ./installer ];
};
}

View File

@@ -1,55 +0,0 @@
{ config
, lib
, pkgs
, ...
}: {
options.hidden-ssh-announce = {
enable = lib.mkEnableOption "hidden-ssh-announce";
script = lib.mkOption {
type = lib.types.package;
default = pkgs.writers.writeDash "test-output" "echo $1";
description = ''
script to run when the hidden tor service has started and the hostname is known.
takes the hostname as $1
'';
};
};
config = lib.mkIf config.hidden-ssh-announce.enable {
services.openssh.enable = true;
services.tor = {
enable = true;
relay.onionServices.hidden-ssh = {
version = 3;
map = [
{
port = 22;
target.port = 22;
}
];
};
client.enable = true;
};
systemd.services.hidden-ssh-announce = {
description = "announce hidden ssh";
after = [ "tor.service" "network-online.target" ];
wants = [ "tor.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
# ${pkgs.tor}/bin/torify
ExecStart = pkgs.writers.writeDash "announce-hidden-service" ''
set -efu
until test -e ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname; do
echo "still waiting for ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname"
sleep 1
done
${config.hidden-ssh-announce.script} "$(cat ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname)"
'';
PrivateTmp = "true";
User = "tor";
Type = "oneshot";
};
};
};
}

View File

@@ -1,82 +0,0 @@
{ lib
, pkgs
, modulesPath
, ...
}: {
systemd.tmpfiles.rules = [
"d /var/shared 0777 root root - -"
];
imports = [
(modulesPath + "/profiles/installation-device.nix")
(modulesPath + "/profiles/all-hardware.nix")
(modulesPath + "/profiles/base.nix")
];
services.openssh.settings.PermitRootLogin = "yes";
system.activationScripts.root-password = ''
mkdir -p /var/shared
${pkgs.pwgen}/bin/pwgen -s 16 1 > /var/shared/root-password
echo "root:$(cat /var/shared/root-password)" | chpasswd
'';
hidden-ssh-announce = {
enable = true;
script = pkgs.writers.writeDash "write-hostname" ''
set -efu
mkdir -p /var/shared
echo "$1" > /var/shared/onion-hostname
${pkgs.jq}/bin/jq -nc \
--arg password "$(cat /var/shared/root-password)" \
--arg address "$(cat /var/shared/onion-hostname)" '{
password: $password, address: $address
}' > /var/shared/login.info
cat /var/shared/login.info |
${pkgs.qrencode}/bin/qrencode -t utf8 -o /var/shared/qrcode.utf8
cat /var/shared/login.info |
${pkgs.qrencode}/bin/qrencode -t png -o /var/shared/qrcode.png
'';
};
services.getty.autologinUser = lib.mkForce "root";
programs.bash.interactiveShellInit = ''
if [ "$(tty)" = "/dev/tty1" ]; then
echo 'waiting for tor to generate the hidden service'
until test -e /var/shared/qrcode.utf8; do echo .; sleep 1; done
cat /var/shared/qrcode.utf8
fi
'';
boot.loader.grub.efiInstallAsRemovable = true;
boot.loader.grub.efiSupport = true;
disko.devices = {
disk = {
stick = {
type = "disk";
device = "/vda";
imageSize = "3G";
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}

View File

@@ -1,7 +1,9 @@
#!/usr/bin/env bash
# Because we depend on nixpkgs sources, uploading to builders takes a long time
source_up
if type nix_direnv_watch_file &>/dev/null; then
nix_direnv_watch_file flake-module.nix
nix_direnv_watch_file default.nix

View File

@@ -12,4 +12,11 @@
],
"python.testing.unittestEnabled": false,
"python.testing.pytestEnabled": true,
"search.exclude": {
"**/.direnv": true
},
"python.linting.mypyPath": "mypy",
"python.linting.mypyEnabled": true,
"python.linting.enabled": true,
"python.defaultInterpreterPath": "python"
}

View File

@@ -56,15 +56,15 @@ Add this `launch.json` to your .vscode directory to have working breakpoints in
## Run locally single-threaded for debugging
By default tests run in parallel using pytest-parallel.
pytest-parallel however breaks `breakpoint()`. To disable it, use this:
By default tests run in parallel using pytest-xdist.
pytest-xdist however breaks `breakpoint()`. To disable it, use this:
```console
pytest --workers "" -s
pytest -n0 -s
```
You can also run a single test like this:
```console
pytest --workers "" -s tests/test_secrets_cli.py::test_users
pytest -n0 -s tests/test_secrets_cli.py::test_users
```

View File

@@ -1,10 +1,13 @@
import argparse
import logging
import sys
from types import ModuleType
from typing import Optional
from . import config, create, join, machines, secrets, vms, webui
from .ssh import cli as ssh_cli
from . import webui
from .custom_logger import register
log = logging.getLogger(__name__)
argcomplete: Optional[ModuleType] = None
try:
@@ -24,33 +27,12 @@ def create_parser(prog: Optional[str] = None) -> argparse.ArgumentParser:
subparsers = parser.add_subparsers()
parser_create = subparsers.add_parser(
"create", help="create a clan flake inside the current directory"
)
create.register_parser(parser_create)
parser_join = subparsers.add_parser("join", help="join a remote clan")
join.register_parser(parser_join)
parser_config = subparsers.add_parser("config", help="set nixos configuration")
config.register_parser(parser_config)
parser_ssh = subparsers.add_parser("ssh", help="ssh to a remote machine")
ssh_cli.register_parser(parser_ssh)
parser_secrets = subparsers.add_parser("secrets", help="manage secrets")
secrets.register_parser(parser_secrets)
parser_machine = subparsers.add_parser(
"machines", help="Manage machines and their configuration"
)
machines.register_parser(parser_machine)
parser_webui = subparsers.add_parser("webui", help="start webui")
webui.register_parser(parser_webui)
parser_vms = subparsers.add_parser("vms", help="manage virtual machines")
vms.register_parser(parser_vms)
# if args.debug:
register(logging.DEBUG)
log.debug("Debug log activated")
if argcomplete:
argcomplete.autocomplete(parser)

View File

@@ -1,18 +1,35 @@
import asyncio
import logging
import shlex
from pathlib import Path
from typing import Any, Callable, Coroutine, Dict, NamedTuple, Optional
from .errors import ClanError
log = logging.getLogger(__name__)
async def run(cmd: list[str]) -> bytes:
class CmdOut(NamedTuple):
stdout: str
stderr: str
cwd: Optional[Path] = None
async def run(cmd: list[str], cwd: Optional[Path] = None) -> CmdOut:
log.debug(f"$: {shlex.join(cmd)}")
cwd_res = None
if cwd is not None:
if not cwd.exists():
raise ClanError(f"Working directory {cwd} does not exist")
if not cwd.is_dir():
raise ClanError(f"Working directory {cwd} is not a directory")
cwd_res = cwd.resolve()
log.debug(f"Working directory: {cwd_res}")
proc = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
cwd=cwd_res,
)
stdout, stderr = await proc.communicate()
@@ -20,9 +37,30 @@ async def run(cmd: list[str]) -> bytes:
raise ClanError(
f"""
command: {shlex.join(cmd)}
working directory: {cwd_res}
exit code: {proc.returncode}
command output:
stderr:
{stderr.decode("utf-8")}
stdout:
{stdout.decode("utf-8")}
"""
)
return stdout
return CmdOut(stdout.decode("utf-8"), stderr.decode("utf-8"), cwd=cwd)
def runforcli(
func: Callable[..., Coroutine[Any, Any, Dict[str, CmdOut]]], *args: Any
) -> None:
try:
res = asyncio.run(func(*args))
for i in res.items():
name, out = i
if out.stderr:
print(f"{name}: {out.stderr}", end="")
if out.stdout:
print(f"{name}: {out.stdout}", end="")
except ClanError as e:
print(e)
exit(1)
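
A minimal usage sketch of the `run`/`CmdOut`/`runforcli` helpers shown above, not part of the diff itself. The import path `clan_cli.async_cmd`, the `flake_update` task name and the `nix flake update` command are assumptions for illustration:

```python
from pathlib import Path
from typing import Dict

# assumed import path; the module name is not visible in this diff
from clan_cli.async_cmd import CmdOut, run, runforcli


async def flake_update(flake_dir: Path) -> Dict[str, CmdOut]:
    # run() raises ClanError on a non-zero exit code and returns decoded output
    out = await run(["nix", "flake", "update"], cwd=flake_dir)
    return {"flake update": out}


# runforcli() drives the coroutine, prints each task's stdout/stderr,
# and turns a ClanError into a non-zero exit code.
runforcli(flake_update, Path("."))
```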

View File

@@ -1,363 +0,0 @@
#!/usr/bin/env python3
import argparse
import json
import os
import re
import shlex
import subprocess
import sys
from pathlib import Path
from typing import Any, Optional, Tuple, get_origin
from clan_cli.dirs import get_clan_flake_toplevel
from clan_cli.errors import ClanError
from clan_cli.git import commit_file
from clan_cli.machines.folders import machine_settings_file
from clan_cli.nix import nix_eval
script_dir = Path(__file__).parent
# nixos option type description to python type
def map_type(type: str) -> Any:
if type == "boolean":
return bool
elif type in [
"integer",
"signed integer",
"16 bit unsigned integer; between 0 and 65535 (both inclusive)",
]:
return int
elif type == "string":
return str
# lib.type.passwdEntry
elif type == "string, not containing newlines or colons":
return str
elif type.startswith("null or "):
subtype = type.removeprefix("null or ")
return Optional[map_type(subtype)]
elif type.startswith("attribute set of"):
subtype = type.removeprefix("attribute set of ")
return dict[str, map_type(subtype)] # type: ignore
elif type.startswith("list of"):
subtype = type.removeprefix("list of ")
return list[map_type(subtype)] # type: ignore
else:
raise ClanError(f"Unknown type {type}")
# merge two dicts recursively
def merge(a: dict, b: dict, path: list[str] = []) -> dict:
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif isinstance(a[key], list) and isinstance(b[key], list):
a[key].extend(b[key])
elif a[key] != b[key]:
a[key] = b[key]
else:
a[key] = b[key]
return a
# A container inheriting from list, but overriding __contains__ to return True
# for all values.
# This is used to allow any value for the "choices" field of argparse
class AllContainer(list):
def __contains__(self, item: Any) -> bool:
return True
# value is always a list, as the arg parser cannot know the type upfront
# and therefore always allows multiple arguments.
def cast(value: Any, type: Any, opt_description: str) -> Any:
try:
# handle bools
if isinstance(type, bool):
if value[0] in ["true", "True", "yes", "y", "1"]:
return True
elif value[0] in ["false", "False", "no", "n", "0"]:
return False
else:
raise ClanError(f"Invalid value {value} for boolean")
# handle lists
elif get_origin(type) == list:
subtype = type.__args__[0]
return [cast([x], subtype, opt_description) for x in value]
# handle dicts
elif get_origin(type) == dict:
if not isinstance(value, dict):
raise ClanError(
f"Cannot set {opt_description} directly. Specify a suboption like {opt_description}.<name>"
)
subtype = type.__args__[1]
return {k: cast(v, subtype, opt_description) for k, v in value.items()}
elif str(type) == "typing.Optional[str]":
if value[0] in ["null", "None"]:
return None
return value[0]
else:
if len(value) > 1:
raise ClanError(f"Too many values for {opt_description}")
return type(value[0])
except ValueError:
raise ClanError(
f"Invalid type for option {opt_description} (expected {type.__name__})"
)
def options_for_machine(machine_name: str, show_trace: bool = False) -> dict:
clan_dir = get_clan_flake_toplevel()
flags = []
if show_trace:
flags.append("--show-trace")
flags.append(
f"{clan_dir}#nixosConfigurations.{machine_name}.config.clanCore.optionsNix"
)
cmd = nix_eval(flags=flags)
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
text=True,
)
if proc.returncode != 0:
raise ClanError(
f"Failed to read options for machine {machine_name}:\n{shlex.join(cmd)}\nexit with {proc.returncode}"
)
return json.loads(proc.stdout)
def read_machine_option_value(
machine_name: str, option: str, show_trace: bool = False
) -> str:
clan_dir = get_clan_flake_toplevel()
# use nix eval to read from .#nixosConfigurations.default.config.{option}
# this will give us the evaluated config with the options attribute
cmd = nix_eval(
flags=[
"--show-trace",
f"{clan_dir}#nixosConfigurations.{machine_name}.config.{option}",
],
)
proc = subprocess.run(cmd, stdout=subprocess.PIPE, text=True)
if proc.returncode != 0:
raise ClanError(
f"Failed to read option {option}:\n{shlex.join(cmd)}\nexit with {proc.returncode}"
)
value = json.loads(proc.stdout)
# print the value so that the output can be copied and fed as an input.
# for example a list should be displayed as space separated values surrounded by quotes.
if isinstance(value, list):
out = " ".join([json.dumps(x) for x in value])
elif isinstance(value, dict):
out = json.dumps(value, indent=2)
else:
out = json.dumps(value, indent=2)
return out
def get_or_set_option(args: argparse.Namespace) -> None:
if args.value == []:
print(read_machine_option_value(args.machine, args.option, args.show_trace))
else:
# load options
if args.options_file is None:
options = options_for_machine(
machine_name=args.machine, show_trace=args.show_trace
)
else:
with open(args.options_file) as f:
options = json.load(f)
# compute settings json file location
if args.settings_file is None:
get_clan_flake_toplevel()
settings_file = machine_settings_file(args.machine)
else:
settings_file = args.settings_file
# set the option with the given value
set_option(
option=args.option,
value=args.value,
options=options,
settings_file=settings_file,
option_description=args.option,
show_trace=args.show_trace,
)
if not args.quiet:
new_value = read_machine_option_value(args.machine, args.option)
print(f"New Value for {args.option}:")
print(new_value)
def find_option(
option: str, value: Any, options: dict, option_description: Optional[str] = None
) -> Tuple[str, Any]:
"""
The option path specified by the user doesn't have to match exactly to an
entry in the options.json file. Examples
Example 1:
$ clan config services.openssh.settings.SomeSetting 42
This is a freeform option that does not appear in the options.json
The actual option is `services.openssh.settings`
And the value must be wrapped: {"SomeSettings": 42}
Example 2:
$ clan config users.users.my-user.name my-name
The actual option is `users.users.<name>.name`
"""
# option description is used for error messages
if option_description is None:
option_description = option
option_path = option.split(".")
# fuzzy search the option paths, so when
# specified option path: "foo.bar.baz.bum"
# available option path: "foo.<name>.baz.<name>"
# we can still find the option
first = option_path[0]
regex = rf"({first}|<name>)"
for elem in option_path[1:]:
regex += rf"\.({elem}|<name>)"
for opt in options.keys():
if re.match(regex, opt):
return opt, value
# if the regex search did not find the option, start stripping the last
# element of the option path and find matching parent option
# (see examples above for why this is needed)
if len(option_path) == 1:
raise ClanError(f"Option {option_description} not found")
option_path_parent = option_path[:-1]
attr_prefix = option_path[-1]
return find_option(
option=".".join(option_path_parent),
value={attr_prefix: value},
options=options,
option_description=option_description,
)
def set_option(
option: str,
value: Any,
options: dict,
settings_file: Path,
option_description: str = "",
show_trace: bool = False,
) -> None:
option_path_orig = option.split(".")
# returns for example:
# option: "users.users.<name>.name"
# value: "my-name"
option, value = find_option(
option=option,
value=value,
options=options,
option_description=option_description,
)
option_path = option.split(".")
option_path_store = option_path_orig[: len(option_path)]
target_type = map_type(options[option]["type"])
casted = cast(value, target_type, option)
# construct a nested dict from the option path and set the value
result: dict[str, Any] = {}
current = result
for part in option_path_store[:-1]:
current[part] = {}
current = current[part]
current[option_path_store[-1]] = casted
# check if there is an existing config file
if os.path.exists(settings_file):
with open(settings_file) as f:
current_config = json.load(f)
else:
current_config = {}
# merge and save the new config file
new_config = merge(current_config, result)
settings_file.parent.mkdir(parents=True, exist_ok=True)
with open(settings_file, "w") as f:
json.dump(new_config, f, indent=2)
print(file=f) # add newline at the end of the file to make git happy
if settings_file.resolve().is_relative_to(get_clan_flake_toplevel()):
commit_file(settings_file, commit_message=f"Set option {option_description}")
# takes a (sub)parser and configures it
def register_parser(
parser: Optional[argparse.ArgumentParser],
) -> None:
if parser is None:
parser = argparse.ArgumentParser(
description="Set or show NixOS options",
)
# inject callback function to process the input later
parser.set_defaults(func=get_or_set_option)
parser.add_argument(
"--machine",
"-m",
help="Machine to configure",
type=str,
default="default",
)
parser.add_argument(
"--show-trace",
help="Show nix trace on evaluation error",
action="store_true",
)
parser.add_argument(
"--options-file",
help="JSON file with options",
type=Path,
)
parser.add_argument(
"--settings-file",
help="JSON file with settings",
type=Path,
)
parser.add_argument(
"--quiet",
help="Do not print the value",
action="store_true",
)
parser.add_argument(
"option",
help="Option to read or set (e.g. foo.bar)",
type=str,
)
parser.add_argument(
"value",
# force this arg to be set
nargs="*",
help="option value to set (if omitted, the current value is printed)",
)
def main(argv: Optional[list[str]] = None) -> None:
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser()
register_parser(parser)
parser.parse_args(argv[1:])
if __name__ == "__main__":
main()
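
A short sketch of the fuzzy lookup implemented by `find_option` above, assuming the function is in scope; the option names are taken from its docstring and the `options` dicts are hand-written stand-ins for options.json entries:

```python
# Example 2 from the docstring: the concrete attribute name matches <name>,
# so the declared option is returned and the value stays unchanged.
options = {"users.users.<name>.name": {"type": "string"}}
print(find_option("users.users.my-user.name", ["my-name"], options))
# -> ("users.users.<name>.name", ["my-name"])

# Example 1: a freeform suboption is not declared itself, so the last path
# element is stripped and the value is wrapped into an attribute set.
options = {"services.openssh.settings": {"type": "attribute set of string"}}
print(find_option("services.openssh.settings.SomeSetting", ["42"], options))
# -> ("services.openssh.settings", {"SomeSetting": ["42"]})
```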

View File

@@ -1 +0,0 @@
../../../../lib/jsonschema

View File

@@ -1,77 +0,0 @@
import json
import subprocess
import sys
from pathlib import Path
from typing import Optional
from fastapi import HTTPException
from clan_cli.dirs import get_clan_flake_toplevel, nixpkgs_source
from clan_cli.git import commit_file, find_git_repo_root
from clan_cli.machines.folders import machine_folder, machine_settings_file
from clan_cli.nix import nix_eval
def config_for_machine(machine_name: str) -> dict:
# read the config from a json file located at {flake}/machines/{machine_name}/settings.json
if not machine_folder(machine_name).exists():
raise HTTPException(
status_code=404,
detail=f"Machine {machine_name} not found. Create the machine first`",
)
settings_path = machine_settings_file(machine_name)
if not settings_path.exists():
return {}
with open(settings_path) as f:
return json.load(f)
def set_config_for_machine(machine_name: str, config: dict) -> None:
# write the config to a json file located at {flake}/machines/{machine_name}/settings.json
if not machine_folder(machine_name).exists():
raise HTTPException(
status_code=404,
detail=f"Machine {machine_name} not found. Create the machine first`",
)
settings_path = machine_settings_file(machine_name)
settings_path.parent.mkdir(parents=True, exist_ok=True)
with open(settings_path, "w") as f:
json.dump(config, f)
repo_dir = find_git_repo_root()
if repo_dir is not None:
commit_file(settings_path, repo_dir)
def schema_for_machine(machine_name: str, flake: Optional[Path] = None) -> dict:
if flake is None:
flake = get_clan_flake_toplevel()
# use nix eval to lib.evalModules .#nixosModules.machine-{machine_name}
proc = subprocess.run(
nix_eval(
flags=[
"--impure",
"--show-trace",
"--expr",
f"""
let
flake = builtins.getFlake (toString {flake});
lib = import {nixpkgs_source()}/lib;
options = flake.nixosConfigurations.{machine_name}.options;
clanOptions = options.clan;
jsonschemaLib = import {Path(__file__).parent / "jsonschema"} {{ inherit lib; }};
jsonschema = jsonschemaLib.parseOptions clanOptions;
in
jsonschema
""",
],
),
capture_output=True,
text=True,
)
if proc.returncode != 0:
print(proc.stderr, file=sys.stderr)
raise Exception(
f"Failed to read schema for machine {machine_name}:\n{proc.stderr}"
)
return json.loads(proc.stdout)

View File

@@ -1,109 +0,0 @@
import json
import subprocess
from pathlib import Path
from typing import Any, Optional, Type, Union
from ..errors import ClanError
from ..nix import nix_eval
script_dir = Path(__file__).parent
type_map: dict[str, type] = {
"array": list,
"boolean": bool,
"integer": int,
"number": float,
"string": str,
}
def schema_from_module_file(
file: Union[str, Path] = f"{script_dir}/jsonschema/example-schema.json",
) -> dict[str, Any]:
absolute_path = Path(file).absolute()
# define a nix expression that loads the given module file using lib.evalModules
nix_expr = f"""
let
lib = import <nixpkgs/lib>;
slib = import {script_dir}/jsonschema {{inherit lib;}};
in
slib.parseModule {absolute_path}
"""
# run the nix expression and parse the output as json
cmd = nix_eval(["--expr", nix_expr])
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(proc.stdout)
def subtype_from_schema(schema: dict[str, Any]) -> Type:
if schema["type"] == "object":
if "additionalProperties" in schema:
sub_type = subtype_from_schema(schema["additionalProperties"])
return dict[str, sub_type] # type: ignore
elif "properties" in schema:
raise ClanError("Nested dicts are not supported")
else:
raise ClanError("Unknown object type")
elif schema["type"] == "array":
if "items" not in schema:
raise ClanError("Untyped arrays are not supported")
sub_type = subtype_from_schema(schema["items"])
return list[sub_type] # type: ignore
else:
return type_map[schema["type"]]
def type_from_schema_path(
schema: dict[str, Any],
path: list[str],
full_path: Optional[list[str]] = None,
) -> Type:
if full_path is None:
full_path = path
if len(path) == 0:
return subtype_from_schema(schema)
elif schema["type"] == "object":
if "properties" in schema:
subtype = type_from_schema_path(schema["properties"][path[0]], path[1:])
return subtype
elif "additionalProperties" in schema:
subtype = type_from_schema_path(schema["additionalProperties"], path[1:])
return subtype
else:
raise ClanError(f"Unknown type for path {path}")
else:
raise ClanError(f"Unknown type for path {path}")
def options_types_from_schema(schema: dict[str, Any]) -> dict[str, Type]:
result: dict[str, Type] = {}
for name, value in schema.get("properties", {}).items():
assert isinstance(value, dict)
type_ = value["type"]
if type_ == "object":
# handle additionalProperties
if "additionalProperties" in value:
sub_type = value["additionalProperties"].get("type")
if sub_type not in type_map:
raise ClanError(
f"Unsupported object type {sub_type} (field {name})"
)
result[f"{name}.<name>"] = type_map[sub_type]
continue
# handle properties
sub_result = options_types_from_schema(value)
for sub_name, sub_type in sub_result.items():
result[f"{name}.{sub_name}"] = sub_type
continue
elif type_ == "array":
if "items" not in value:
raise ClanError(f"Untyped arrays are not supported (field: {name})")
sub_type = value["items"].get("type")
if sub_type not in type_map:
raise ClanError(f"Unsupported list type {sub_type} (field {name})")
sub_type_: type = type_map[sub_type]
result[name] = list[sub_type_] # type: ignore
continue
result[name] = type_map[type_]
return result
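
An illustrative sketch of how the schema is flattened into argparse-friendly option types, assuming `options_types_from_schema` from the file above is in scope; the inline schema is a hand-written subset of example-schema.json:

```python
schema = {
    "type": "object",
    "properties": {
        "age": {"type": "integer"},
        "kernelModules": {"type": "array", "items": {"type": "string"}},
        "userIds": {"type": "object", "additionalProperties": {"type": "integer"}},
    },
}

# options_types_from_schema flattens nested objects and rewrites
# additionalProperties entries as "<parent>.<name>" keys:
# {"age": int, "kernelModules": list[str], "userIds.<name>": int}
print(options_types_from_schema(schema))
```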

View File

@@ -1,25 +0,0 @@
#!/usr/bin/env python3
import argparse
import subprocess
from .nix import nix_command
def create(args: argparse.Namespace) -> None:
# TODO create clan template in flake
subprocess.run(
nix_command(
[
"flake",
"init",
"-t",
"git+https://git.clan.lol/clan/clan-core#new-clan",
]
),
check=True,
)
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
parser.set_defaults(func=create)

View File

@@ -1,5 +1,7 @@
import inspect
import logging
from typing import Any
from pathlib import Path
from typing import Any, Callable
grey = "\x1b[38;20m"
yellow = "\x1b[33;20m"
@@ -9,11 +11,20 @@ green = "\u001b[32m"
blue = "\u001b[34m"
def get_formatter(color: str) -> logging.Formatter:
reset = "\x1b[0m"
return logging.Formatter(
f"{color}%(levelname)s{reset}:(%(filename)s:%(lineno)d): %(message)s"
)
def get_formatter(color: str) -> Callable[[logging.LogRecord, bool], logging.Formatter]:
def myformatter(
record: logging.LogRecord, with_location: bool
) -> logging.Formatter:
reset = "\x1b[0m"
filepath = Path(record.pathname).resolve()
if not with_location:
return logging.Formatter(f"{color}%(levelname)s{reset}: %(message)s")
return logging.Formatter(
f"{color}%(levelname)s{reset}: %(message)s\n {filepath}:%(lineno)d::%(funcName)s\n"
)
return myformatter
FORMATTER = {
@@ -26,12 +37,34 @@ FORMATTER = {
class CustomFormatter(logging.Formatter):
def format(self, record: Any) -> str:
return FORMATTER[record.levelno].format(record)
def format(self, record: logging.LogRecord) -> str:
return FORMATTER[record.levelno](record, True).format(record)
class ThreadFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
return FORMATTER[record.levelno](record, False).format(record)
def get_caller() -> str:
frame = inspect.currentframe()
if frame is None:
return "unknown"
caller_frame = frame.f_back
if caller_frame is None:
return "unknown"
caller_frame = caller_frame.f_back
if caller_frame is None:
return "unknown"
frame_info = inspect.getframeinfo(caller_frame)
ret = f"{frame_info.filename}:{frame_info.lineno}::{frame_info.function}"
return ret
def register(level: Any) -> None:
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(CustomFormatter())
logging.basicConfig(level=level, handlers=[ch])
handler = logging.StreamHandler()
handler.setLevel(level)
handler.setFormatter(CustomFormatter())
logger = logging.getLogger("registerHandler")
logger.addHandler(handler)
# logging.basicConfig(level=level, handlers=[handler])
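
A short usage sketch of the reworked logging helpers (the import path clan_cli.custom_logger is an assumption; the rendered output in the comments is approximate):

import logging
# from clan_cli.custom_logger import register   # assumed module path
# register(logging.DEBUG)
# logging.getLogger(__name__).warning("something happened")
# With CustomFormatter (location enabled) a record renders roughly as:
#   WARNING: something happened
#     /path/to/module.py:42::my_function
# while ThreadFormatter omits the location line.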

View File

@@ -0,0 +1,72 @@
import logging
import multiprocessing as mp
import os
import shlex
import stat
import subprocess
import sys
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional
import ipdb
log = logging.getLogger(__name__)
def command_exec(cmd: List[str], work_dir: Path, env: Dict[str, str]) -> None:
subprocess.run(cmd, check=True, env=env, cwd=work_dir.resolve())
def repro_env_break(
work_dir: Path,
env: Optional[Dict[str, str]] = None,
cmd: Optional[List[str]] = None,
) -> None:
if env is None:
env = os.environ.copy()
else:
env = env.copy()
# Error checking
if "bash" in env["SHELL"]:
raise Exception("I assumed you use zsh, not bash")
# Cmd appending
args = ["xterm", "-e", "zsh", "-df"]
if cmd is not None:
mycommand = shlex.join(cmd)
write_command(mycommand, work_dir / "cmd.sh")
print(f"Adding to zsh history the command: {mycommand}", file=sys.stderr)
proc = spawn_process(func=command_exec, cmd=args, work_dir=work_dir, env=env)
try:
ipdb.set_trace()
finally:
proc.terminate()
def write_command(command: str, loc: Path) -> None:
with open(loc, "w") as f:
f.write("#!/usr/bin/env bash\n")
f.write(command)
st = os.stat(loc)
os.chmod(loc, st.st_mode | stat.S_IEXEC)
def spawn_process(func: Callable, **kwargs: Any) -> mp.Process:
mp.set_start_method(method="spawn")
proc = mp.Process(target=func, kwargs=kwargs)
proc.start()
return proc
def dump_env(env: Dict[str, str], loc: Path) -> None:
cenv = env.copy()
with open(loc, "w") as f:
f.write("#!/usr/bin/env bash\n")
for k, v in cenv.items():
if v.count("\n") > 0 or v.count('"') > 0 or v.count("'") > 0:
continue
f.write(f"export {k}='{v}'\n")
st = os.stat(loc)
os.chmod(loc, st.st_mode | stat.S_IEXEC)
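
A hedged usage sketch for the new repro_env_break helper (the import path and the command are illustrative assumptions): it writes the failing command to cmd.sh in work_dir, opens an xterm running zsh there, and drops the caller into ipdb until the debugger session ends.

# from pathlib import Path
# from clan_cli.debug import repro_env_break   # assumed module path
# repro_env_break(work_dir=Path("/tmp/repro"), cmd=["nix", "build", ".#checks"])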

View File

@@ -1,12 +1,16 @@
import logging
import os
import sys
from pathlib import Path
from typing import Optional
from .errors import ClanError
from .types import FlakeName
log = logging.getLogger(__name__)
def get_clan_flake_toplevel() -> Path:
def _get_clan_flake_toplevel() -> Path:
return find_toplevel([".clan-flake", ".git", ".hg", ".svn", "flake.nix"])
@@ -38,6 +42,58 @@ def user_config_dir() -> Path:
return Path(os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config")))
def user_data_dir() -> Path:
if sys.platform == "win32":
return Path(os.getenv("APPDATA", os.path.expanduser("~\\AppData\\Roaming\\")))
elif sys.platform == "darwin":
return Path(os.path.expanduser("~/Library/Application Support/"))
else:
return Path(os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/state")))
def clan_data_dir() -> Path:
path = user_data_dir() / "clan"
if not path.exists():
log.debug(f"Creating path with parents {path}")
path.mkdir(parents=True)
return path.resolve()
def clan_config_dir() -> Path:
path = user_config_dir() / "clan"
if not path.exists():
log.debug(f"Creating path with parents {path}")
path.mkdir(parents=True)
return path.resolve()
def clan_flakes_dir() -> Path:
path = clan_data_dir() / "flake"
if not path.exists():
log.debug(f"Creating path with parents {path}")
path.mkdir(parents=True)
return path.resolve()
def specific_flake_dir(flake_name: FlakeName) -> Path:
flake_dir = clan_flakes_dir() / flake_name
if not flake_dir.exists():
raise ClanError(f"Flake '{flake_name}' does not exist")
return flake_dir
def machines_dir(flake_name: FlakeName) -> Path:
return specific_flake_dir(flake_name) / "machines"
def specific_machine_dir(flake_name: FlakeName, machine: str) -> Path:
return machines_dir(flake_name) / machine
def machine_settings_file(flake_name: FlakeName, machine: str) -> Path:
return specific_machine_dir(flake_name, machine) / "settings.json"
def module_root() -> Path:
return Path(__file__).parent
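
Roughly, on Linux with default XDG settings the new helpers resolve as sketched below (the flake name "myclan" is an illustrative placeholder; clan_flakes_dir() creates missing directories, while specific_flake_dir() raises ClanError if the flake does not exist):

# clan_data_dir()                            -> ~/.local/state/clan
# clan_flakes_dir()                          -> ~/.local/state/clan/flake
# specific_flake_dir("myclan")               -> ~/.local/state/clan/flake/myclan
# machine_settings_file("myclan", "server1") -> .../myclan/machines/server1/settings.json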

View File

@@ -1,88 +0,0 @@
import shlex
import subprocess
from pathlib import Path
from typing import Optional
from clan_cli.dirs import find_git_repo_root
from clan_cli.errors import ClanError
from clan_cli.nix import nix_shell
# generic vcs agnostic commit function
def commit_file(
file_path: Path,
repo_dir: Optional[Path] = None,
commit_message: Optional[str] = None,
) -> None:
if repo_dir is None:
repo_dir = find_git_repo_root()
if repo_dir is None:
return
# check that the file is in the git repository and exists
if not Path(file_path).resolve().is_relative_to(repo_dir.resolve()):
raise ClanError(f"File {file_path} is not in the git repository {repo_dir}")
if not file_path.exists():
raise ClanError(f"File {file_path} does not exist")
# generate commit message if not provided
if commit_message is None:
# ensure that mentioned file path is relative to repo
commit_message = f"Add {file_path.relative_to(repo_dir)}"
# check if the repo is a git repo and commit
if (repo_dir / ".git").exists():
_commit_file_to_git(repo_dir, file_path, commit_message)
else:
return
def _commit_file_to_git(repo_dir: Path, file_path: Path, commit_message: str) -> None:
"""Commit a file to a git repository.
:param repo_dir: The path to the git repository.
:param file_path: The path to the file to commit.
:param commit_message: The commit message.
:raises ClanError: If the file is not in the git repository.
"""
cmd = nix_shell(
["git"],
["git", "-C", str(repo_dir), "add", str(file_path)],
)
# add the file to the git index
try:
subprocess.run(cmd, check=True)
except subprocess.CalledProcessError as e:
raise ClanError(
f"Failed to add {file_path} to git repository {repo_dir}:\n{shlex.join(cmd)}\n exited with {e.returncode}"
) from e
# check if there is a diff
cmd = nix_shell(
["git"],
["git", "-C", str(repo_dir), "diff", "--cached", "--exit-code"],
)
result = subprocess.run(cmd, cwd=repo_dir)
# if there is no diff, return
if result.returncode == 0:
return
# commit only that file
cmd = nix_shell(
["git"],
[
"git",
"-C",
str(repo_dir),
"commit",
"-m",
commit_message,
str(file_path.relative_to(repo_dir)),
],
)
try:
subprocess.run(
cmd,
check=True,
)
except subprocess.CalledProcessError as e:
raise ClanError(
f"Failed to commit {file_path} to git repository {repo_dir}:\n{shlex.join(cmd)}\n exited with {e.returncode}"
) from e
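
A minimal usage sketch of commit_file (the file path is illustrative): stage and commit a single file, generating the message "Add machines/server1/settings.json" when no commit message is passed.

# from pathlib import Path
# commit_file(Path("machines/server1/settings.json"), repo_dir=Path("."))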

View File

@@ -1,35 +0,0 @@
#!/usr/bin/env python3
import argparse
import subprocess
import urllib
from typing import Optional
def join(args: argparse.Namespace) -> None:
# start webui in background
uri = args.flake_uri.removeprefix("clan://")
subprocess.run(
["clan", "--debug", "webui", f"/join?flake={urllib.parse.quote_plus(uri)}"],
# stdout=sys.stdout,
# stderr=sys.stderr,
)
print(f"joined clan {args.flake_uri}")
# takes a (sub)parser and configures it
def register_parser(
parser: Optional[argparse.ArgumentParser],
) -> None:
if parser is None:
parser = argparse.ArgumentParser(
description="join a remote clan",
)
# inject callback function to process the input later
parser.set_defaults(func=join)
parser.add_argument(
"flake_uri",
help="flake uri to join",
type=str,
)

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env python3
import argparse
from .create import register_create_parser
from .delete import register_delete_parser
from .install import register_install_parser
from .list import register_list_parser
from .update import register_update_parser
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
update_parser = subparser.add_parser("update", help="Update a machine")
register_update_parser(update_parser)
create_parser = subparser.add_parser("create", help="Create a machine")
register_create_parser(create_parser)
remove_parser = subparser.add_parser("remove", help="Remove a machine")
register_delete_parser(remove_parser)
list_parser = subparser.add_parser("list", help="List machines")
register_list_parser(list_parser)
install_parser = subparser.add_parser("install", help="Install a machine")
register_install_parser(install_parser)
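
Illustrative invocations of the subcommands registered above, assuming the parser is mounted as 'clan machines' in the top-level CLI (machine names and addresses are placeholders):

# clan machines create server1
# clan machines list
# clan machines install server1 root@192.0.2.10:2222
# clan machines update server1 --target-host root@192.0.2.10
# clan machines remove server1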

View File

@@ -1,20 +0,0 @@
import argparse
from .folders import machine_folder
def create_machine(name: str) -> None:
folder = machine_folder(name)
folder.mkdir(parents=True, exist_ok=True)
# create empty settings.json file inside the folder
with open(folder / "settings.json", "w") as f:
f.write("{}")
def create_command(args: argparse.Namespace) -> None:
create_machine(args.host)
def register_create_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("host", type=str)
parser.set_defaults(func=create_command)

View File

@@ -1,18 +0,0 @@
import argparse
import shutil
from ..errors import ClanError
from .folders import machine_folder
def delete_command(args: argparse.Namespace) -> None:
folder = machine_folder(args.host)
if folder.exists():
shutil.rmtree(folder)
else:
raise ClanError(f"Machine {args.host} does not exist")
def register_delete_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument("host", type=str)
parser.set_defaults(func=delete_command)

View File

@@ -1,9 +0,0 @@
from .folders import machine_folder
def machine_has_fact(machine: str, fact: str) -> bool:
return (machine_folder(machine) / "facts" / fact).exists()
def machine_get_fact(machine: str, fact: str) -> str:
return (machine_folder(machine) / "facts" / fact).read_text()

View File

@@ -1,15 +0,0 @@
from pathlib import Path
from ..dirs import get_clan_flake_toplevel
def machines_folder() -> Path:
return get_clan_flake_toplevel() / "machines"
def machine_folder(machine: str) -> Path:
return machines_folder() / machine
def machine_settings_file(machine: str) -> Path:
return machine_folder(machine) / "settings.json"

View File

@@ -1,60 +0,0 @@
import argparse
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
from ..machines.machines import Machine
from ..nix import nix_shell
from ..secrets.generate import generate_secrets
def install_nixos(machine: Machine) -> None:
h = machine.host
target_host = f"{h.user or 'root'}@{h.host}"
flake_attr = h.meta.get("flake_attr", "")
generate_secrets(machine)
with TemporaryDirectory() as tmpdir_:
tmpdir = Path(tmpdir_)
machine.upload_secrets(tmpdir / machine.secrets_upload_directory)
subprocess.run(
nix_shell(
["nixos-anywhere"],
[
"nixos-anywhere",
"-f",
f"{machine.clan_dir}#{flake_attr}",
"-t",
"--no-reboot",
"--extra-files",
str(tmpdir),
target_host,
],
),
check=True,
)
def install_command(args: argparse.Namespace) -> None:
machine = Machine(args.machine)
machine.deployment_address = args.target_host
install_nixos(machine)
def register_install_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
type=str,
help="machine to install",
)
parser.add_argument(
"target_host",
type=str,
help="ssh address to install to in the form of user@host:2222",
)
parser.set_defaults(func=install_command)

View File

@@ -1,29 +0,0 @@
import argparse
import logging
import os
from .folders import machines_folder
from .types import validate_hostname
log = logging.getLogger(__name__)
def list_machines() -> list[str]:
path = machines_folder()
log.debug(f"Listing machines in {path}")
if not path.exists():
return []
objs: list[str] = []
for f in os.listdir(path):
if validate_hostname(f):
objs.append(f)
return objs
def list_command(args: argparse.Namespace) -> None:
for machine in list_machines():
print(machine)
def register_list_parser(parser: argparse.ArgumentParser) -> None:
parser.set_defaults(func=list_command)

View File

@@ -1,116 +0,0 @@
import json
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
from ..dirs import get_clan_flake_toplevel
from ..nix import nix_build, nix_config, nix_eval
from ..ssh import Host, parse_deployment_address
def build_machine_data(machine_name: str, clan_dir: Path) -> dict:
config = nix_config()
system = config["system"]
outpath = subprocess.run(
nix_build(
[
f'path:{clan_dir}#clanInternals.machines."{system}"."{machine_name}".config.system.clan.deployment.file'
]
),
stdout=subprocess.PIPE,
check=True,
text=True,
).stdout.strip()
return json.loads(Path(outpath).read_text())
class Machine:
def __init__(
self,
name: str,
clan_dir: Optional[Path] = None,
machine_data: Optional[dict] = None,
) -> None:
"""
Creates a Machine
@name: the name of the machine
@clan_dir: the directory of the clan, optional, if not set it will be determined from the current working directory
@machine_data: optional machine data dict; can be passed to skip evaluation of the machine
"""
self.name = name
if clan_dir is None:
self.clan_dir = get_clan_flake_toplevel()
else:
self.clan_dir = clan_dir
if machine_data is None:
self.machine_data = build_machine_data(name, self.clan_dir)
else:
self.machine_data = machine_data
self.deployment_address = self.machine_data["deploymentAddress"]
self.upload_secrets = self.machine_data["uploadSecrets"]
self.generate_secrets = self.machine_data["generateSecrets"]
self.secrets_upload_directory = self.machine_data["secretsUploadDirectory"]
@property
def host(self) -> Host:
return parse_deployment_address(
self.name, self.deployment_address, meta={"machine": self}
)
def run_upload_secrets(self, secrets_dir: Path) -> bool:
"""
Upload the secrets to the provided directory
@secrets_dir: the directory to store the secrets in
"""
env = os.environ.copy()
env["CLAN_DIR"] = str(self.clan_dir)
env["PYTHONPATH"] = str(
":".join(sys.path)
) # TODO do this in the clanCore module
env["SECRETS_DIR"] = str(secrets_dir)
print(f"uploading secrets... {self.upload_secrets}")
proc = subprocess.run(
[self.upload_secrets],
env=env,
stdout=subprocess.PIPE,
text=True,
)
if proc.returncode == 23:
print("no secrets to upload")
return False
elif proc.returncode != 0:
print("failed generate secrets directory")
exit(1)
return True
def eval_nix(self, attr: str) -> str:
"""
eval a nix attribute of the machine
@attr: the attribute to get
"""
output = subprocess.run(
nix_eval([f"path:{self.clan_dir}#{attr}"]),
stdout=subprocess.PIPE,
check=True,
text=True,
).stdout.strip()
return output
def build_nix(self, attr: str) -> Path:
"""
build a nix attribute of the machine
@attr: the attribute to get
"""
outpath = subprocess.run(
nix_build([f"path:{self.clan_dir}#{attr}"]),
stdout=subprocess.PIPE,
check=True,
text=True,
).stdout.strip()
return Path(outpath)
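
A hedged sketch of how the Machine class is typically driven (the machine name is a placeholder; constructing it without machine_data triggers a nix build of the deployment file shown above):

# machine = Machine("server1")
# print(machine.deployment_address)                 # from the evaluated deployment file
# host = machine.host                               # parsed via parse_deployment_address
# machine.run_upload_secrets(Path("/tmp/secrets"))  # returns False if there is nothing to upload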

View File

@@ -1,22 +0,0 @@
import argparse
import re
VALID_HOSTNAME = re.compile(r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", re.IGNORECASE)
def validate_hostname(hostname: str) -> bool:
if len(hostname) > 63:
return False
return VALID_HOSTNAME.match(hostname) is not None
def machine_name_type(arg_value: str) -> str:
if len(arg_value) > 63:
raise argparse.ArgumentTypeError(
"Machine name must be less than 63 characters long"
)
if not VALID_HOSTNAME.match(arg_value):
raise argparse.ArgumentTypeError(
"Invalid character in machine name. Allowed characters are a-z, 0-9, ., -, and _. Must not start with a number"
)
return arg_value
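
A few illustrative inputs for validate_hostname, following the regex above (a sketch, not part of the diff):

# validate_hostname("server-01")   -> True
# validate_hostname("-bad")        -> False  (must not start or end with "-")
# validate_hostname("a" * 64)      -> False  (longer than 63 characters)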

View File

@@ -1,150 +0,0 @@
import argparse
import json
import os
import subprocess
from pathlib import Path
from ..dirs import get_clan_flake_toplevel
from ..machines.machines import Machine
from ..nix import nix_build, nix_command, nix_config
from ..secrets.generate import generate_secrets
from ..secrets.upload import upload_secrets
from ..ssh import Host, HostGroup, HostKeyCheck, parse_deployment_address
def deploy_nixos(hosts: HostGroup, clan_dir: Path) -> None:
"""
Deploy to all hosts in parallel
"""
def deploy(h: Host) -> None:
target = f"{h.user or 'root'}@{h.host}"
ssh_arg = f"-p {h.port}" if h.port else ""
env = os.environ.copy()
env["NIX_SSHOPTS"] = ssh_arg
res = h.run_local(
nix_command(["flake", "archive", "--to", f"ssh://{target}", "--json"]),
check=True,
stdout=subprocess.PIPE,
extra_env=env,
)
data = json.loads(res.stdout)
path = data["path"]
if h.host_key_check != HostKeyCheck.STRICT:
ssh_arg += " -o StrictHostKeyChecking=no"
if h.host_key_check == HostKeyCheck.NONE:
ssh_arg += " -o UserKnownHostsFile=/dev/null"
ssh_arg += " -i " + h.key if h.key else ""
flake_attr = h.meta.get("flake_attr", "")
generate_secrets(h.meta["machine"])
upload_secrets(h.meta["machine"])
target_host = h.meta.get("target_host")
if target_host:
target_user = h.meta.get("target_user")
if target_user:
target_host = f"{target_user}@{target_host}"
extra_args = h.meta.get("extra_args", [])
cmd = (
["nixos-rebuild", "switch"]
+ extra_args
+ [
"--fast",
"--option",
"keep-going",
"true",
"--option",
"accept-flake-config",
"true",
"--build-host",
"",
"--flake",
f"{path}#{flake_attr}",
]
)
if target_host:
cmd.extend(["--target-host", target_host])
ret = h.run(cmd, check=False)
# re-retry switch if the first time fails
if ret.returncode != 0:
ret = h.run(cmd)
hosts.run_function(deploy)
# function to speed up eval if we want to evaluate all machines
def get_all_machines(clan_dir: Path) -> HostGroup:
config = nix_config()
system = config["system"]
machines_json = subprocess.run(
nix_build([f'{clan_dir}#clanInternals.all-machines-json."{system}"']),
stdout=subprocess.PIPE,
check=True,
text=True,
).stdout
machines = json.loads(Path(machines_json).read_text())
hosts = []
for name, machine_data in machines.items():
# very hacky. would be better to do a MachinesGroup instead
host = parse_deployment_address(
name,
machine_data["deploymentAddress"],
meta={"machine": Machine(name=name, machine_data=machine_data)},
)
hosts.append(host)
return HostGroup(hosts)
def get_selected_machines(machine_names: list[str], clan_dir: Path) -> HostGroup:
hosts = []
for name in machine_names:
machine = Machine(name=name, clan_dir=clan_dir)
hosts.append(machine.host)
return HostGroup(hosts)
# FIXME: we want some kind of inventory here.
def update(args: argparse.Namespace) -> None:
clan_dir = get_clan_flake_toplevel()
if len(args.machines) == 1 and args.target_host is not None:
machine = Machine(name=args.machines[0], clan_dir=clan_dir)
machine.deployment_address = args.target_host
host = parse_deployment_address(
args.machines[0],
args.target_host,
meta={"machine": machine},
)
machines = HostGroup([host])
elif args.target_host is not None:
print("target host can only be specified for a single machine")
exit(1)
else:
if len(args.machines) == 0:
machines = get_all_machines(clan_dir)
else:
machines = get_selected_machines(args.machines, clan_dir)
deploy_nixos(machines, clan_dir)
def register_update_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machines",
type=str,
help="machine to update. if empty, update all machines",
nargs="*",
default=[],
)
parser.add_argument(
"--target-host",
type=str,
help="address of the machine to update, in the format of user@host:1234",
)
parser.set_defaults(func=update)

View File

@@ -2,8 +2,11 @@ import json
import os
import subprocess
import tempfile
from pathlib import Path
from typing import Any
from pydantic import AnyUrl
from .dirs import nixpkgs_flake, nixpkgs_source
@@ -11,7 +14,7 @@ def nix_command(flags: list[str]) -> list[str]:
return ["nix", "--extra-experimental-features", "nix-command flakes"] + flags
def nix_flake_show(flake_url: str) -> list[str]:
def nix_flake_show(flake_url: AnyUrl | Path) -> list[str]:
return nix_command(
[
"flake",

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env python3
import argparse
from .generate import register_generate_parser
from .groups import register_groups_parser
from .import_sops import register_import_sops_parser
from .key import register_key_parser
from .machines import register_machines_parser
from .secrets import register_secrets_parser
from .upload import register_upload_parser
from .users import register_users_parser
# takes a (sub)parser and configures it
def register_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
groups_parser = subparser.add_parser("groups", help="manage groups")
register_groups_parser(groups_parser)
users_parser = subparser.add_parser("users", help="manage users")
register_users_parser(users_parser)
machines_parser = subparser.add_parser("machines", help="manage machines")
register_machines_parser(machines_parser)
import_sops_parser = subparser.add_parser("import-sops", help="import a sops file")
register_import_sops_parser(import_sops_parser)
parser_generate = subparser.add_parser(
"generate", help="generate secrets for machines if they don't exist yet"
)
register_generate_parser(parser_generate)
parser_upload = subparser.add_parser("upload", help="upload secrets for machines")
register_upload_parser(parser_upload)
parser_key = subparser.add_parser("key", help="create and show age keys")
register_key_parser(parser_key)
register_secrets_parser(subparser)

View File

@@ -1,43 +0,0 @@
import os
import shutil
from pathlib import Path
from typing import Callable
from ..dirs import get_clan_flake_toplevel
from ..errors import ClanError
def get_sops_folder() -> Path:
return get_clan_flake_toplevel() / "sops"
def gen_sops_subfolder(subdir: str) -> Callable[[], Path]:
def folder() -> Path:
return get_clan_flake_toplevel() / "sops" / subdir
return folder
sops_secrets_folder = gen_sops_subfolder("secrets")
sops_users_folder = gen_sops_subfolder("users")
sops_machines_folder = gen_sops_subfolder("machines")
sops_groups_folder = gen_sops_subfolder("groups")
def list_objects(path: Path, is_valid: Callable[[str], bool]) -> list[str]:
objs: list[str] = []
if not path.exists():
return objs
for f in os.listdir(path):
if is_valid(f):
objs.append(f)
return objs
def remove_object(path: Path, name: str) -> None:
try:
shutil.rmtree(path / name)
except FileNotFoundError:
raise ClanError(f"{name} not found in {path}")
if not os.listdir(path):
os.rmdir(path)

View File

@@ -1,41 +0,0 @@
import argparse
import logging
import os
import subprocess
import sys
from clan_cli.errors import ClanError
from ..machines.machines import Machine
log = logging.getLogger(__name__)
def generate_secrets(machine: Machine) -> None:
env = os.environ.copy()
env["CLAN_DIR"] = str(machine.clan_dir)
env["PYTHONPATH"] = ":".join(sys.path) # TODO do this in the clanCore module
print(f"generating secrets... {machine.generate_secrets}")
proc = subprocess.run(
[machine.generate_secrets],
env=env,
)
if proc.returncode != 0:
raise ClanError("failed to generate secrets")
else:
print("successfully generated secrets")
def generate_command(args: argparse.Namespace) -> None:
machine = Machine(args.machine)
generate_secrets(machine)
def register_generate_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to generate secrets for",
)
parser.set_defaults(func=generate_command)

View File

@@ -1,249 +0,0 @@
import argparse
import os
from pathlib import Path
from ..errors import ClanError
from ..machines.types import machine_name_type, validate_hostname
from . import secrets
from .folders import (
sops_groups_folder,
sops_machines_folder,
sops_secrets_folder,
sops_users_folder,
)
from .sops import update_keys
from .types import (
VALID_USER_NAME,
group_name_type,
secret_name_type,
user_name_type,
)
def machines_folder(group: str) -> Path:
return sops_groups_folder() / group / "machines"
def users_folder(group: str) -> Path:
return sops_groups_folder() / group / "users"
class Group:
def __init__(self, name: str, machines: list[str], users: list[str]) -> None:
self.name = name
self.machines = machines
self.users = users
def list_groups() -> list[Group]:
groups: list[Group] = []
folder = sops_groups_folder()
if not folder.exists():
return groups
for name in os.listdir(folder):
group_folder = folder / name
if not group_folder.is_dir():
continue
machines_path = machines_folder(name)
machines = []
if machines_path.is_dir():
for f in machines_path.iterdir():
if validate_hostname(f.name):
machines.append(f.name)
users_path = users_folder(name)
users = []
if users_path.is_dir():
for f in users_path.iterdir():
if VALID_USER_NAME.match(f.name):
users.append(f.name)
groups.append(Group(name, machines, users))
return groups
def list_command(args: argparse.Namespace) -> None:
for group in list_groups():
print(group.name)
if group.machines:
print("machines:")
for machine in group.machines:
print(f" {machine}")
if group.users:
print("users:")
for user in group.users:
print(f" {user}")
print()
def list_directory(directory: Path) -> str:
if not directory.exists():
return f"{directory} does not exist"
msg = f"\n{directory} contains:"
for f in directory.iterdir():
msg += f"\n {f.name}"
return msg
def update_group_keys(group: str) -> None:
for secret_ in secrets.list_secrets():
secret = sops_secrets_folder() / secret_
if (secret / "groups" / group).is_symlink():
update_keys(
secret,
list(sorted(secrets.collect_keys_for_path(secret))),
)
def add_member(group_folder: Path, source_folder: Path, name: str) -> None:
source = source_folder / name
if not source.exists():
msg = f"{name} does not exist in {source_folder}: "
msg += list_directory(source_folder)
raise ClanError(msg)
group_folder.mkdir(parents=True, exist_ok=True)
user_target = group_folder / name
if user_target.exists():
if not user_target.is_symlink():
raise ClanError(
f"Cannot add user {name}. {user_target} exists but is not a symlink"
)
os.remove(user_target)
user_target.symlink_to(os.path.relpath(source, user_target.parent))
update_group_keys(group_folder.parent.name)
def remove_member(group_folder: Path, name: str) -> None:
target = group_folder / name
if not target.exists():
msg = f"{name} does not exist in group in {group_folder}: "
msg += list_directory(group_folder)
raise ClanError(msg)
os.remove(target)
if len(os.listdir(group_folder)) > 0:
update_group_keys(group_folder.parent.name)
if len(os.listdir(group_folder)) == 0:
os.rmdir(group_folder)
if len(os.listdir(group_folder.parent)) == 0:
os.rmdir(group_folder.parent)
def add_user(group: str, name: str) -> None:
add_member(users_folder(group), sops_users_folder(), name)
def add_user_command(args: argparse.Namespace) -> None:
add_user(args.group, args.user)
def remove_user(group: str, name: str) -> None:
remove_member(users_folder(group), name)
def remove_user_command(args: argparse.Namespace) -> None:
remove_user(args.group, args.user)
def add_machine(group: str, name: str) -> None:
add_member(machines_folder(group), sops_machines_folder(), name)
def add_machine_command(args: argparse.Namespace) -> None:
add_machine(args.group, args.machine)
def remove_machine(group: str, name: str) -> None:
remove_member(machines_folder(group), name)
def remove_machine_command(args: argparse.Namespace) -> None:
remove_machine(args.group, args.machine)
def add_group_argument(parser: argparse.ArgumentParser) -> None:
parser.add_argument("group", help="the name of the secret", type=group_name_type)
def add_secret(group: str, name: str) -> None:
secrets.allow_member(secrets.groups_folder(name), sops_groups_folder(), group)
def add_secret_command(args: argparse.Namespace) -> None:
add_secret(args.group, args.secret)
def remove_secret(group: str, name: str) -> None:
secrets.disallow_member(secrets.groups_folder(name), group)
def remove_secret_command(args: argparse.Namespace) -> None:
remove_secret(args.group, args.secret)
def register_groups_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
list_parser = subparser.add_parser("list", help="list groups")
list_parser.set_defaults(func=list_command)
add_machine_parser = subparser.add_parser(
"add-machine", help="add a machine to group"
)
add_group_argument(add_machine_parser)
add_machine_parser.add_argument(
"machine", help="the name of the machines to add", type=machine_name_type
)
add_machine_parser.set_defaults(func=add_machine_command)
remove_machine_parser = subparser.add_parser(
"remove-machine", help="remove a machine from group"
)
add_group_argument(remove_machine_parser)
remove_machine_parser.add_argument(
"machine", help="the name of the machines to remove", type=machine_name_type
)
remove_machine_parser.set_defaults(func=remove_machine_command)
add_user_parser = subparser.add_parser("add-user", help="add a user to group")
add_group_argument(add_user_parser)
add_user_parser.add_argument(
"user", help="the name of the user to add", type=user_name_type
)
add_user_parser.set_defaults(func=add_user_command)
remove_user_parser = subparser.add_parser(
"remove-user", help="remove a user from group"
)
add_group_argument(remove_user_parser)
remove_user_parser.add_argument(
"user", help="the name of the user to remove", type=user_name_type
)
remove_user_parser.set_defaults(func=remove_user_command)
add_secret_parser = subparser.add_parser(
"add-secret", help="allow a user to access a secret"
)
add_secret_parser.add_argument(
"group", help="the name of the user", type=group_name_type
)
add_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
add_secret_parser.set_defaults(func=add_secret_command)
remove_secret_parser = subparser.add_parser(
"remove-secret", help="remove a group's access to a secret"
)
remove_secret_parser.add_argument(
"group", help="the name of the group", type=group_name_type
)
remove_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
remove_secret_parser.set_defaults(func=remove_secret_command)

View File

@@ -1,93 +0,0 @@
import argparse
import json
import subprocess
import sys
from pathlib import Path
from ..errors import ClanError
from ..nix import nix_shell
from .secrets import encrypt_secret, sops_secrets_folder
def import_sops(args: argparse.Namespace) -> None:
file = Path(args.sops_file)
file_type = file.suffix
try:
file.read_text()
except OSError as e:
raise ClanError(f"Could not read file {file}: {e}") from e
if file_type == ".yaml":
cmd = ["sops"]
if args.input_type:
cmd += ["--input-type", args.input_type]
cmd += ["--output-type", "json", "--decrypt", args.sops_file]
cmd = nix_shell(["sops"], cmd)
try:
res = subprocess.run(cmd, check=True, text=True, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise ClanError(f"Could not import sops file {file}: {e}") from e
secrets = json.loads(res.stdout)
for k, v in secrets.items():
k = args.prefix + k
if not isinstance(v, str):
print(
f"WARNING: {k} is not a string but {type(v)}, skipping",
file=sys.stderr,
)
continue
if (sops_secrets_folder() / k / "secret").exists():
print(
f"WARNING: {k} already exists, skipping",
file=sys.stderr,
)
continue
encrypt_secret(
sops_secrets_folder() / k,
v,
add_groups=args.group,
add_machines=args.machine,
add_users=args.user,
)
def register_import_sops_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"--input-type",
type=str,
default=None,
help="the input type of the sops file (yaml, json, ...). If not specified, it will be guessed from the file extension",
)
parser.add_argument(
"--group",
type=str,
action="append",
default=[],
help="the group to import the secrets to",
)
parser.add_argument(
"--machine",
type=str,
action="append",
default=[],
help="the machine to import the secrets to",
)
parser.add_argument(
"--user",
type=str,
action="append",
default=[],
help="the user to import the secrets to",
)
parser.add_argument(
"--prefix",
type=str,
default="",
help="the prefix to use for the secret names",
)
parser.add_argument(
"sops_file",
type=str,
help="the sops file to import (- for stdin)",
)
parser.set_defaults(func=import_sops)

View File

@@ -1,48 +0,0 @@
import argparse
from .. import tty
from ..errors import ClanError
from .sops import default_sops_key_path, generate_private_key, get_public_key
def generate_key() -> str:
path = default_sops_key_path()
if path.exists():
raise ClanError(f"Key already exists at {path}")
priv_key, pub_key = generate_private_key()
path.write_text(priv_key)
return pub_key
def show_key() -> str:
return get_public_key(default_sops_key_path().read_text())
def generate_command(args: argparse.Namespace) -> None:
pub_key = generate_key()
tty.info(
f"Generated age private key at '{default_sops_key_path()}' for your user. Please back it up on a secure location or you will lose access to your secrets."
)
tty.info(
f"Also add your age public key to the repository with 'clan secrets users add youruser {pub_key}' (replace youruser with your user name)"
)
def show_command(args: argparse.Namespace) -> None:
print(show_key())
def register_key_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
parser_generate = subparser.add_parser("generate", help="generate age key")
parser_generate.set_defaults(func=generate_command)
parser_show = subparser.add_parser("show", help="show age public key")
parser_show.set_defaults(func=show_command)

View File

@@ -1,131 +0,0 @@
import argparse
from ..machines.types import machine_name_type, validate_hostname
from . import secrets
from .folders import list_objects, remove_object, sops_machines_folder
from .sops import read_key, write_key
from .types import public_or_private_age_key_type, secret_name_type
def add_machine(name: str, key: str, force: bool) -> None:
write_key(sops_machines_folder() / name, key, force)
def remove_machine(name: str) -> None:
remove_object(sops_machines_folder(), name)
def get_machine(name: str) -> str:
return read_key(sops_machines_folder() / name)
def has_machine(name: str) -> bool:
return (sops_machines_folder() / name / "key.json").exists()
def list_machines() -> list[str]:
path = sops_machines_folder()
def validate(name: str) -> bool:
return validate_hostname(name) and has_machine(name)
return list_objects(path, validate)
def add_secret(machine: str, secret: str) -> None:
secrets.allow_member(
secrets.machines_folder(secret), sops_machines_folder(), machine
)
def remove_secret(machine: str, secret: str) -> None:
secrets.disallow_member(secrets.machines_folder(secret), machine)
def list_command(args: argparse.Namespace) -> None:
lst = list_machines()
if len(lst) > 0:
print("\n".join(lst))
def add_command(args: argparse.Namespace) -> None:
add_machine(args.machine, args.key, args.force)
def get_command(args: argparse.Namespace) -> None:
print(get_machine(args.machine))
def remove_command(args: argparse.Namespace) -> None:
remove_machine(args.machine)
def add_secret_command(args: argparse.Namespace) -> None:
add_secret(args.machine, args.secret)
def remove_secret_command(args: argparse.Namespace) -> None:
remove_secret(args.machine, args.secret)
def register_machines_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
list_parser = subparser.add_parser("list", help="list machines")
list_parser.set_defaults(func=list_command)
add_parser = subparser.add_parser("add", help="add a machine")
add_parser.add_argument(
"-f",
"--force",
help="overwrite existing machine",
action="store_true",
default=False,
)
add_parser.add_argument(
"machine", help="the name of the machine", type=machine_name_type
)
add_parser.add_argument(
"key",
help="public key or private key of the user",
type=public_or_private_age_key_type,
)
add_parser.set_defaults(func=add_command)
get_parser = subparser.add_parser("get", help="get a machine public key")
get_parser.add_argument(
"machine", help="the name of the machine", type=machine_name_type
)
get_parser.set_defaults(func=get_command)
remove_parser = subparser.add_parser("remove", help="remove a machine")
remove_parser.add_argument(
"machine", help="the name of the machine", type=machine_name_type
)
remove_parser.set_defaults(func=remove_command)
add_secret_parser = subparser.add_parser(
"add-secret", help="allow a machine to access a secret"
)
add_secret_parser.add_argument(
"machine", help="the name of the machine", type=machine_name_type
)
add_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
add_secret_parser.set_defaults(func=add_secret_command)
remove_secret_parser = subparser.add_parser(
"remove-secret", help="remove a group's access to a secret"
)
remove_secret_parser.add_argument(
"machine", help="the name of the group", type=machine_name_type
)
remove_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
remove_secret_parser.set_defaults(func=remove_secret_command)

View File

@@ -1,280 +0,0 @@
import argparse
import getpass
import os
import shutil
import sys
from pathlib import Path
from typing import IO
from .. import tty
from ..errors import ClanError
from .folders import (
list_objects,
sops_groups_folder,
sops_machines_folder,
sops_secrets_folder,
sops_users_folder,
)
from .sops import decrypt_file, encrypt_file, ensure_sops_key, read_key, update_keys
from .types import VALID_SECRET_NAME, secret_name_type
def collect_keys_for_type(folder: Path) -> set[str]:
if not folder.exists():
return set()
keys = set()
for p in folder.iterdir():
if not p.is_symlink():
continue
try:
target = p.resolve()
except FileNotFoundError:
tty.warn(f"Ignoring broken symlink {p}")
continue
kind = target.parent.name
if folder.name != kind:
tty.warn(f"Expected {p} to point to {folder} but points to {target.parent}")
continue
keys.add(read_key(target))
return keys
def collect_keys_for_path(path: Path) -> set[str]:
keys = set([])
keys.update(collect_keys_for_type(path / "machines"))
keys.update(collect_keys_for_type(path / "users"))
groups = path / "groups"
if not groups.is_dir():
return keys
for group in groups.iterdir():
keys.update(collect_keys_for_type(group / "machines"))
keys.update(collect_keys_for_type(group / "users"))
return keys
def encrypt_secret(
secret: Path,
value: IO[str] | str | None,
add_users: list[str] = [],
add_machines: list[str] = [],
add_groups: list[str] = [],
) -> None:
key = ensure_sops_key()
keys = set([])
for user in add_users:
allow_member(users_folder(secret.name), sops_users_folder(), user, False)
for machine in add_machines:
allow_member(
machines_folder(secret.name), sops_machines_folder(), machine, False
)
for group in add_groups:
allow_member(groups_folder(secret.name), sops_groups_folder(), group, False)
keys = collect_keys_for_path(secret)
if key.pubkey not in keys:
keys.add(key.pubkey)
allow_member(
users_folder(secret.name), sops_users_folder(), key.username, False
)
encrypt_file(secret / "secret", value, list(sorted(keys)))
def remove_secret(secret: str) -> None:
path = sops_secrets_folder() / secret
if not path.exists():
raise ClanError(f"Secret '{secret}' does not exist")
shutil.rmtree(path)
def remove_command(args: argparse.Namespace) -> None:
remove_secret(args.secret)
def add_secret_argument(parser: argparse.ArgumentParser) -> None:
parser.add_argument("secret", help="the name of the secret", type=secret_name_type)
def machines_folder(group: str) -> Path:
return sops_secrets_folder() / group / "machines"
def users_folder(group: str) -> Path:
return sops_secrets_folder() / group / "users"
def groups_folder(group: str) -> Path:
return sops_secrets_folder() / group / "groups"
def list_directory(directory: Path) -> str:
if not directory.exists():
return f"{directory} does not exist"
msg = f"\n{directory} contains:"
for f in directory.iterdir():
msg += f"\n {f.name}"
return msg
def allow_member(
group_folder: Path, source_folder: Path, name: str, do_update_keys: bool = True
) -> None:
source = source_folder / name
if not source.exists():
msg = f"{name} does not exist in {source_folder}: "
msg += list_directory(source_folder)
raise ClanError(msg)
group_folder.mkdir(parents=True, exist_ok=True)
user_target = group_folder / name
if user_target.exists():
if not user_target.is_symlink():
raise ClanError(
f"Cannot add user {name}. {user_target} exists but is not a symlink"
)
os.remove(user_target)
user_target.symlink_to(os.path.relpath(source, user_target.parent))
if do_update_keys:
update_keys(
group_folder.parent,
list(sorted(collect_keys_for_path(group_folder.parent))),
)
def disallow_member(group_folder: Path, name: str) -> None:
target = group_folder / name
if not target.exists():
msg = f"{name} does not exist in group in {group_folder}: "
msg += list_directory(group_folder)
raise ClanError(msg)
keys = collect_keys_for_path(group_folder.parent)
if len(keys) < 2:
raise ClanError(
f"Cannot remove {name} from {group_folder.parent.name}. No keys left. Use 'clan secrets remove {name}' to remove the secret."
)
os.remove(target)
if len(os.listdir(group_folder)) == 0:
os.rmdir(group_folder)
if len(os.listdir(group_folder.parent)) == 0:
os.rmdir(group_folder.parent)
update_keys(
target.parent.parent, list(sorted(collect_keys_for_path(group_folder.parent)))
)
def has_secret(secret: str) -> bool:
return (sops_secrets_folder() / secret / "secret").exists()
def list_secrets() -> list[str]:
path = sops_secrets_folder()
def validate(name: str) -> bool:
return VALID_SECRET_NAME.match(name) is not None and has_secret(name)
return list_objects(path, validate)
def list_command(args: argparse.Namespace) -> None:
lst = list_secrets()
if len(lst) > 0:
print("\n".join(lst))
def decrypt_secret(secret: str) -> str:
ensure_sops_key()
secret_path = sops_secrets_folder() / secret / "secret"
if not secret_path.exists():
raise ClanError(f"Secret '{secret}' does not exist")
return decrypt_file(secret_path)
def get_command(args: argparse.Namespace) -> None:
print(decrypt_secret(args.secret), end="")
def set_command(args: argparse.Namespace) -> None:
env_value = os.environ.get("SOPS_NIX_SECRET")
secret_value: str | IO[str] | None = sys.stdin
if args.edit:
secret_value = None
elif env_value:
secret_value = env_value
elif tty.is_interactive():
secret_value = getpass.getpass(prompt="Paste your secret: ")
encrypt_secret(
sops_secrets_folder() / args.secret,
secret_value,
args.user,
args.machine,
args.group,
)
def rename_command(args: argparse.Namespace) -> None:
old_path = sops_secrets_folder() / args.secret
new_path = sops_secrets_folder() / args.new_name
if not old_path.exists():
raise ClanError(f"Secret '{args.secret}' does not exist")
if new_path.exists():
raise ClanError(f"Secret '{args.new_name}' already exists")
os.rename(old_path, new_path)
def register_secrets_parser(subparser: argparse._SubParsersAction) -> None:
parser_list = subparser.add_parser("list", help="list secrets")
parser_list.set_defaults(func=list_command)
parser_get = subparser.add_parser("get", help="get a secret")
add_secret_argument(parser_get)
parser_get.set_defaults(func=get_command)
parser_set = subparser.add_parser("set", help="set a secret")
add_secret_argument(parser_set)
parser_set.add_argument(
"--group",
type=str,
action="append",
default=[],
help="the group to import the secrets to (can be repeated)",
)
parser_set.add_argument(
"--machine",
type=str,
action="append",
default=[],
help="the machine to import the secrets to (can be repeated)",
)
parser_set.add_argument(
"--user",
type=str,
action="append",
default=[],
help="the user to import the secrets to (can be repeated)",
)
parser_set.add_argument(
"-e",
"--edit",
action="store_true",
default=False,
help="edit the secret with $EDITOR instead of pasting it",
)
parser_set.set_defaults(func=set_command)
parser_rename = subparser.add_parser("rename", help="rename a secret")
add_secret_argument(parser_rename)
parser_rename.add_argument("new_name", type=str, help="the new name of the secret")
parser_rename.set_defaults(func=rename_command)
parser_remove = subparser.add_parser("remove", help="remove a secret")
add_secret_argument(parser_remove)
parser_remove.set_defaults(func=remove_command)
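
For orientation, an illustrative CLI flow built on these helpers, assuming the secrets subcommands are mounted as 'clan secrets' (secret and user names are placeholders):

# clan secrets set my-password --user alice    # reads $SOPS_NIX_SECRET, an interactive prompt, or stdin
# clan secrets get my-password
# clan secrets rename my-password db-password
# clan secrets remove db-password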

View File

@@ -1,218 +0,0 @@
import json
import os
import shutil
import subprocess
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import IO, Iterator
from ..dirs import user_config_dir
from ..errors import ClanError
from ..nix import nix_shell
from .folders import sops_machines_folder, sops_users_folder
class SopsKey:
def __init__(self, pubkey: str, username: str) -> None:
self.pubkey = pubkey
self.username = username
def get_public_key(privkey: str) -> str:
cmd = nix_shell(["age"], ["age-keygen", "-y"])
try:
res = subprocess.run(cmd, input=privkey, stdout=subprocess.PIPE, text=True)
except subprocess.CalledProcessError as e:
raise ClanError(
"Failed to get public key for age private key. Is the key malformed?"
) from e
return res.stdout.strip()
def generate_private_key() -> tuple[str, str]:
cmd = nix_shell(["age"], ["age-keygen"])
try:
proc = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, text=True)
res = proc.stdout.strip()
pubkey = None
private_key = None
for line in res.splitlines():
if line.startswith("# public key:"):
pubkey = line.split(":")[1].strip()
if not line.startswith("#"):
private_key = line
if not pubkey:
raise ClanError("Could not find public key in age-keygen output")
if not private_key:
raise ClanError("Could not find private key in age-keygen output")
return private_key, pubkey
except subprocess.CalledProcessError as e:
raise ClanError("Failed to generate private sops key") from e
def get_user_name(user: str) -> str:
"""Ask the user for their name until a unique one is provided."""
while True:
name = input(
f"Your key is not yet added to the repository. Enter your user name for which your sops key will be stored in the repository [default: {user}]: "
)
if name:
user = name
if not (sops_users_folder() / user).exists():
return user
print(f"{sops_users_folder() / user} already exists")
def ensure_user_or_machine(pub_key: str) -> SopsKey:
key = SopsKey(pub_key, username="")
folders = [sops_users_folder(), sops_machines_folder()]
for folder in folders:
if folder.exists():
for user in folder.iterdir():
if not (user / "key.json").exists():
continue
if read_key(user) == pub_key:
key.username = user.name
return key
raise ClanError(
f"Your sops key is not yet added to the repository. Please add it with 'clan secrets users add youruser {pub_key}' (replace youruser with your user name)"
)
def default_sops_key_path() -> Path:
raw_path = os.environ.get("SOPS_AGE_KEY_FILE")
if raw_path:
return Path(raw_path)
else:
return user_config_dir() / "sops" / "age" / "keys.txt"
def ensure_sops_key() -> SopsKey:
key = os.environ.get("SOPS_AGE_KEY")
if key:
return ensure_user_or_machine(get_public_key(key))
path = default_sops_key_path()
if path.exists():
return ensure_user_or_machine(get_public_key(path.read_text()))
else:
raise ClanError(
"No sops key found. Please generate one with 'clan secrets key generate'."
)
@contextmanager
def sops_manifest(keys: list[str]) -> Iterator[Path]:
with NamedTemporaryFile(delete=False, mode="w") as manifest:
json.dump(
dict(creation_rules=[dict(key_groups=[dict(age=keys)])]), manifest, indent=2
)
manifest.flush()
yield Path(manifest.name)
def update_keys(secret_path: Path, keys: list[str]) -> None:
with sops_manifest(keys) as manifest:
cmd = nix_shell(
["sops"],
[
"sops",
"--config",
str(manifest),
"updatekeys",
"--yes",
str(secret_path / "secret"),
],
)
res = subprocess.run(cmd)
if res.returncode != 0:
raise ClanError(
f"Failed to update keys for {secret_path}: sops exited with {res.returncode}"
)
def encrypt_file(
secret_path: Path, content: IO[str] | str | None, keys: list[str]
) -> None:
folder = secret_path.parent
folder.mkdir(parents=True, exist_ok=True)
with sops_manifest(keys) as manifest:
if not content:
args = ["sops", "--config", str(manifest)]
args.extend([str(secret_path)])
cmd = nix_shell(["sops"], args)
p = subprocess.run(cmd)
# returns 200 if the file is changed
if p.returncode != 0 and p.returncode != 200:
raise ClanError(
f"Failed to encrypt {secret_path}: sops exited with {p.returncode}"
)
return
# hopefully /tmp is backed by an in-memory filesystem, so secrets are not leaked to disk
with NamedTemporaryFile(delete=False) as f:
try:
with open(f.name, "w") as fd:
if isinstance(content, str):
fd.write(content)
else:
shutil.copyfileobj(content, fd)
# we pass an empty manifest to pick up existing configuration of the user
args = ["sops", "--config", str(manifest)]
args.extend(["-i", "--encrypt", str(f.name)])
cmd = nix_shell(["sops"], args)
subprocess.run(cmd, check=True)
# atomic copy of the encrypted file
with NamedTemporaryFile(dir=folder, delete=False) as f2:
shutil.copyfile(f.name, f2.name)
os.rename(f2.name, secret_path)
finally:
try:
os.remove(f.name)
except OSError:
pass
def decrypt_file(secret_path: Path) -> str:
with sops_manifest([]) as manifest:
cmd = nix_shell(
["sops"], ["sops", "--config", str(manifest), "--decrypt", str(secret_path)]
)
res = subprocess.run(cmd, stdout=subprocess.PIPE, text=True)
if res.returncode != 0:
raise ClanError(
f"Failed to decrypt {secret_path}: sops exited with {res.returncode}"
)
return res.stdout
def write_key(path: Path, publickey: str, overwrite: bool) -> None:
path.mkdir(parents=True, exist_ok=True)
try:
flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
if not overwrite:
flags |= os.O_EXCL
fd = os.open(path / "key.json", flags)
except FileExistsError:
raise ClanError(f"{path.name} already exists in {path}")
with os.fdopen(fd, "w") as f:
json.dump({"publickey": publickey, "type": "age"}, f, indent=2)
def read_key(path: Path) -> str:
with open(path / "key.json") as f:
try:
key = json.load(f)
except json.JSONDecodeError as e:
raise ClanError(f"Failed to decode {path.name}: {e}")
if key["type"] != "age":
raise ClanError(
f"{path.name} is not an age key but {key['type']}. This is not supported"
)
publickey = key.get("publickey")
if not publickey:
raise ClanError(f"{path.name} does not contain a public key")
return publickey
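
A hedged round-trip sketch of the key helpers above (paths are illustrative; generate_private_key shells out to age-keygen via nix, so a working nix with the age package is assumed):

# from pathlib import Path
# priv, pub = generate_private_key()
# write_key(Path("sops/users/alice"), pub, overwrite=False)   # writes key.json
# assert read_key(Path("sops/users/alice")) == pub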

View File

@@ -1,113 +0,0 @@
import os
import shlex
import shutil
import subprocess
import sys
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any
from clan_cli.nix import nix_shell
from ..dirs import get_clan_flake_toplevel
from ..errors import ClanError
from .folders import sops_secrets_folder
from .machines import add_machine, has_machine
from .secrets import decrypt_secret, encrypt_secret, has_secret
from .sops import generate_private_key
def generate_host_key(machine_name: str) -> None:
if has_machine(machine_name):
return
priv_key, pub_key = generate_private_key()
encrypt_secret(sops_secrets_folder() / f"{machine_name}-age.key", priv_key)
add_machine(machine_name, pub_key, False)
def generate_secrets_group(
secret_group: str, machine_name: str, tempdir: Path, secret_options: dict[str, Any]
) -> None:
clan_dir = get_clan_flake_toplevel()
secrets = secret_options["secrets"]
needs_regeneration = any(
not has_secret(f"{machine_name}-{secret['name']}")
for secret in secrets.values()
)
generator = secret_options["generator"]
subdir = tempdir / secret_group
if needs_regeneration:
facts_dir = subdir / "facts"
facts_dir.mkdir(parents=True)
secrets_dir = subdir / "secrets"
secrets_dir.mkdir(parents=True)
text = f"""\
set -euo pipefail
export facts={shlex.quote(str(facts_dir))}
export secrets={shlex.quote(str(secrets_dir))}
{generator}
"""
try:
cmd = nix_shell(["bash"], ["bash", "-c", text])
subprocess.run(cmd, check=True)
except subprocess.CalledProcessError:
msg = "failed to the following command:\n"
msg += text
raise ClanError(msg)
for secret in secrets.values():
secret_file = secrets_dir / secret["name"]
if not secret_file.is_file():
msg = f"did not generate a file for '{secret['name']}' when running the following command:\n"
msg += text
raise ClanError(msg)
encrypt_secret(
sops_secrets_folder() / f"{machine_name}-{secret['name']}",
secret_file.read_text(),
add_machines=[machine_name],
)
for fact in secret_options["facts"].values():
fact_file = facts_dir / fact["name"]
if not fact_file.is_file():
msg = f"did not generate a file for '{fact['name']}' when running the following command:\n"
msg += text
raise ClanError(msg)
fact_path = clan_dir.joinpath(fact["path"])
fact_path.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(fact_file, fact_path)
# this is called by the sops.nix clan core module
def generate_secrets_from_nix(
machine_name: str,
secret_submodules: dict[str, Any],
) -> None:
generate_host_key(machine_name)
errors = {}
with TemporaryDirectory() as d:
# if any of the secrets are missing, we regenerate all connected facts/secrets
for secret_group, secret_options in secret_submodules.items():
try:
generate_secrets_group(
secret_group, machine_name, Path(d), secret_options
)
except ClanError as e:
errors[secret_group] = e
for secret_group, error in errors.items():
print(f"failed to generate secrets for {machine_name}/{secret_group}:")
print(error, file=sys.stderr)
if len(errors) > 0:
sys.exit(1)
# this is called by the sops.nix clan core module
def upload_age_key_from_nix(
machine_name: str,
) -> None:
secret_name = f"{machine_name}-age.key"
if not has_secret(secret_name): # skip uploading the secret, not managed by us
return
secret = decrypt_secret(secret_name)
secrets_dir = Path(os.environ["SECRETS_DIR"])
(secrets_dir / "key.txt").write_text(secret)

View File

@@ -1,52 +0,0 @@
import argparse
import os
import re
from pathlib import Path
from typing import Callable
from ..errors import ClanError
from .sops import get_public_key
VALID_SECRET_NAME = re.compile(r"^[a-zA-Z0-9._-]+$")
VALID_USER_NAME = re.compile(r"^[a-z_]([a-z0-9_-]{0,31})?$")
def secret_name_type(arg_value: str) -> str:
if not VALID_SECRET_NAME.match(arg_value):
raise argparse.ArgumentTypeError(
"Invalid character in secret name. Allowed characters are a-z, A-Z, 0-9, ., -, and _"
)
return arg_value
def public_or_private_age_key_type(arg_value: str) -> str:
if os.path.isfile(arg_value):
arg_value = Path(arg_value).read_text().strip()
if arg_value.startswith("age1"):
return arg_value.strip()
if arg_value.startswith("AGE-SECRET-KEY-"):
return get_public_key(arg_value)
raise ClanError(
f"Please provide an age key starting with age1, got: '{arg_value}'"
)
def group_or_user_name_type(what: str) -> Callable[[str], str]:
def name_type(arg_value: str) -> str:
if len(arg_value) > 32:
raise argparse.ArgumentTypeError(
f"{what.capitalize()} name must be less than 32 characters long"
)
if not VALID_USER_NAME.match(arg_value):
raise argparse.ArgumentTypeError(
f"Invalid character in {what} name. Allowed characters are a-z, 0-9, -, and _. Must start with a letter or _"
)
return arg_value
return name_type
user_name_type = group_or_user_name_type("user")
group_name_type = group_or_user_name_type("group")

View File

@@ -1,49 +0,0 @@
import argparse
import logging
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
from ..machines.machines import Machine
from ..nix import nix_shell
log = logging.getLogger(__name__)
def upload_secrets(machine: Machine) -> None:
with TemporaryDirectory() as tempdir_:
tempdir = Path(tempdir_)
should_upload = machine.run_upload_secrets(tempdir)
if should_upload:
host = machine.host
ssh_cmd = host.ssh_cmd()
subprocess.run(
nix_shell(
["rsync"],
[
"rsync",
"-e",
" ".join(["ssh"] + ssh_cmd[2:]),
"-az",
"--delete",
f"{str(tempdir)}/",
f"{host.user}@{host.host}:{machine.secrets_upload_directory}/",
],
),
check=True,
)
def upload_command(args: argparse.Namespace) -> None:
machine = Machine(args.machine)
upload_secrets(machine)
def register_upload_parser(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
"machine",
help="The machine to upload secrets to",
)
parser.set_defaults(func=upload_command)

View File

@@ -1,122 +0,0 @@
import argparse
from . import secrets
from .folders import list_objects, remove_object, sops_users_folder
from .sops import read_key, write_key
from .types import (
VALID_USER_NAME,
public_or_private_age_key_type,
secret_name_type,
user_name_type,
)
def add_user(name: str, key: str, force: bool) -> None:
write_key(sops_users_folder() / name, key, force)
def remove_user(name: str) -> None:
remove_object(sops_users_folder(), name)
def get_user(name: str) -> str:
return read_key(sops_users_folder() / name)
def list_users() -> list[str]:
path = sops_users_folder()
def validate(name: str) -> bool:
return (
VALID_USER_NAME.match(name) is not None
and (path / name / "key.json").exists()
)
return list_objects(path, validate)
def add_secret(user: str, secret: str) -> None:
secrets.allow_member(secrets.users_folder(secret), sops_users_folder(), user)
def remove_secret(user: str, secret: str) -> None:
secrets.disallow_member(secrets.users_folder(secret), user)
def list_command(args: argparse.Namespace) -> None:
lst = list_users()
if len(lst) > 0:
print("\n".join(lst))
def add_command(args: argparse.Namespace) -> None:
add_user(args.user, args.key, args.force)
def get_command(args: argparse.Namespace) -> None:
print(get_user(args.user))
def remove_command(args: argparse.Namespace) -> None:
remove_user(args.user)
def add_secret_command(args: argparse.Namespace) -> None:
add_secret(args.user, args.secret)
def remove_secret_command(args: argparse.Namespace) -> None:
remove_secret(args.user, args.secret)
def register_users_parser(parser: argparse.ArgumentParser) -> None:
subparser = parser.add_subparsers(
title="command",
description="the command to run",
help="the command to run",
required=True,
)
list_parser = subparser.add_parser("list", help="list users")
list_parser.set_defaults(func=list_command)
add_parser = subparser.add_parser("add", help="add a user")
add_parser.add_argument(
"-f", "--force", help="overwrite existing user", action="store_true"
)
add_parser.add_argument("user", help="the name of the user", type=user_name_type)
add_parser.add_argument(
"key",
help="public key or private key of the user",
type=public_or_private_age_key_type,
)
add_parser.set_defaults(func=add_command)
get_parser = subparser.add_parser("get", help="get a user public key")
get_parser.add_argument("user", help="the name of the user", type=user_name_type)
get_parser.set_defaults(func=get_command)
remove_parser = subparser.add_parser("remove", help="remove a user")
remove_parser.add_argument("user", help="the name of the user", type=user_name_type)
remove_parser.set_defaults(func=remove_command)
add_secret_parser = subparser.add_parser(
"add-secret", help="allow a user to access a secret"
)
add_secret_parser.add_argument(
"user", help="the name of the group", type=user_name_type
)
add_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
add_secret_parser.set_defaults(func=add_secret_command)
remove_secret_parser = subparser.add_parser(
"remove-secret", help="remove a user's access to a secret"
)
remove_secret_parser.add_argument(
"user", help="the name of the group", type=user_name_type
)
remove_secret_parser.add_argument(
"secret", help="the name of the secret", type=secret_name_type
)
remove_secret_parser.set_defaults(func=remove_secret_command)
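A hedged sketch of wiring this sub-command into a standalone parser; the program name, user name, and key are hypothetical:

parser = argparse.ArgumentParser(prog="clan-secrets-users")  # hypothetical entry point
register_users_parser(parser)
args = parser.parse_args(["add", "alice", "age1examplepublickey"])
args.func(args)  # dispatches to add_command, which stores the key under sops_users_folder() / "alice"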

View File

@@ -1,863 +0,0 @@
# Adapted from https://github.com/numtide/deploykit
import fcntl
import logging
import math
import os
import select
import shlex
import subprocess
import sys
import time
from contextlib import ExitStack, contextmanager
from enum import Enum
from pathlib import Path
from shlex import quote
from threading import Thread
from typing import (
IO,
Any,
Callable,
Dict,
Generic,
Iterator,
List,
Literal,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
# https://no-color.org
DISABLE_COLOR = not sys.stderr.isatty() or os.environ.get("NO_COLOR", "") != ""
def ansi_color(color: int) -> str:
return f"\x1b[{color}m"
class CommandFormatter(logging.Formatter):
"""
print errors in red and warnings in yellow
"""
def __init__(self) -> None:
super().__init__(
"%(prefix_color)s[%(command_prefix)s]%(color_reset)s %(color)s%(message)s%(color_reset)s"
)
self.hostnames: List[str] = []
self.hostname_color_offset = 1 # first host shouldn't get aggressive red
def format(self, record: logging.LogRecord) -> str:
colorcode = 0
if record.levelno == logging.ERROR:
colorcode = 31 # red
if record.levelno == logging.WARN:
colorcode = 33 # yellow
color, prefix_color, color_reset = "", "", ""
if not DISABLE_COLOR:
command_prefix = getattr(record, "command_prefix", "")
color = ansi_color(colorcode)
prefix_color = ansi_color(self.hostname_colorcode(command_prefix))
color_reset = "\x1b[0m"
setattr(record, "color", color)
setattr(record, "prefix_color", prefix_color)
setattr(record, "color_reset", color_reset)
return super().format(record)
def hostname_colorcode(self, hostname: str) -> int:
try:
index = self.hostnames.index(hostname)
except ValueError:
self.hostnames += [hostname]
index = self.hostnames.index(hostname)
return 31 + (index + self.hostname_color_offset) % 7
def setup_loggers() -> Tuple[logging.Logger, logging.Logger]:
# If we use the default logger here (logging.error etc) or a logger called
# "deploykit", then cmdlog messages are also posted on the default logger.
# To avoid this message duplication, we set up separate "deploykit.main"
# and "deploykit.command" loggers.
kitlog = logging.getLogger("deploykit.main")
kitlog.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter())
kitlog.addHandler(ch)
# use specific logger for command outputs
cmdlog = logging.getLogger("deploykit.command")
cmdlog.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(CommandFormatter())
cmdlog.addHandler(ch)
return (kitlog, cmdlog)
# loggers for: general deploykit, command output
kitlog, cmdlog = setup_loggers()
info = kitlog.info
warn = kitlog.warning
error = kitlog.error
@contextmanager
def _pipe() -> Iterator[Tuple[IO[str], IO[str]]]:
(pipe_r, pipe_w) = os.pipe()
read_end = os.fdopen(pipe_r, "r")
write_end = os.fdopen(pipe_w, "w")
try:
fl = fcntl.fcntl(read_end, fcntl.F_GETFL)
fcntl.fcntl(read_end, fcntl.F_SETFL, fl | os.O_NONBLOCK)
yield (read_end, write_end)
finally:
read_end.close()
write_end.close()
FILE = Union[None, int]
# Seconds until a message is printed when _run produces no output.
NO_OUTPUT_TIMEOUT = 20
class HostKeyCheck(Enum):
# Strictly check ssh host keys, prompt for unknown ones
STRICT = 0
# Trust ssh host keys on first use
TOFU = 1
# Do not check ssh host keys
NONE = 2
class Host:
def __init__(
self,
host: str,
user: Optional[str] = None,
port: Optional[int] = None,
key: Optional[str] = None,
forward_agent: bool = False,
command_prefix: Optional[str] = None,
host_key_check: HostKeyCheck = HostKeyCheck.STRICT,
meta: Dict[str, Any] = {},
verbose_ssh: bool = False,
ssh_options: dict[str, str] = {},
) -> None:
"""
Creates a Host
@host the hostname to connect to via ssh
@port the port to connect to via ssh
@forward_agent: whether to forward the ssh agent
@command_prefix: string to prefix each line of the command output with, defaults to host
@host_key_check: whether to check ssh host keys
@verbose_ssh: Enables verbose logging on ssh connections
@meta: meta attributes associated with the host. Those can be accessed in custom functions passed to `run_function`
"""
self.host = host
self.user = user
self.port = port
self.key = key
if command_prefix:
self.command_prefix = command_prefix
else:
self.command_prefix = host
self.forward_agent = forward_agent
self.host_key_check = host_key_check
self.meta = meta
self.verbose_ssh = verbose_ssh
self.ssh_options = ssh_options
def _prefix_output(
self,
displayed_cmd: str,
print_std_fd: Optional[IO[str]],
print_err_fd: Optional[IO[str]],
stdout: Optional[IO[str]],
stderr: Optional[IO[str]],
timeout: float = math.inf,
) -> Tuple[str, str]:
rlist = []
if print_std_fd is not None:
rlist.append(print_std_fd)
if print_err_fd is not None:
rlist.append(print_err_fd)
if stdout is not None:
rlist.append(stdout)
if stderr is not None:
rlist.append(stderr)
print_std_buf = ""
print_err_buf = ""
stdout_buf = ""
stderr_buf = ""
start = time.time()
last_output = time.time()
while len(rlist) != 0:
r, _, _ = select.select(rlist, [], [], min(timeout, NO_OUTPUT_TIMEOUT))
def print_from(
print_fd: IO[str], print_buf: str, is_err: bool = False
) -> Tuple[float, str]:
read = os.read(print_fd.fileno(), 4096)
if len(read) == 0:
rlist.remove(print_fd)
print_buf += read.decode("utf-8")
if (read == b"" and len(print_buf) != 0) or "\n" in print_buf:
# print and empty the print_buf, if the stream is draining,
# but there is still something in the buffer or on newline.
lines = print_buf.rstrip("\n").split("\n")
for line in lines:
if not is_err:
cmdlog.info(
line, extra=dict(command_prefix=self.command_prefix)
)
else:
cmdlog.error(
line, extra=dict(command_prefix=self.command_prefix)
)
print_buf = ""
last_output = time.time()
return (last_output, print_buf)
if print_std_fd in r and print_std_fd is not None:
(last_output, print_std_buf) = print_from(
print_std_fd, print_std_buf, is_err=False
)
if print_err_fd in r and print_err_fd is not None:
(last_output, print_err_buf) = print_from(
print_err_fd, print_err_buf, is_err=True
)
now = time.time()
elapsed = now - start
if now - last_output > NO_OUTPUT_TIMEOUT:
elapsed_msg = time.strftime("%H:%M:%S", time.gmtime(elapsed))
cmdlog.warn(
f"still waiting for '{displayed_cmd}' to finish... ({elapsed_msg} elapsed)",
extra=dict(command_prefix=self.command_prefix),
)
def handle_fd(fd: Optional[IO[Any]]) -> str:
if fd and fd in r:
read = os.read(fd.fileno(), 4096)
if len(read) == 0:
rlist.remove(fd)
else:
return read.decode("utf-8")
return ""
stdout_buf += handle_fd(stdout)
stderr_buf += handle_fd(stderr)
if now - last_output >= timeout:
break
return stdout_buf, stderr_buf
def _run(
self,
cmd: List[str],
displayed_cmd: str,
shell: bool,
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
timeout: float = math.inf,
) -> subprocess.CompletedProcess[str]:
with ExitStack() as stack:
read_std_fd, write_std_fd = (None, None)
read_err_fd, write_err_fd = (None, None)
if stdout is None or stderr is None:
read_std_fd, write_std_fd = stack.enter_context(_pipe())
read_err_fd, write_err_fd = stack.enter_context(_pipe())
if stdout is None:
stdout_read = None
stdout_write = write_std_fd
elif stdout == subprocess.PIPE:
stdout_read, stdout_write = stack.enter_context(_pipe())
else:
raise Exception(f"unsupported value for stdout parameter: {stdout}")
if stderr is None:
stderr_read = None
stderr_write = write_err_fd
elif stderr == subprocess.PIPE:
stderr_read, stderr_write = stack.enter_context(_pipe())
else:
raise Exception(f"unsupported value for stderr parameter: {stderr}")
env = os.environ.copy()
env.update(extra_env)
with subprocess.Popen(
cmd,
text=True,
shell=shell,
stdout=stdout_write,
stderr=stderr_write,
env=env,
cwd=cwd,
) as p:
if write_std_fd is not None:
write_std_fd.close()
if write_err_fd is not None:
write_err_fd.close()
if stdout == subprocess.PIPE:
assert stdout_write is not None
stdout_write.close()
if stderr == subprocess.PIPE:
assert stderr_write is not None
stderr_write.close()
start = time.time()
stdout_data, stderr_data = self._prefix_output(
displayed_cmd,
read_std_fd,
read_err_fd,
stdout_read,
stderr_read,
timeout,
)
try:
ret = p.wait(timeout=max(0, timeout - (time.time() - start)))
except subprocess.TimeoutExpired:
p.kill()
raise
if ret != 0:
if check:
raise subprocess.CalledProcessError(
ret, cmd=cmd, output=stdout_data, stderr=stderr_data
)
else:
cmdlog.warning(
f"[Command failed: {ret}] {displayed_cmd}",
extra=dict(command_prefix=self.command_prefix),
)
return subprocess.CompletedProcess(
cmd, ret, stdout=stdout_data, stderr=stderr_data
)
raise RuntimeError("unreachable")
def run_local(
self,
cmd: Union[str, List[str]],
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
timeout: float = math.inf,
) -> subprocess.CompletedProcess[str]:
"""
Command to run locally for the host
@cmd the command to run
@stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
@extra_env environment variables to override when running the command
@cwd current working directory to run the process in
@timeout: Timeout in seconds for the command to complete
@return subprocess.CompletedProcess result of the command
"""
shell = False
if isinstance(cmd, str):
cmd = [cmd]
shell = True
displayed_cmd = " ".join(cmd)
cmdlog.info(
f"$ {displayed_cmd}", extra=dict(command_prefix=self.command_prefix)
)
return self._run(
cmd,
displayed_cmd,
shell=shell,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
timeout=timeout,
)
def run(
self,
cmd: Union[str, List[str]],
stdout: FILE = None,
stderr: FILE = None,
become_root: bool = False,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
verbose_ssh: bool = False,
timeout: float = math.inf,
) -> subprocess.CompletedProcess[str]:
"""
Command to run on the host via ssh
@cmd the command to run
@stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
@become_root if the ssh user is not root, then sudo is prepended
@extra_env environment variables to override when running the command
@cwd current working directory to run the process in
@verbose_ssh: Enables verbose logging on ssh connections
@timeout: Timeout in seconds for the command to complete
@return subprocess.CompletedProcess result of the ssh command
"""
sudo = ""
if become_root and self.user != "root":
sudo = "sudo -- "
vars = []
for k, v in extra_env.items():
vars.append(f"{shlex.quote(k)}={shlex.quote(v)}")
displayed_cmd = ""
export_cmd = ""
if vars:
export_cmd = f"export {' '.join(vars)}; "
displayed_cmd += export_cmd
if isinstance(cmd, list):
displayed_cmd += " ".join(cmd)
else:
displayed_cmd += cmd
cmdlog.info(
f"$ {displayed_cmd}", extra=dict(command_prefix=self.command_prefix)
)
bash_cmd = export_cmd
bash_args = []
if isinstance(cmd, list):
bash_cmd += 'exec "$@"'
bash_args += cmd
else:
bash_cmd += cmd
# FIXME: we assume bash is present on the remote host; this should be documented
ssh_cmd = self.ssh_cmd(verbose_ssh=verbose_ssh) + [
"--",
f"{sudo}bash -c {quote(bash_cmd)} -- {' '.join(map(quote, bash_args))}",
]
return self._run(
ssh_cmd,
displayed_cmd,
shell=False,
stdout=stdout,
stderr=stderr,
cwd=cwd,
check=check,
timeout=timeout,
)
def ssh_cmd(
self,
verbose_ssh: bool = False,
) -> List:
if self.user is not None:
ssh_target = f"{self.user}@{self.host}"
else:
ssh_target = self.host
ssh_opts = ["-A"] if self.forward_agent else []
for k, v in self.ssh_options.items():
ssh_opts.extend(["-o", f"{k}={shlex.quote(v)}"])
if self.port:
ssh_opts.extend(["-p", str(self.port)])
if self.key:
ssh_opts.extend(["-i", self.key])
if self.host_key_check != HostKeyCheck.STRICT:
ssh_opts.extend(["-o", "StrictHostKeyChecking=no"])
if self.host_key_check == HostKeyCheck.NONE:
ssh_opts.extend(["-o", "UserKnownHostsFile=/dev/null"])
if verbose_ssh or self.verbose_ssh:
ssh_opts.extend(["-v"])
return ["ssh", ssh_target] + ssh_opts
T = TypeVar("T")
class HostResult(Generic[T]):
def __init__(self, host: Host, result: Union[T, Exception]) -> None:
self.host = host
self._result = result
@property
def error(self) -> Optional[Exception]:
"""
Returns an error if the command failed
"""
if isinstance(self._result, Exception):
return self._result
return None
@property
def result(self) -> T:
"""
Unwrap the result
"""
if isinstance(self._result, Exception):
raise self._result
return self._result
Results = List[HostResult[subprocess.CompletedProcess[str]]]
def _worker(
func: Callable[[Host], T],
host: Host,
results: List[HostResult[T]],
idx: int,
) -> None:
try:
results[idx] = HostResult(host, func(host))
except Exception as e:
kitlog.exception(e)
results[idx] = HostResult(host, e)
class HostGroup:
def __init__(self, hosts: List[Host]) -> None:
self.hosts = hosts
def _run_local(
self,
cmd: Union[str, List[str]],
host: Host,
results: Results,
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
verbose_ssh: bool = False,
timeout: float = math.inf,
) -> None:
try:
proc = host.run_local(
cmd,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
timeout=timeout,
)
results.append(HostResult(host, proc))
except Exception as e:
kitlog.exception(e)
results.append(HostResult(host, e))
def _run_remote(
self,
cmd: Union[str, List[str]],
host: Host,
results: Results,
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
verbose_ssh: bool = False,
timeout: float = math.inf,
) -> None:
try:
proc = host.run(
cmd,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
verbose_ssh=verbose_ssh,
timeout=timeout,
)
results.append(HostResult(host, proc))
except Exception as e:
kitlog.exception(e)
results.append(HostResult(host, e))
def _reraise_errors(self, results: List[HostResult[Any]]) -> None:
errors = 0
for result in results:
e = result.error
if e:
cmdlog.error(
f"failed with: {e}",
extra=dict(command_prefix=result.host.command_prefix),
)
errors += 1
if errors > 0:
raise Exception(
f"{errors} hosts failed with an error. Check the logs above"
)
def _run(
self,
cmd: Union[str, List[str]],
local: bool = False,
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
verbose_ssh: bool = False,
timeout: float = math.inf,
) -> Results:
results: Results = []
threads = []
for host in self.hosts:
fn = self._run_local if local else self._run_remote
thread = Thread(
target=fn,
kwargs=dict(
results=results,
cmd=cmd,
host=host,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
verbose_ssh=verbose_ssh,
timeout=timeout,
),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
if check:
self._reraise_errors(results)
return results
def run(
self,
cmd: Union[str, List[str]],
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
verbose_ssh: bool = False,
timeout: float = math.inf,
) -> Results:
"""
Command to run on the remote host via ssh
@stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
@cwd current working directory to run the process in
@verbose_ssh: Enables verbose logging on ssh connections
@timeout: Timeout in seconds for the command to complete
@return a list of HostResult objects pairing each Host with the result of the command
"""
return self._run(
cmd,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
verbose_ssh=verbose_ssh,
timeout=timeout,
)
def run_local(
self,
cmd: Union[str, List[str]],
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
timeout: float = math.inf,
) -> Results:
"""
Command to run locally for each host in the group in parallel
@cmd the command to run
@stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
@cwd current working directory to run the process in
@extra_env environment variables to override when running the command
@timeout: Timeout in seconds for the command to complete
@return a list of HostResult objects pairing each Host with the result of the command
"""
return self._run(
cmd,
local=True,
stdout=stdout,
stderr=stderr,
extra_env=extra_env,
cwd=cwd,
check=check,
timeout=timeout,
)
def run_function(
self, func: Callable[[Host], T], check: bool = True
) -> List[HostResult[T]]:
"""
Function to run for each host in the group in parallel
@func the function to call
"""
threads = []
results: List[HostResult[T]] = [
HostResult(h, Exception(f"No result set for thread {i}"))
for (i, h) in enumerate(self.hosts)
]
for i, host in enumerate(self.hosts):
thread = Thread(
target=_worker,
args=(func, host, results, i),
)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if check:
self._reraise_errors(results)
return results
def filter(self, pred: Callable[[Host], bool]) -> "HostGroup":
"""Return a new Group with the results filtered by the predicate"""
return HostGroup(list(filter(pred, self.hosts)))
def parse_deployment_address(
machine_name: str, host: str, meta: dict[str, Any] = {}
) -> Host:
parts = host.split("@")
user: Optional[str] = None
if len(parts) > 1:
user = parts[0]
hostname = parts[1]
else:
hostname = parts[0]
maybe_options = hostname.split("?")
options: Dict[str, str] = {}
if len(maybe_options) > 1:
hostname = maybe_options[0]
for option in maybe_options[1].split("&"):
k, v = option.split("=")
options[k] = v
maybe_port = hostname.split(":")
port = None
if len(maybe_port) > 1:
hostname = maybe_port[0]
port = int(maybe_port[1])
meta = meta.copy()
meta["flake_attr"] = machine_name
return Host(
hostname,
user=user,
port=port,
command_prefix=machine_name,
meta=meta,
ssh_options=options,
)
@overload
def run(
cmd: Union[List[str], str],
text: Literal[True] = ...,
stdout: FILE = ...,
stderr: FILE = ...,
extra_env: Dict[str, str] = ...,
cwd: Union[None, str, Path] = ...,
check: bool = ...,
) -> subprocess.CompletedProcess[str]:
...
@overload
def run(
cmd: Union[List[str], str],
text: Literal[False],
stdout: FILE = ...,
stderr: FILE = ...,
extra_env: Dict[str, str] = ...,
cwd: Union[None, str, Path] = ...,
check: bool = ...,
) -> subprocess.CompletedProcess[bytes]:
...
def run(
cmd: Union[List[str], str],
text: bool = True,
stdout: FILE = None,
stderr: FILE = None,
extra_env: Dict[str, str] = {},
cwd: Union[None, str, Path] = None,
check: bool = True,
) -> subprocess.CompletedProcess[Any]:
"""
Run command locally
@cmd if this parameter is a string the command is interpreted as a shell command,
otherwise if it is a list, then the first list element is the command
and the remaining list elements are passed as arguments to the
command.
@text when true, file objects for stdout and stderr are opened in text mode.
@stdout if not None stdout of the command will be redirected to this file i.e. stdout=subprocess.PIPE
@stderr if not None stderr of the command will be redirected to this file i.e. stderr=subprocess.PIPE
@extra_env environment variables to override when running the command
@cwd current working directory to run the process in
@check If check is true, and the process exits with a non-zero exit code, a
CalledProcessError exception will be raised. Attributes of that exception
hold the arguments, the exit code, and stdout and stderr if they were
captured.
"""
if isinstance(cmd, list):
info("$ " + " ".join(cmd))
else:
info(f"$ {cmd}")
env = os.environ.copy()
env.update(extra_env)
return subprocess.run(
cmd,
stdout=stdout,
stderr=stderr,
env=env,
cwd=cwd,
check=check,
shell=not isinstance(cmd, list),
text=text,
)
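To illustrate the API above, a hedged sketch; the machine name, user, host, port, and ssh option are hypothetical:

host = parse_deployment_address("web01", "admin@example.com:2222?IdentityFile=~/.ssh/id_ed25519")
group = HostGroup([host])
results = group.run("uname -a", timeout=60)  # runs the command over ssh on every host in parallel
print(results[0].result.returncode)  # .result unwraps the CompletedProcess or re-raises the stored exception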

View File

@@ -1,80 +0,0 @@
import argparse
import json
import subprocess
from typing import Optional
from ..nix import nix_shell
def ssh(
host: str,
user: str = "root",
password: Optional[str] = None,
ssh_args: list[str] = [],
) -> None:
packages = ["tor", "openssh"]
password_args = []
if password:
packages.append("sshpass")
password_args = [
"sshpass",
"-p",
password,
]
_ssh_args = ssh_args + [
"ssh",
"-o",
"UserKnownHostsFile=/dev/null",
"-o",
"StrictHostKeyChecking=no",
f"{user}@{host}",
]
cmd = nix_shell(packages, ["torify"] + password_args + _ssh_args)
subprocess.run(cmd)
def qrcode_scan(picture_file: str) -> str:
return (
subprocess.run(
nix_shell(
["zbar"],
[
"zbarimg",
"--quiet",
"--raw",
picture_file,
],
),
stdout=subprocess.PIPE,
check=True,
)
.stdout.decode()
.strip()
)
def main(args: argparse.Namespace) -> None:
if args.json:
with open(args.json) as file:
ssh_data = json.load(file)
ssh(host=ssh_data["address"], password=ssh_data["password"])
elif args.png:
ssh_data = json.loads(qrcode_scan(args.png))
ssh(host=ssh_data["address"], password=ssh_data["password"])
def register_parser(parser: argparse.ArgumentParser) -> None:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-j",
"--json",
help="specify the json file for ssh data (generated by starting the clan installer)",
)
group.add_argument(
"-P",
"--png",
help="specify the json file for ssh data as the qrcode image (generated by starting the clan installer)",
)
# TODO: pass all unparsed args through as ssh_args; currently this fails if an arg starts with -
parser.add_argument("ssh_args", nargs="*", default=[])
parser.set_defaults(func=main)
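The JSON consumed here needs at least "address" and "password" keys; a hedged example of the equivalent direct call, with made-up values:

# Example installer-produced JSON: {"address": "exampleaddress.onion", "password": "install-password"}
ssh(host="exampleaddress.onion", password="install-password")
# Inside a nix shell providing tor, openssh, and sshpass this runs roughly:
#   torify sshpass -p <password> ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@<address>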

View File

@@ -8,9 +8,11 @@ import sys
import threading
import traceback
from enum import Enum
from pathlib import Path
from typing import Any, Iterator, Optional, Type, TypeVar
from uuid import UUID, uuid4
from .custom_logger import ThreadFormatter, get_caller
from .errors import ClanError
@@ -30,14 +32,31 @@ class Command:
self._output.put(None)
self.done = True
def run(self, cmd: list[str], env: Optional[dict[str, str]] = None) -> None:
def run(
self,
cmd: list[str],
env: Optional[dict[str, str]] = None,
cwd: Optional[Path] = None,
) -> None:
self.running = True
self.log.debug(f"Running command: {shlex.join(cmd)}")
self.log.debug(f"Command: {shlex.join(cmd)}")
self.log.debug(f"Caller: {get_caller()}")
cwd_res = None
if cwd is not None:
if not cwd.exists():
raise ClanError(f"Working directory {cwd} does not exist")
if not cwd.is_dir():
raise ClanError(f"Working directory {cwd} is not a directory")
cwd_res = cwd.resolve()
self.log.debug(f"Working directory: {cwd_res}")
self.p = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
cwd=cwd_res,
env=env,
)
assert self.p.stdout is not None and self.p.stderr is not None
@@ -51,10 +70,10 @@ class Command:
try:
for line in fd:
if fd == self.p.stderr:
print(f"[{cmd[0]}] stderr: {line}")
self.log.debug(f"[{cmd[0]}] stderr: {line}")
self.stderr.append(line)
else:
print(f"[{cmd[0]}] stdout: {line}")
self.log.debug(f"[{cmd[0]}] stdout: {line}")
self.stdout.append(line)
self._output.put(line)
except BlockingIOError:
@@ -63,8 +82,6 @@ class Command:
if self.p.returncode != 0:
raise ClanError(f"Failed to run command: {shlex.join(cmd)}")
self.log.debug("Successfully ran command")
class TaskStatus(str, Enum):
NOTSTARTED = "NOTSTARTED"
@@ -77,7 +94,13 @@ class BaseTask:
def __init__(self, uuid: UUID, num_cmds: int) -> None:
# constructor
self.uuid: UUID = uuid
self.log = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
handler.setFormatter(ThreadFormatter())
logger = logging.getLogger(__name__)
logger.addHandler(handler)
self.log = logger
self.log = logger
self.procs: list[Command] = []
self.status = TaskStatus.NOTSTARTED
self.logs_lock = threading.Lock()
@@ -91,6 +114,10 @@ class BaseTask:
self.status = TaskStatus.RUNNING
try:
self.run()
# TODO: We need to check if too many commands have been initialized
# but not run, since that would deadlock the log_lines() function.
# Idea: run next(cmds) and check whether it raises StopIteration; if not,
# there are too many commands.
except Exception as e:
# FIXME: fix exception handling here
traceback.print_exception(*sys.exc_info())
@@ -106,7 +133,7 @@ class BaseTask:
def run(self) -> None:
raise NotImplementedError
## TODO: If two clients are connected to the same task,
## TODO: Test when two clients are connected to the same task
def log_lines(self) -> Iterator[str]:
with self.logs_lock:
for proc in self.procs:
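A hedged illustration of the new cwd handling added to Command.run in this diff; the Command constructor is not visible here, so the instance and paths below are assumptions, shown as comments:

# command = Command(...)  # construction not shown in this diff
# command.run(["nix", "flake", "show"], cwd=Path("/path/to/existing/flake"))  # resolves cwd and hands it to subprocess.Popen
# command.run(["true"], cwd=Path("/does/not/exist"))  # raises ClanError: "Working directory /does/not/exist does not exist"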

Some files were not shown because too many files have changed in this diff.