Initial commit

2023-10-15 16:41:25 +02:00
parent 6def19b4c8
commit 545d389df0
46 changed files with 10 additions and 2640 deletions

View File

@@ -1,44 +0,0 @@
{ config, lib, ... }:
{
options.clan.diskLayouts.singleDiskExt4 = {
device = lib.mkOption {
type = lib.types.str;
example = "/dev/disk/by-id/ata-Samsung_SSD_850_EVO_250GB_S21PNXAGB12345";
};
};
config.disko.devices = {
disk = {
main = {
type = "disk";
device = config.clan.diskLayouts.singleDiskExt4.device;
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "512M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}

View File

@@ -1,12 +0,0 @@
{ self, lib, ... }: {
flake.clanModules = {
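# Expose every file in ./diskLayouts as a clan module named after the file (minus the .nix suffix), with disko imported alongside it.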
diskLayouts = lib.mapAttrs'
(name: _: lib.nameValuePair (lib.removeSuffix ".nix" name) {
imports = [
self.inputs.disko.nixosModules.disko
./diskLayouts/${name}
];
})
(builtins.readDir ./diskLayouts);
};
}

View File

@@ -1,69 +0,0 @@
# cLAN config
`clan config` allows you to manage your nixos configuration via the terminal.
Similar to how `git config` reads and sets git options, `clan config` does the same with your nixos options.
It also supports auto-completion, making it easy to find the right options.
## Set up clan-config
Add the clan tool to your flake inputs:
```
clan.url = "git+https://git.clan.lol/clan/clan-core";
```
and inside the mkFlake:
```
imports = [
inputs.clan.flakeModules.clan-config
];
```
Add an empty config file and add it to git
```command
echo "{}" > ./clan-settings.json
git add ./clan-settings.json
```
Import the clan-config module into your nixos configuration:
```nix
{
imports = [
# clan-settings.json is located in the same directory as your flake.
# Adapt the path if necessary.
(builtins.fromJSON (builtins.readFile ./clan-settings.json))
];
}
```
Make sure your nixos configuration is set as the default:
```nix
{self, ...}: {
flake.nixosConfigurations.default = self.nixosConfigurations.my-machine;
}
```
Use all inputs provided by the clan-config devShell in your own devShell:
```nix
{ ... }: {
perSystem = { pkgs, self', ... }: {
devShells.default = pkgs.mkShell {
inputsFrom = [ self'.devShells.clan-config ];
# ...
};
};
}
```
Re-load your dev shell to make the clan tool available:
```command
clan config --help
```

View File

@@ -1,153 +0,0 @@
# Initializing a New Clan Project
## Create a new Clan flake
1. To start a new project, execute the following command to add the clan cli to your shell:
```shellSession
$ nix shell git+https://git.clan.lol/clan/clan-core
```
2. Then use the following commands to initialize a new clan-flake:
```shellSession
$ clan flake create my-clan
```
This action will generate two primary files: `flake.nix` and `.clan-flake`.
```shellSession
$ ls -la
drwx------ joerg users 5 B a minute ago ./
drwxrwxrwt root root 139 B 12 seconds ago ../
.rw-r--r-- joerg users 77 B a minute ago .clan-flake
.rw-r--r-- joerg users 4.8 KB a minute ago flake.lock
.rw-r--r-- joerg users 242 B a minute ago flake.nix
```
### Understanding the .clan-flake Marker File
The `.clan-flake` marker file serves an optional purpose: it helps the `clan-cli` utility locate the project's root directory.
If `.clan-flake` is missing, `clan-cli` will instead search for other indicators like `.git`, `.hg`, `.svn`, or `flake.nix` to identify the project root.
## Add your first machine
```shellSession
$ clan machines create my-machine
$ clan machines list
my-machine
```
## Configure your machine
In this example we create a user named `my-user` that is allowed to log in to the machine:
```shellSession
# create a new user
$ clan config --machine my-machine users.users.my-user.isNormalUser true
# set some password
$ clan config --machine my-machine users.users.my-user.hashedPassword $(mkpasswd)
```
## Test your machine config inside a VM
```shellSession
$ nix build .#nixosConfigurations.my-machine.config.system.build.vm
...
$ ./result/bin/run-nixos-vm
```
---
# Migrating Existing NixOS Configuration Flake
Let's break down the migration step by step, explaining each action in detail:
#### Before You Begin
1. **Backup Your Current Configuration**: Always start by making a backup of your current NixOS configuration to ensure you can revert if needed.
```shellSession
$ cp -r /etc/nixos ~/nixos-backup
```
2. **Update Flake Inputs**: Add a new input for the `clan-core` dependency:
```nix
inputs.clan-core = {
url = "git+https://git.clan.lol/clan/clan-core";
# Don't do this if your machines are on nixpkgs stable.
inputs.nixpkgs.follows = "nixpkgs";
};
```
- `url`: Specifies the Git repository URL for Clan Core.
- `inputs.nixpkgs.follows`: Tells Nix to reuse your flake's own `nixpkgs` input for `clan-core` instead of the `nixpkgs` revision it pins itself.
3. **Update Outputs**: Then modify the `outputs` section of your `flake.nix` to adapt to Clan Core's new provisioning method. The key changes are as follows:
Add `clan-core` to the `outputs` arguments:
```diff
- outputs = { self, nixpkgs, }:
+ outputs = { self, nixpkgs, clan-core }:
```
Previous configuration:
```nix
{
nixosConfigurations.example-desktop = nixpkgs.lib.nixosSystem {
system = "x86_64-linux";
modules = [
./configuration.nix
];
[...]
};
}
```
After change:
```nix
let clan = clan-core.lib.buildClan {
# this needs to point at the repository root
directory = self;
specialArgs = {};
machines = {
example-desktop = {
nixpkgs.hostPlatform = "x86_64-linux";
imports = [
./configuration.nix
];
};
};
};
in { inherit (clan) nixosConfigurations clanInternals; }
```
- `nixosConfigurations`: Defines NixOS configurations, using Clan Core's `buildClan` function to manage the machines.
- Inside `machines`, a new machine configuration is defined (in this case, `example-desktop`).
- Inside `example-desktop`, which is the target machine's hostname, `nixpkgs.hostPlatform` specifies the host platform as `x86_64-linux`.
- `clanInternals`: required to enable evaluation of the secret generation/upload scripts on every architecture.
4. **Rebuild and Switch**: Rebuild your NixOS configuration using the updated flake:
```shellSession
$ sudo nixos-rebuild switch --flake .
```
- This command rebuilds and switches to the new configuration. Make sure to include the `--flake .` argument to use the current directory as the flake source.
5. **Test Configuration**: Before rebooting, verify that your new configuration builds without errors or warnings.
6. **Reboot**: If everything is fine, you can reboot your system to apply the changes:
```shellSession
$ sudo reboot
```
7. **Verify**: After the reboot, confirm that your system is running with the new configuration, and all services and applications are functioning as expected.
By following these steps, you've successfully migrated your NixOS Flake configuration to include the `clan-core` input and adapted the `outputs` section to work with Clan Core's new machine provisioning method.

View File

@@ -1,173 +0,0 @@
# Managing Secrets with Clan
Clan enables encryption of secrets within a Clan flake, ensuring secure sharing among users.
This documentation will guide you through managing secrets with the Clan CLI,
which utilizes the [sops](https://github.com/getsops/sops) format and
integrates with [sops-nix](https://github.com/Mic92/sops-nix) on NixOS machines.
## 1. Generating Keys and Creating Secrets
To begin, generate a key pair:
```shellSession
$ clan secrets key generate
```
**Output**:
```
Public key: age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user.
Generated age private key at '/home/joerg/.config/sops/age/keys.txt' for your user. Please back it up on a secure location or you will lose access to your secrets.
Also add your age public key to the repository with 'clan secrets users add youruser age1wkth7uhpkl555g40t8hjsysr20drq286netu8zptw50lmqz7j95sw2t3l7' (replace youruser with your user name)
```
⚠️ **Important**: Backup the generated private key securely, or risk losing access to your secrets.
Next, add your public key to the Clan flake repository:
```shellSession
$ clan secrets users add <your_username> <your_public_key>
```
Doing so creates this structure in your Clan flake:
```
sops/
└── users/
└── <your_username>/
└── key.json
```
Now, to set your first secret:
```shellSession
$ clan secrets set mysecret
Paste your secret:
```
Note: As you type your secret, keypresses won't be displayed. Press Enter to save the secret.
Retrieve the stored secret:
```shellSession
$ clan secrets get mysecret
```
And list all secrets like this:
```shellSession
$ clan secrets list
```
Secrets in the repository follow this structure:
```
sops/
├── secrets/
│ └── <secret_name>/
│ ├── secret
│ └── users/
│ └── <your_username>/
```
The content of the secret is stored encrypted in the `secret` file under `sops/secrets/mysecret/`.
By default, secrets are encrypted with your key so that you can read them back.
## 2. Adding Machine Keys
New machines in Clan come with age keys stored in `./sops/machines/<machine_name>`. To list these machines:
```shellSession
$ clan secrets machines list
```
For existing machines, add their keys:
```shellSession
$ clan secrets machines add <machine_name> <age_key>
```
To fetch an age key from an SSH host key:
```shellSession
$ ssh-keyscan <domain_name> | nix shell nixpkgs#ssh-to-age -c ssh-to-age
```
## 3. Assigning Access
By default, secrets are encrypted for your key. To specify which users and machines can access a secret:
```shellSession
$ clan secrets set --machine <machine1> --machine <machine2> --user <user1> --user <user2> <secret_name>
```
You can add machines/users to existing secrets without modifying the secret:
```shellSession
$ clan secrets machines add-secret <machine_name> <secret_name>
```
## 4. Utilizing Groups
For convenience, Clan CLI allows group creation to simplify access management. Here's how:
1. **Creating Groups**:
Assign users to a new group, e.g., `admins`:
```shellSession
$ clan secrets groups add admins <username>
```
2. **Listing Groups**:
```shellSession
$ clan secrets groups list
```
3. **Assigning Secrets to Groups**:
```shellSession
$ clan secrets groups add-secret <group_name> <secret_name>
```
# NixOS integration
A NixOS machine will automatically import all secrets that are encrypted for the
current machine. At runtime it will use the host key to decrypt all secrets into
an in-memory, non-persistent filesystem using
[sops-nix](https://github.com/Mic92/sops-nix). In your nixos configuration you
can get the path to a secret via `config.sops.secrets.<name>.path`. Example:
```nix
{ config, ...}: {
sops.secrets.my-password.neededForUsers = true;
users.users.mic92 = {
isNormalUser = true;
passwordFile = config.sops.secrets.my-password.path;
};
}
```
See the [readme](https://github.com/Mic92/sops-nix) of sops-nix for more
examples.
# Importing existing sops-based keys / sops-nix
`clan secrets` stores each secret in a single file, whereas [sops](https://github.com/Mic92/sops-nix)
commonly puts all secrets into a single YAML or JSON document.
If you already happen to use sops-nix, you can migrate such documents with the `clan secrets import-sops` command:
```shellSession
% clan secrets import-sops --prefix matchbox- --group admins --machine matchbox nixos/matchbox/secrets/secrets.yaml
```
This will create a secret for each entry found in `nixos/matchbox/secrets/secrets.yaml` in the `./sops` folder of your repository.
Each member of the group `admins` will be able to decrypt these secrets.
Since our clan secret module auto-imports secrets that are encrypted for a particular nixos machine,
you can now remove `sops.secrets.<name> = { };` declarations, unless you need to set more options for the secret, such as the owner or group of the secret file (see the sketch below).
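A minimal sketch of such an explicit declaration, assuming a hypothetical secret named `wireguard-key` that should be readable by the `systemd-network` user, could look like this:
```nix
{
  # The explicit declaration is only needed to override ownership/permissions;
  # the secret itself is still auto-imported by the clan secrets module.
  # "wireguard-key" and "systemd-network" are example names, not part of clan-core.
  sops.secrets.wireguard-key = {
    owner = "systemd-network";
    group = "systemd-network";
    mode = "0400";
  };
}
```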

View File

@@ -1,43 +0,0 @@
# Self Hosting
## General Description
Self-hosting refers to the practice of hosting and maintaining servers, networks, storage, services, and other types of infrastructure by oneself rather than relying on a third-party vendor. This could involve running a server from a home or business location, or leasing a dedicated server at a data center.
There are several reasons for choosing to self-host. These can include:
1. Cost savings: Over time, self-hosting can be more cost-effective, especially for businesses with large scale needs.
1. Control: Self-hosting provides a greater level of control over the infrastructure and services. It allows the owner to customize the system to their specific needs.
1. Privacy and security: Self-hosting can offer improved privacy and security because data remains under the control of the host rather than being stored on third-party servers.
1. Independence: Being independent of third-party services can ensure that one's websites, applications, or services remain up even if a third-party service goes down.
## Stories
### Story 1: Private mumble server hosted at home
Alice wants to self-host a mumble server for her family.
- She visits the cLAN website and follows the instructions on how to install cLAN-OS on her server.
- Alice logs into a terminal on her server via SSH (alternatively uses the cLAN GUI app).
- Using the cLAN CLI or GUI tool, Alice creates a new private network for her family (VPN).
- Alice now browses a list of curated cLAN modules and finds a module for mumble.
- She adds this module to her network using the cLAN tool.
- After that, she uses the clan tool to invite her family members to her network.
- Other family members join the private network via the invitation.
- By accepting the invitation, other members automatically install all required software to interact with the network on their machine.
### Story 2: Adding a service to an existing network
Alice wants to add a photos app to her private network
- She uses the clan CLI or GUI tool to manage her existing private cLAN family network
- She discovers a module for photoprism, and adds it to her server using the tool
- Other members who are already part of her network will receive a notification that an update is required for their environment.
- After accepting, all new software and services to interact with the new photoprism service will be installed automatically.
## Challenges
...

View File

@@ -1,37 +0,0 @@
# Joining a cLAN network
## General Description
Joining a self-hosted infrastructure involves connecting to a network, server, or system that is privately owned and managed, instead of being hosted by a third-party service provider. This could be a business's internal server, a private cloud setup, or any other private IT infrastructure that is not publicly accessible or controlled by outside entities.
## Stories
### Story 1: Joining a private network
Alice's son Bob has never heard of cLAN, but receives an invitation URL from Alice, who has already set up a private cLAN network for her family.
Bob opens the invitation link and lands on the cLAN website. He quickly learns about what cLAN is and can see that the invitation is for a private network of his family that hosts a number of services, like a private voice chat and a photo sharing platform.
Bob decides to join the network and follows the instructions to install the cLAN tool on his computer.
Feeding the invitation link to the cLAN tool, Bob registers his machine with the network.
All programs required to interact with the network will be installed and configured automatically and securely.
Optionally, Bob can customize the configuration of these programs through a simplified configuration interface.
### Story 2: Receiving breaking changes
The cLAN family network which Bob is part of received an update.
The existing photo sharing service has been removed and replaced with another alternative service. The new photo sharing service requires a different client app to view and upload photos.
Bob accepts the update. Now his environment will be updated. The old client software will be removed and the new one installed.
Because Bob has customized the previous photo viewing app, he is notified that this customization is no longer valid, as the software has been removed (deprecation message).
Optionally, Bob can now customize the new photo viewing software through his cLAN configuration app or via a config file.
## Challenges
...

View File

@@ -1,25 +0,0 @@
# cLAN module maintaining
## General Description
cLAN modules are pieces of software that can be used by admins to build a private or public infrastructure.
cLAN modules should have the following properties:
1. Documented: It should be clear what the module does and how to use it.
1. Self-contained: A module should be usable as is. If it requires any other software or settings, those should be delivered with the module itself.
1. Simple to deploy and use: Modules should have opinionated defaults that just work. Any customization should be optional.
## Stories
### Story 1: Maintaining a shared folder module
Alice maintains a module for a shared folder service that she uses in her own infra, but also publishes for the community.
By following clan module standards (Backups, Interfaces, Output schema, etc), other community members have an easy time re-using the module within their own infra.
She benefits from publishing the module, because other community members start using it and help to maintain it.
## Challenges
...

View File

@@ -1,17 +0,0 @@
# (TITLE)
## General Description
## Stories
### Story 1: Some Description
Alice...
### Story 2: Some Description
Bob...
## Challenges
...

View File

@@ -1,69 +0,0 @@
# ZeroTier Configuration with NixOS in Clan
This guide provides detailed instructions for configuring
[ZeroTier VPN](https://zerotier.com) within Clan. Follow the
outlined steps to set up a machine as a VPN controller (`<CONTROLLER>`) and to
include a new machine into the VPN.
## 1. Setting Up the VPN Controller
The VPN controller is initially essential for providing configuration to new
peers. Once addresses have been allocated, the controller's continuous operation is no
longer crucial.
### Instructions:
1. **Designate a Machine**: Label a machine as the VPN controller in the clan,
referred to as `<CONTROLLER>` henceforth in this guide.
2. **Add Configuration**: Add the configuration below to the NixOS
configuration of the controller machine:
```nix
clan.networking.zerotier.controller = {
enable = true;
public = true;
};
```
3. **Update the Controller Machine**: Execute the following:
```console
$ clan machines update <CONTROLLER>
```
Your machine is now operational as the VPN controller.
## 2. Integrating a New Machine to the VPN
To introduce a new machine to the VPN, adhere to the following steps:
### Instructions:
1. **Update Configuration**: On the new machine, add the following to its
configuration, substituting `<CONTROLLER>` with the controller machine's name:
```nix
{ config, ... }: {
clan.networking.zerotier.networkId = builtins.readFile (config.clanCore.clanDir + "/machines/<CONTROLLER>/facts/zerotier-network-id");
}
```
2. **Update the New Machine**: Execute:
```console
$ clan machines update <NEW_MACHINE>
```
Replace `<NEW_MACHINE>` with the designated new machine name.
3. **Retrieve the ZeroTier ID**: On the new machine (`<NEW_MACHINE>`), execute:
```console
$ sudo zerotier-cli info
```
Example Output: `200 info d2c71971db 1.12.1 OFFLINE`, where `d2c71971db` is
the ZeroTier ID.
4. **Authorize the New Machine on Controller**: On the controller machine,
execute:
```console
$ sudo zerotier-members allow <ID>
```
Substitute `<ID>` with the ZeroTier ID obtained previously.
5. **Verify Connection**: On the new machine, re-execute:
```console
$ sudo zerotier-cli info
```
The status should now be "ONLINE", e.g. `200 info 47303517ef 1.12.1 ONLINE`.
Congratulations! The new machine is now part of the VPN, and the ZeroTier
configuration on NixOS within the Clan project is complete.

flake.lock generated
View File

@@ -1,25 +1,5 @@
 {
   "nodes": {
-    "disko": {
-      "inputs": {
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1696468923,
-        "narHash": "sha256-qSM7NKgf8LcZ5hjKHZ8ANFI8+LQivvAypbhJHBJmYFM=",
-        "owner": "nix-community",
-        "repo": "disko",
-        "rev": "cde886a1c97ef2399b4f91409db045785020291f",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "disko",
-        "type": "github"
-      }
-    },
     "flake-parts": {
       "inputs": {
         "nixpkgs-lib": [
@@ -60,90 +40,30 @@
         "type": "github"
       }
     },
-    "nixlib": {
-      "locked": {
-        "lastModified": 1693701915,
-        "narHash": "sha256-waHPLdDYUOHSEtMKKabcKIMhlUOHPOOPQ9UyFeEoovs=",
-        "owner": "nix-community",
-        "repo": "nixpkgs.lib",
-        "rev": "f5af57d3ef9947a70ac86e42695231ac1ad00c25",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "nixpkgs.lib",
-        "type": "github"
-      }
-    },
-    "nixos-generators": {
-      "inputs": {
-        "nixlib": "nixlib",
-        "nixpkgs": [
-          "nixpkgs"
-        ]
-      },
-      "locked": {
-        "lastModified": 1696058303,
-        "narHash": "sha256-eNqKWpF5zG0SrgbbtljFOrRgFgRzCc4++TMFADBMLnc=",
-        "owner": "nix-community",
-        "repo": "nixos-generators",
-        "rev": "150f38bd1e09e20987feacb1b0d5991357532fb5",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-community",
-        "repo": "nixos-generators",
-        "type": "github"
-      }
-    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1696051733,
-        "narHash": "sha256-fEC8/6wJOWgCSvBjPwMBdaYtp57OUfQd3dJgp0D/It4=",
-        "owner": "Mic92",
+        "lastModified": 1697059129,
+        "narHash": "sha256-9NJcFF9CEYPvHJ5ckE8kvINvI84SZZ87PvqMbH6pro0=",
+        "owner": "nixos",
         "repo": "nixpkgs",
-        "rev": "c3bd4f19ef0062d4462444aa413e26c917187ae9",
+        "rev": "5e4c2ada4fcd54b99d56d7bd62f384511a7e2593",
         "type": "github"
       },
       "original": {
-        "owner": "Mic92",
-        "ref": "fakeroot",
+        "owner": "nixos",
+        "ref": "nixos-unstable",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
     "root": {
       "inputs": {
-        "disko": "disko",
         "flake-parts": "flake-parts",
         "floco": "floco",
-        "nixos-generators": "nixos-generators",
         "nixpkgs": "nixpkgs",
-        "sops-nix": "sops-nix",
         "treefmt-nix": "treefmt-nix"
       }
     },
-    "sops-nix": {
-      "inputs": {
-        "nixpkgs": [
-          "sops-nix"
-        ],
-        "nixpkgs-stable": []
-      },
-      "locked": {
-        "lastModified": 1696734395,
-        "narHash": "sha256-O/g/wwBqqSS7RQ53bE6Ssf0pXVTCYfN7NnJDhKfggQY=",
-        "owner": "Mic92",
-        "repo": "sops-nix",
-        "rev": "d7380c38d407eaf06d111832f4368ba3486b800e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "Mic92",
-        "repo": "sops-nix",
-        "type": "github"
-      }
-    },
     "treefmt-nix": {
       "inputs": {
         "nixpkgs": [

View File

@@ -1,22 +1,12 @@
 {
-  description = "clan.lol base operating system";
-  nixConfig.extra-substituters = [ "https://cache.clan.lol" ];
-  nixConfig.extra-trusted-public-keys = [ "cache.clan.lol-1:3KztgSAB5R1M+Dz7vzkBGzXdodizbgLXGXKXlcQLA28=" ];
+  description = "Consulting Website";
   inputs = {
     #nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
     # https://github.com/NixOS/nixpkgs/pull/257462
-    nixpkgs.url = "github:Mic92/nixpkgs/fakeroot";
+    nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
     floco.url = "github:aakropotkin/floco";
     floco.inputs.nixpkgs.follows = "nixpkgs";
-    disko.url = "github:nix-community/disko";
-    disko.inputs.nixpkgs.follows = "nixpkgs";
-    sops-nix.url = "github:Mic92/sops-nix";
-    sops-nix.inputs.nixpkgs.follows = "sops-nix";
-    sops-nix.inputs.nixpkgs-stable.follows = "";
-    nixos-generators.url = "github:nix-community/nixos-generators";
-    nixos-generators.inputs.nixpkgs.follows = "nixpkgs";
     flake-parts.url = "github:hercules-ci/flake-parts";
     flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
     treefmt-nix.url = "github:numtide/treefmt-nix";
@@ -34,14 +24,7 @@
         ./checks/flake-module.nix
         ./devShell.nix
         ./formatter.nix
-        ./templates/flake-module.nix
-        ./clanModules/flake-module.nix
         ./pkgs/flake-module.nix
-        ./lib/flake-module.nix
-        ./nixosModules/flake-module.nix
-        ./nixosModules/clanCore/flake-module.nix
       ];
     });
 }

View File

@@ -1,59 +0,0 @@
{ nixpkgs, self, lib }:
{ directory # The directory containing the machines subdirectory
, specialArgs ? { } # Extra arguments to pass to nixosSystem i.e. useful to make self available
, machines ? { } # allows to include machine-specific modules i.e. machines.${name} = { ... }
}:
let
machinesDirs = lib.optionalAttrs (builtins.pathExists "${directory}/machines") (builtins.readDir (directory + /machines));
machineSettings = machineName:
lib.optionalAttrs (builtins.pathExists "${directory}/machines/${machineName}/settings.json")
(builtins.fromJSON
(builtins.readFile (directory + /machines/${machineName}/settings.json)));
# TODO: remove default system once we have a hardware-config mechanism
nixosConfiguration = { system ? "x86_64-linux", name }: nixpkgs.lib.nixosSystem {
modules = [
self.nixosModules.clanCore
(machineSettings name)
(machines.${name} or { })
{
clanCore.machineName = name;
clanCore.clanDir = directory;
nixpkgs.hostPlatform = lib.mkForce system;
}
];
inherit specialArgs;
};
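# Machines passed explicitly to buildClan take precedence over the ones discovered in the machines/ directory.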
allMachines = machinesDirs // machines;
supportedSystems = [
"x86_64-linux"
"aarch64-linux"
"riscv64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
nixosConfigurations = lib.mapAttrs (name: _: nixosConfiguration { inherit name; }) allMachines;
# This instantiates nixos for each system that we support:
# configPerSystem = <system>.<machine>.nixosConfiguration
# We need this to build nixos secret generators for each system
configsPerSystem = builtins.listToAttrs
(builtins.map
(system: lib.nameValuePair system
(lib.mapAttrs (name: _: nixosConfiguration { inherit name system; }) allMachines))
supportedSystems);
in
{
inherit nixosConfigurations;
clanInternals = {
machines = configsPerSystem;
all-machines-json = lib.mapAttrs
(system: configs: nixpkgs.legacyPackages.${system}.writers.writeJSON "machines.json" (lib.mapAttrs (_: m: m.config.system.clan.deployment.data) configs))
configsPerSystem;
};
}

View File

@@ -1,6 +0,0 @@
{ lib, self, nixpkgs, ... }:
{
jsonschema = import ./jsonschema { inherit lib; };
buildClan = import ./build-clan { inherit lib self nixpkgs; };
}

View File

@@ -1,14 +0,0 @@
{ lib
, inputs
, self
, ...
}: {
imports = [
./jsonschema/flake-module.nix
];
flake.lib = import ./default.nix {
inherit lib;
inherit self;
inherit (inputs) nixpkgs;
};
}

View File

@@ -1,167 +0,0 @@
{ lib ? import <nixpkgs/lib> }:
let
# from nixos type to jsonschema type
typeMap = {
bool = "boolean";
float = "number";
int = "integer";
str = "string";
path = "string"; # TODO add prober path checks
};
# remove _module attribute from options
clean = opts: builtins.removeAttrs opts [ "_module" ];
# throw error if option type is not supported
notSupported = option: throw
"option type '${option.type.name}' ('${option.type.description}') not supported by jsonschema converter";
in
rec {
# parses a nixos module to a jsonschema
parseModule = module:
let
evaled = lib.evalModules {
modules = [ module ];
};
in
parseOptions evaled.options;
# parses a set of evaluated nixos options to a jsonschema
parseOptions = options':
let
options = clean options';
# parse options to jsonschema properties
properties = lib.mapAttrs (_name: option: parseOption option) options;
isRequired = prop: ! (prop ? default || prop.type == "object");
requiredProps = lib.filterAttrs (_: prop: isRequired prop) properties;
required = lib.optionalAttrs (requiredProps != { }) {
required = lib.attrNames requiredProps;
};
in
# return jsonschema
required // {
type = "object";
inherit properties;
};
# parses an evaluated nixos option into a jsonschema property definition
parseOption = option:
let
default = lib.optionalAttrs (option ? default) {
inherit (option) default;
};
description = lib.optionalAttrs (option ? description) {
inherit (option) description;
};
in
# handle nested options (not a submodule)
if ! option ? _type
then parseOptions option
# throw if not an option
else if option._type != "option"
then throw "parseOption: not an option"
# parse nullOr
else if option.type.name == "nullOr"
# return jsonschema property definition for nullOr
then default // description // {
type = [
"null"
(typeMap.${option.type.functor.wrapped.name} or (notSupported option))
];
}
# parse bool
else if option.type.name == "bool"
# return jsonschema property definition for bool
then default // description // {
type = "boolean";
}
# parse float
else if option.type.name == "float"
# return jsonschema property definition for float
then default // description // {
type = "number";
}
# parse int
else if (option.type.name == "int" || option.type.name == "positiveInt")
# return jsonschema property definition for int
then default // description // {
type = "integer";
}
# parse string
else if option.type.name == "str"
# return jsonschema property definition for string
then default // description // {
type = "string";
}
# parse path
else if option.type.name == "path"
# return jsonschema property definition for path
then default // description // {
type = "string";
}
# parse enum
else if option.type.name == "enum"
# return jsonschema property definition for enum
then default // description // {
enum = option.type.functor.payload;
}
# parse listOf submodule
else if option.type.name == "listOf" && option.type.functor.wrapped.name == "submodule"
# return jsonschema property definition for listOf submodule
then default // description // {
type = "array";
items = parseOptions (option.type.functor.wrapped.getSubOptions option.loc);
}
# parse list
else if
(option.type.name == "listOf")
&& (typeMap ? "${option.type.functor.wrapped.name}")
# return jsonschema property definition for list
then default // description // {
type = "array";
items = {
type = typeMap.${option.type.functor.wrapped.name};
};
}
# parse attrsOf submodule
else if option.type.name == "attrsOf" && option.type.nestedTypes.elemType.name == "submodule"
# return jsonschema property definition for attrsOf submodule
then default // description // {
type = "object";
additionalProperties = parseOptions (option.type.nestedTypes.elemType.getSubOptions option.loc);
}
# parse attrs
else if option.type.name == "attrsOf"
# return jsonschema property definition for attrs
then default // description // {
type = "object";
additionalProperties = {
type = typeMap.${option.type.nestedTypes.elemType.name} or (notSupported option);
};
}
# parse submodule
else if option.type.name == "submodule"
# return jsonschema property definition for submodule
# then (lib.attrNames (option.type.getSubOptions option.loc).opt)
then parseOptions (option.type.getSubOptions option.loc)
# throw error if option type is not supported
else notSupported option;
}

View File

@@ -1,14 +0,0 @@
{
"name": "John Doe",
"age": 42,
"isAdmin": false,
"kernelModules": ["usbhid", "usb_storage"],
"userIds": {
"mic92": 1,
"lassulus": 2,
"davhau": 3
},
"services": {
"opt": "this option doesn't make sense"
}
}

View File

@@ -1,51 +0,0 @@
/*
An example nixos module declaring an interface.
*/
{ lib, ... }: {
options = {
# str
name = lib.mkOption {
type = lib.types.str;
default = "John Doe";
description = "The name of the user";
};
# int
age = lib.mkOption {
type = lib.types.int;
default = 42;
description = "The age of the user";
};
# bool
isAdmin = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Is the user an admin?";
};
# a submodule option
services = lib.mkOption {
type = lib.types.submodule {
options.opt = lib.mkOption {
type = lib.types.str;
default = "foo";
description = "A submodule option";
};
};
};
# attrs of int
userIds = lib.mkOption {
type = lib.types.attrsOf lib.types.int;
description = "Some attributes";
default = {
horst = 1;
peter = 2;
albrecht = 3;
};
};
# list of str
kernelModules = lib.mkOption {
type = lib.types.listOf lib.types.str;
default = [ "nvme" "xhci_pci" "ahci" ];
description = "A list of enabled kernel modules";
};
};
}

View File

@@ -1,50 +0,0 @@
{
"type": "object",
"properties": {
"name": {
"type": "string",
"default": "John Doe",
"description": "The name of the user"
},
"age": {
"type": "integer",
"default": 42,
"description": "The age of the user"
},
"isAdmin": {
"type": "boolean",
"default": false,
"description": "Is the user an admin?"
},
"kernelModules": {
"type": "array",
"items": {
"type": "string"
},
"default": ["nvme", "xhci_pci", "ahci"],
"description": "A list of enabled kernel modules"
},
"userIds": {
"type": "object",
"default": {
"horst": 1,
"peter": 2,
"albrecht": 3
},
"additionalProperties": {
"type": "integer"
},
"description": "Some attributes"
},
"services": {
"type": "object",
"properties": {
"opt": {
"type": "string",
"default": "foo",
"description": "A submodule option"
}
}
}
}
}

View File

@@ -1,29 +0,0 @@
{
perSystem = { pkgs, self', ... }: {
checks = {
# check if the `clan config` example jsonschema and data is valid
lib-jsonschema-example-valid = pkgs.runCommand "lib-jsonschema-example-valid" { } ''
echo "Checking that example-schema.json is valid"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--check-metaschema ${./.}/example-schema.json
echo "Checking that example-data.json is valid according to example-schema.json"
${pkgs.check-jsonschema}/bin/check-jsonschema \
--schemafile ${./.}/example-schema.json \
${./.}/example-data.json
touch $out
'';
# check if the `clan config` nix jsonschema converter unit tests succeed
lib-jsonschema-nix-unit-tests = pkgs.runCommand "lib-jsonschema-nix-unit-tests" { } ''
export NIX_PATH=nixpkgs=${pkgs.path}
${self'.packages.nix-unit}/bin/nix-unit \
${./.}/test.nix \
--eval-store $(realpath .)
touch $out
'';
};
};
}

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
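# Evaluate example-interface.nix with nixosOptionsDoc and pretty-print the resulting options JSON into options.json.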
expr='let pkgs = import <nixpkgs> {}; lib = pkgs.lib; in (pkgs.nixosOptionsDoc {options = (lib.evalModules {modules=[./example-interface.nix];}).options;}).optionsJSON.options'
jq < "$(nix eval --impure --raw --expr "$expr")" > options.json

View File

@@ -1,89 +0,0 @@
{
"age": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "42"
},
"description": "The age of the user",
"loc": ["age"],
"readOnly": false,
"type": "signed integer"
},
"isAdmin": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "false"
},
"description": "Is the user an admin?",
"loc": ["isAdmin"],
"readOnly": false,
"type": "boolean"
},
"kernelModules": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "[\n \"nvme\"\n \"xhci_pci\"\n \"ahci\"\n]"
},
"description": "A list of enabled kernel modules",
"loc": ["kernelModules"],
"readOnly": false,
"type": "list of string"
},
"name": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "\"John Doe\""
},
"description": "The name of the user",
"loc": ["name"],
"readOnly": false,
"type": "string"
},
"services": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"description": null,
"loc": ["services"],
"readOnly": false,
"type": "submodule"
},
"services.opt": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "\"foo\""
},
"description": "A submodule option",
"loc": ["services", "opt"],
"readOnly": false,
"type": "string"
},
"userIds": {
"declarations": [
"/home/grmpf/synced/projects/clan/clan-core/lib/jsonschema/example-interface.nix"
],
"default": {
"_type": "literalExpression",
"text": "{\n albrecht = 3;\n horst = 1;\n peter = 2;\n}"
},
"description": "Some attributes",
"loc": ["userIds"],
"readOnly": false,
"type": "attribute set of signed integer"
}
}

View File

@@ -1,8 +0,0 @@
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
{
parseOption = import ./test_parseOption.nix { inherit lib slib; };
parseOptions = import ./test_parseOptions.nix { inherit lib slib; };
}

View File

@@ -1,249 +0,0 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
let
description = "Test Description";
evalType = type: default:
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
inherit type;
inherit default;
inherit description;
};
}];
};
in
evaledConfig.options.opt;
in
{
testNoDefaultNoDescription =
let
evaledConfig = lib.evalModules {
modules = [{
options.opt = lib.mkOption {
type = lib.types.bool;
};
}];
};
in
{
expr = slib.parseOption evaledConfig.options.opt;
expected = {
type = "boolean";
};
};
testBool =
let
default = false;
in
{
expr = slib.parseOption (evalType lib.types.bool default);
expected = {
type = "boolean";
inherit default description;
};
};
testString =
let
default = "hello";
in
{
expr = slib.parseOption (evalType lib.types.str default);
expected = {
type = "string";
inherit default description;
};
};
testInteger =
let
default = 42;
in
{
expr = slib.parseOption (evalType lib.types.int default);
expected = {
type = "integer";
inherit default description;
};
};
testFloat =
let
default = 42.42;
in
{
expr = slib.parseOption (evalType lib.types.float default);
expected = {
type = "number";
inherit default description;
};
};
testEnum =
let
default = "foo";
values = [ "foo" "bar" "baz" ];
in
{
expr = slib.parseOption (evalType (lib.types.enum values) default);
expected = {
enum = values;
inherit default description;
};
};
testListOfInt =
let
default = [ 1 2 3 ];
in
{
expr = slib.parseOption (evalType (lib.types.listOf lib.types.int) default);
expected = {
type = "array";
items = {
type = "integer";
};
inherit default description;
};
};
testAttrsOfInt =
let
default = { foo = 1; bar = 2; baz = 3; };
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf lib.types.int) default);
expected = {
type = "object";
additionalProperties = {
type = "integer";
};
inherit default description;
};
};
testNullOrBool =
let
default = null; # null is a valid value for this type
in
{
expr = slib.parseOption (evalType (lib.types.nullOr lib.types.bool) default);
expected = {
type = [ "null" "boolean" ];
inherit default description;
};
};
testSubmoduleOption =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
in
{
expr = slib.parseOption (evalType (lib.types.submodule subModule) { });
expected = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
};
testSubmoduleOptionWithoutDefault =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
inherit description;
};
};
in
{
expr = slib.parseOption (evalType (lib.types.submodule subModule) { });
expected = {
type = "object";
properties = {
opt = {
type = "boolean";
inherit description;
};
};
required = [ "opt" ];
};
};
testAttrsOfSubmodule =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
default = { foo.opt = false; bar.opt = true; };
in
{
expr = slib.parseOption (evalType (lib.types.attrsOf (lib.types.submodule subModule)) default);
expected = {
type = "object";
additionalProperties = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
inherit default description;
};
};
testListOfSubmodule =
let
subModule = {
options.opt = lib.mkOption {
type = lib.types.bool;
default = true;
inherit description;
};
};
default = [{ opt = false; } { opt = true; }];
in
{
expr = slib.parseOption (evalType (lib.types.listOf (lib.types.submodule subModule)) default);
expected = {
type = "array";
items = {
type = "object";
properties = {
opt = {
type = "boolean";
default = true;
inherit description;
};
};
};
inherit default description;
};
};
}

View File

@@ -1,46 +0,0 @@
# tests for the nixos options to jsonschema converter
# run these tests via `nix-unit ./test.nix`
{ lib ? (import <nixpkgs> { }).lib
, slib ? import ./. { inherit lib; }
}:
let
evaledOptions =
let
evaledConfig = lib.evalModules {
modules = [ ./example-interface.nix ];
};
in
evaledConfig.options;
in
{
testParseOptions = {
expr = slib.parseOptions evaledOptions;
expected = builtins.fromJSON (builtins.readFile ./example-schema.json);
};
testParseNestedOptions =
let
evaled = lib.evalModules {
modules = [{
options.foo.bar = lib.mkOption {
type = lib.types.bool;
};
}];
};
in
{
expr = slib.parseOptions evaled.options;
expected = {
properties = {
foo = {
properties = {
bar = { type = "boolean"; };
};
required = [ "bar" ];
type = "object";
};
};
type = "object";
};
};
}

View File

@@ -1,9 +0,0 @@
{ lib, ... }: {
options.clan.bloatware = lib.mkOption {
type = lib.types.submodule {
imports = [
../../../lib/jsonschema/example-interface.nix
];
};
};
}

View File

@@ -1,106 +0,0 @@
{ self, inputs, lib, ... }: {
flake.nixosModules.clanCore = { config, pkgs, options, ... }: {
imports = [
./secrets
./zerotier
./networking.nix
inputs.sops-nix.nixosModules.sops
# just some example options. Can be removed later
./bloatware
./vm.nix
./options.nix
];
options.clanSchema = lib.mkOption {
type = lib.types.attrs;
description = "The json schema for the .clan options namespace";
default = self.lib.jsonschema.parseOptions options.clan;
};
options.clanCore = {
clanDir = lib.mkOption {
type = lib.types.either lib.types.path lib.types.str;
description = ''
the location of the flake repo, used to calculate the location of facts and secrets
'';
};
machineName = lib.mkOption {
type = lib.types.str;
description = ''
the name of the machine
'';
};
clanPkgs = lib.mkOption {
default = self.packages.${pkgs.system};
defaultText = "self.packages.${pkgs.system}";
internal = true;
};
};
options.system.clan = lib.mkOption {
type = lib.types.submodule {
options = {
deployment.data = lib.mkOption {
type = lib.types.attrs;
description = ''
the data to be written to the deployment.json file
'';
};
deployment.file = lib.mkOption {
type = lib.types.path;
description = ''
the location of the deployment.json file
'';
};
deploymentAddress = lib.mkOption {
type = lib.types.str;
description = ''
the address of the deployment server
'';
};
secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
the directory on the deployment server where secrets are uploaded
'';
};
uploadSecrets = lib.mkOption {
type = lib.types.path;
description = ''
script to upload secrets to the deployment server
'';
default = "${pkgs.coreutils}/bin/true";
};
generateSecrets = lib.mkOption {
type = lib.types.path;
description = ''
script to generate secrets
'';
default = "${pkgs.coreutils}/bin/true";
};
vm.config = lib.mkOption {
type = lib.types.attrs;
description = ''
the vm config
'';
};
vm.create = lib.mkOption {
type = lib.types.path;
description = ''
json metadata about the vm
'';
};
};
};
description = ''
utility outputs for clan management of this machine
'';
};
# optimization for faster secret generate/upload and machines update
config = {
system.clan.deployment.data = {
inherit (config.system.clan) uploadSecrets generateSecrets;
inherit (config.clan.networking) deploymentAddress;
inherit (config.clanCore) secretsUploadDirectory;
};
system.clan.deployment.file = pkgs.writeText "deployment.json" (builtins.toJSON config.system.clan.deployment.data);
};
};
}

View File

@@ -1,21 +0,0 @@
{ config, lib, ... }:
{
options.clan.networking = {
deploymentAddress = lib.mkOption {
description = ''
The target SSH node for deployment.
By default, the node's attribute name will be used.
If set to null, only local deployment will be supported.
format: user@host:port&SSH_OPTION=SSH_VALUE
examples:
- machine.example.com
- user@machine2.example.com
- root@example.com:2222&IdentityFile=/path/to/private/key
'';
type = lib.types.nullOr lib.types.str;
default = "root@${config.networking.hostName}";
};
};
}

View File

@@ -1,12 +0,0 @@
{ pkgs, options, lib, ... }: {
options.clanCore.optionsNix = lib.mkOption {
type = lib.types.raw;
internal = true;
readOnly = true;
default = (pkgs.nixosOptionsDoc { inherit options; }).optionsNix;
defaultText = "optionsNix";
description = ''
This is to export nixos options used for `clan config`
'';
};
}

View File

@@ -1,121 +0,0 @@
{ config, lib, ... }:
{
options.clanCore.secretStore = lib.mkOption {
type = lib.types.enum [ "sops" "password-store" "custom" ];
default = "sops";
description = ''
method to store secrets
custom can be used to define a custom secret store.
one would have to define system.clan.generateSecrets and system.clan.uploadSecrets
'';
};
options.clanCore.secretsDirectory = lib.mkOption {
type = lib.types.path;
description = ''
The directory where secrets are installed to. This is backend specific.
'';
};
options.clanCore.secretsUploadDirectory = lib.mkOption {
type = lib.types.path;
description = ''
The directory where secrets are uploaded to. This is backend specific.
'';
};
options.clanCore.secretsPrefix = lib.mkOption {
type = lib.types.str;
default = "";
description = ''
Prefix for secrets. This is backend specific.
'';
};
options.clanCore.secrets = lib.mkOption {
default = { };
type = lib.types.attrsOf
(lib.types.submodule (secret: {
options = {
name = lib.mkOption {
type = lib.types.str;
default = secret.config._module.args.name;
description = ''
Namespace of the secret
'';
};
generator = lib.mkOption {
type = lib.types.str;
description = ''
Script to generate the secret.
The script will be called with the following variables:
- facts: path to a directory where facts can be stored
- secrets: path to a directory where secrets can be stored
The script is expected to generate all secrets and facts defined in the module.
'';
};
secrets =
let
config' = config;
in
lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule ({ config, ... }: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the secret
'';
default = config._module.args.name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a secret which is generated by the generator
'';
default = "${config'.clanCore.secretsDirectory}/${config'.clanCore.secretsPrefix}${config.name}";
};
};
}));
description = ''
path where the secret is located in the filesystem
'';
};
facts = lib.mkOption {
default = { };
type = lib.types.attrsOf (lib.types.submodule (fact: {
options = {
name = lib.mkOption {
type = lib.types.str;
description = ''
name of the fact
'';
default = fact.config._module.args.name;
};
path = lib.mkOption {
type = lib.types.str;
description = ''
path to a fact which is generated by the generator
'';
default = "machines/${config.clanCore.machineName}/facts/${fact.config._module.args.name}";
};
value = lib.mkOption {
defaultText = lib.literalExpression "\${config.clanCore.clanDir}/\${fact.config.path}";
type = lib.types.nullOr lib.types.str;
default =
if builtins.pathExists "${config.clanCore.clanDir}/${fact.config.path}" then
builtins.readFile "${config.clanCore.clanDir}/${fact.config.path}"
else
null;
};
};
}));
};
};
}));
};
imports = [
./sops.nix
./password-store.nix
];
}

View File

@@ -1,116 +0,0 @@
{ config, lib, pkgs, ... }:
let
passwordstoreDir = "\${PASSWORD_STORE_DIR:-$HOME/.password-store}";
in
{
options.clan.password-store.targetDirectory = lib.mkOption {
type = lib.types.path;
default = "/etc/secrets";
description = ''
The directory where the password store is uploaded to.
'';
};
config = lib.mkIf (config.clanCore.secretStore == "password-store") {
clanCore.secretsDirectory = config.clan.password-store.targetDirectory;
clanCore.secretsUploadDirectory = config.clan.password-store.targetDirectory;
system.clan.generateSecrets = lib.mkIf (config.clanCore.secrets != { }) (
pkgs.writeScript "generate-secrets" ''
#!/bin/sh
set -efu
test -d "$CLAN_DIR"
PATH=${lib.makeBinPath [
pkgs.pass
]}:$PATH
# TODO maybe initialize password store if it doesn't exist yet
${lib.foldlAttrs (acc: n: v: ''
${acc}
# ${n}
# if any of the secrets are missing, we regenerate all connected facts/secrets
(if ! (${lib.concatMapStringsSep " && " (x: "test -e ${passwordstoreDir}/machines/${config.clanCore.machineName}/${x.name}.gpg >/dev/null") (lib.attrValues v.secrets)}); then
tmpdir=$(mktemp -d)
trap "rm -rf $tmpdir" EXIT
cd $tmpdir
facts=$(mktemp -d)
trap "rm -rf $facts" EXIT
secrets=$(mktemp -d)
trap "rm -rf $secrets" EXIT
( ${v.generator} )
${lib.concatMapStrings (fact: ''
mkdir -p "$CLAN_DIR"/"$(dirname ${fact.path})"
cp "$facts"/${fact.name} "$CLAN_DIR"/${fact.path}
'') (lib.attrValues v.facts)}
${lib.concatMapStrings (secret: ''
cat "$secrets"/${secret.name} | pass insert -m machines/${config.clanCore.machineName}/${secret.name}
'') (lib.attrValues v.secrets)}
fi)
'') "" config.clanCore.secrets}
''
);
system.clan.uploadSecrets = pkgs.writeScript "upload-secrets" ''
#!/bin/sh
set -efu
umask 0077
PATH=${lib.makeBinPath [
pkgs.pass
pkgs.git
pkgs.findutils
pkgs.rsync
]}:$PATH:${lib.getBin pkgs.openssh}
if test -e ${passwordstoreDir}/.git; then
local_pass_info=$(
git -C ${passwordstoreDir} log -1 --format=%H machines/${config.clanCore.machineName}
# we append a hash for every symlink, otherwise we would miss updates on
# files where the symlink points to
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type l \
-exec realpath {} + |
sort |
xargs -r -n 1 git -C ${passwordstoreDir} log -1 --format=%H
)
remote_pass_info=$(ssh ${config.clan.networking.deploymentAddress} -- ${lib.escapeShellArg ''
cat ${config.clan.password-store.targetDirectory}/.pass_info || :
''} || :)
if test "$local_pass_info" = "$remote_pass_info"; then
echo secrets already match
exit 23
fi
fi
find ${passwordstoreDir}/machines/${config.clanCore.machineName} -type f -follow ! -name .gpg-id |
while read -r gpg_path; do
rel_name=''${gpg_path#${passwordstoreDir}}
rel_name=''${rel_name%.gpg}
pass_date=$(
if test -e ${passwordstoreDir}/.git; then
git -C ${passwordstoreDir} log -1 --format=%aI "$gpg_path"
fi
)
pass_name=$rel_name
tmp_path="$SECRETS_DIR"/$(basename $rel_name)
mkdir -p "$(dirname "$tmp_path")"
pass show "$pass_name" > "$tmp_path"
if [ -n "$pass_date" ]; then
touch -d "$pass_date" "$tmp_path"
fi
done
if test -n "''${local_pass_info-}"; then
echo "$local_pass_info" > "$SECRETS_DIR"/.pass_info
fi
'';
};
}

View File

@@ -1,59 +0,0 @@
{ config, lib, pkgs, ... }:
let
secretsDir = config.clanCore.clanDir + "/sops/secrets";
groupsDir = config.clanCore.clanDir + "/sops/groups";
# In the nixos module my symlink is detected as a directory, although it works in the repl. Is this because of pure evaluation?
containsSymlink = path:
builtins.pathExists path && (builtins.readFileType path == "directory" || builtins.readFileType path == "symlink");
containsMachine = parent: name: type:
type == "directory" && containsSymlink "${parent}/${name}/machines/${config.clanCore.machineName}";
containsMachineOrGroups = name: type:
(containsMachine secretsDir name type) || lib.any (group: type == "directory" && containsSymlink "${secretsDir}/${name}/groups/${group}") groups;
filterDir = filter: dir:
lib.optionalAttrs (builtins.pathExists dir)
(lib.filterAttrs filter (builtins.readDir dir));
groups = builtins.attrNames (filterDir (containsMachine groupsDir) groupsDir);
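# all secrets that are encrypted for this machine, either directly or via one of its groups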
secrets = filterDir containsMachineOrGroups secretsDir;
in
{
config = lib.mkIf (config.clanCore.secretStore == "sops") {
clanCore.secretsDirectory = "/run/secrets";
clanCore.secretsPrefix = config.clanCore.machineName + "-";
system.clan = lib.mkIf (config.clanCore.secrets != { }) {
generateSecrets = pkgs.writeScript "generate-secrets" ''
#!${pkgs.python3}/bin/python
import json
from clan_cli.secrets.sops_generate import generate_secrets_from_nix
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; secret_submodules = config.clanCore.secrets; })})
generate_secrets_from_nix(**args)
'';
uploadSecrets = pkgs.writeScript "upload-secrets" ''
#!${pkgs.python3}/bin/python
import json
from clan_cli.secrets.sops_generate import upload_age_key_from_nix
# the second toJSON is needed to escape the string for the python
args = json.loads(${builtins.toJSON (builtins.toJSON { machine_name = config.clanCore.machineName; })})
upload_age_key_from_nix(**args)
'';
};
sops.secrets = builtins.mapAttrs
(name: _: {
sopsFile = config.clanCore.clanDir + "/sops/secrets/${name}/secret";
format = "binary";
})
secrets;
# To get proper error messages about missing secrets we need a dummy secret file that is always present
sops.defaultSopsFile = lib.mkIf config.sops.validateSopsFiles (lib.mkDefault (builtins.toString (pkgs.writeText "dummy.yaml" "")));
sops.age.keyFile = lib.mkIf (builtins.pathExists (config.clanCore.clanDir + "/sops/secrets/${config.clanCore.machineName}-age.key/secret"))
(lib.mkDefault "/var/lib/sops-nix/key.txt");
clanCore.secretsUploadDirectory = lib.mkDefault "/var/lib/sops-nix";
};
}

View File

@@ -1,74 +0,0 @@
{ lib, config, pkgs, options, extendModules, modulesPath, ... }:
let
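# extend this machine's configuration with the qemu-vm module and mount the secrets upload directory into the VM via 9p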
vmConfig = extendModules {
modules = [
(modulesPath + "/virtualisation/qemu-vm.nix")
{
virtualisation.fileSystems.${config.clanCore.secretsUploadDirectory} = lib.mkForce {
device = "secrets";
fsType = "9p";
neededForBoot = true;
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
};
}
];
};
in
{
options = {
clan.virtualisation = {
cores = lib.mkOption {
type = lib.types.ints.positive;
default = 1;
description = lib.mdDoc ''
Specify the number of cores the guest is permitted to use.
The number can be higher than the available cores on the
host system.
'';
};
memorySize = lib.mkOption {
type = lib.types.ints.positive;
default = 1024;
description = lib.mdDoc ''
The memory size in megabytes of the virtual machine.
'';
};
graphics = lib.mkOption {
type = lib.types.bool;
default = true;
description = lib.mdDoc ''
Whether to run QEMU with a graphics window, or in nographic mode.
Serial console will be enabled on both settings, but this will
change the preferred console.
'';
};
};
};
config = {
system.clan.vm = {
# for clan vm inspect
config = {
inherit (config.clan.virtualisation) cores graphics;
memory_size = config.clan.virtualisation.memorySize;
};
# for clan vm create
create = pkgs.writeText "vm.json" (builtins.toJSON {
initrd = "${vmConfig.config.system.build.initialRamdisk}/${vmConfig.config.system.boot.loader.initrdFile}";
toplevel = vmConfig.config.system.build.toplevel;
regInfo = (pkgs.closureInfo { rootPaths = vmConfig.config.virtualisation.additionalPaths; });
inherit (config.clan.virtualisation) memorySize cores graphics;
generateSecrets = config.system.clan.generateSecrets;
uploadSecrets = config.system.clan.uploadSecrets;
});
};
virtualisation = lib.optionalAttrs (options.virtualisation ? cores) {
memorySize = lib.mkDefault config.clan.virtualisation.memorySize;
graphics = lib.mkDefault config.clan.virtualisation.graphics;
cores = lib.mkDefault config.clan.virtualisation.cores;
};
};
}

View File

@@ -1,122 +0,0 @@
{ config, lib, pkgs, ... }:
let
cfg = config.clan.networking.zerotier;
facts = config.clanCore.secrets.zerotier.facts;
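# network definition that gets linked into the controller's controller.d/network/<networkId>.json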
networkConfig = {
authTokens = [
null
];
authorizationEndpoint = "";
capabilities = [ ];
clientId = "";
dns = [ ];
enableBroadcast = true;
id = cfg.networkId;
ipAssignmentPools = [ ];
mtu = 2800;
multicastLimit = 32;
name = "";
uwid = cfg.networkId;
objtype = "network";
private = !cfg.controller.public;
remoteTraceLevel = 0;
remoteTraceTarget = null;
revision = 1;
routes = [ ];
rules = [
{
not = false;
or = false;
type = "ACTION_ACCEPT";
}
];
rulesSource = "";
ssoEnabled = false;
tags = [ ];
v4AssignMode = {
zt = false;
};
v6AssignMode = {
"6plane" = false;
rfc4193 = true;
zt = false;
};
};
in
{
options.clan.networking.zerotier = {
networkId = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = ''
zerotier networking id
'';
};
controller = {
enable = lib.mkEnableOption "turn this machine into the network controller";
public = lib.mkOption {
type = lib.types.bool;
default = false;
description = ''
everyone can join a public network without the administrator having to accept them
'';
};
};
};
config = lib.mkMerge [
({
# Override license so that we can build zerotierone without
# having to re-import nixpkgs.
services.zerotierone.package = lib.mkDefault (pkgs.zerotierone.overrideAttrs (_old: { meta = { }; }));
})
(lib.mkIf (cfg.networkId != null) {
systemd.network.networks.zerotier = {
matchConfig.Name = "zt*";
networkConfig = {
LLMNR = true;
LLDP = true;
MulticastDNS = true;
KeepConfiguration = "static";
};
};
networking.firewall.interfaces."zt+".allowedTCPPorts = [ 5353 ]; # mdns
networking.firewall.interfaces."zt+".allowedUDPPorts = [ 5353 ]; # mdns
networking.networkmanager.unmanaged = [ "interface-name:zt*" ];
services.zerotierone = {
enable = true;
joinNetworks = [ cfg.networkId ];
};
})
(lib.mkIf cfg.controller.enable {
# only the controller needs to have the key in the repo, the other clients can be dynamic
# we generate the zerotier code manually for the controller, since it's part of the bootstrap command
clanCore.secrets.zerotier = {
facts.zerotier-network-id = { };
secrets.zerotier-identity-secret = { };
generator = ''
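        # Spin up a throw-away controller via generate-network.py to mint the network ID and identity secret.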
export PATH=${lib.makeBinPath [ config.services.zerotierone.package pkgs.fakeroot ]}
${pkgs.python3.interpreter} ${./generate-network.py} "$facts/zerotier-network-id" "$secrets/zerotier-identity-secret"
'';
};
environment.systemPackages = [ config.clanCore.clanPkgs.zerotier-members ];
})
(lib.mkIf ((config.clanCore.secrets ? zerotier) && (facts.zerotier-network-id.value != null)) {
clan.networking.zerotier.networkId = facts.zerotier-network-id.value;
environment.etc."zerotier/network-id".text = facts.zerotier-network-id.value;
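      # Install the generated identity and network definition into zerotier-one's state directory before the daemon starts.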
systemd.services.zerotierone.serviceConfig.ExecStartPre = [
"+${pkgs.writeShellScript "init-zerotier" ''
cp ${config.clanCore.secrets.zerotier.secrets.zerotier-identity-secret.path} /var/lib/zerotier-one/identity.secret
mkdir -p /var/lib/zerotier-one/controller.d/network
ln -sfT ${pkgs.writeText "net.json" (builtins.toJSON networkConfig)} /var/lib/zerotier-one/controller.d/network/${cfg.networkId}.json
''}"
];
systemd.services.zerotierone.serviceConfig.ExecStartPost = [
"+${pkgs.writeShellScript "whitelist-controller" ''
${config.clanCore.clanPkgs.zerotier-members}/bin/zerotier-members allow ${builtins.substring 0 10 cfg.networkId}
''}"
];
})
];
}

View File

@@ -1,143 +0,0 @@
import argparse
import contextlib
import json
import socket
import subprocess
import time
import urllib.request
from contextlib import contextmanager
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Any, Iterator, Optional


class ClanError(Exception):
    pass


def try_bind_port(port: int) -> bool:
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with tcp, udp:
        try:
            tcp.bind(("127.0.0.1", port))
            udp.bind(("127.0.0.1", port))
            return True
        except OSError:
            return False


def try_connect_port(port: int) -> bool:
    sock = socket.socket(socket.AF_INET)
    result = sock.connect_ex(("127.0.0.1", port))
    sock.close()
    return result == 0


def find_free_port() -> Optional[int]:
    """Find an unused localhost port from 1024-65535 and return it."""
    with contextlib.closing(socket.socket(type=socket.SOCK_STREAM)) as sock:
        sock.bind(("127.0.0.1", 0))
        return sock.getsockname()[1]


class ZerotierController:
    def __init__(self, port: int, home: Path) -> None:
        self.port = port
        self.home = home
        self.authtoken = (home / "authtoken.secret").read_text()
        self.secret = (home / "identity.secret").read_text()

    def _http_request(
        self,
        path: str,
        method: str = "GET",
        headers: dict[str, str] = {},
        data: Optional[dict[str, Any]] = None,
    ) -> dict[str, Any]:
        body = None
        headers = headers.copy()
        if data is not None:
            body = json.dumps(data).encode("ascii")
            headers["Content-Type"] = "application/json"
        headers["X-ZT1-AUTH"] = self.authtoken
        url = f"http://127.0.0.1:{self.port}{path}"
        req = urllib.request.Request(url, headers=headers, method=method, data=body)
        resp = urllib.request.urlopen(req)
        return json.load(resp)

    def status(self) -> dict[str, Any]:
        return self._http_request("/status")

def create_network(self, data: dict[str, Any] = {}) -> dict[str, Any]:
identity = (self.home / "identity.public").read_text()
node_id = identity.split(":")[0]
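        # Posting to /controller/network/<nodeID>______ asks the controller to allocate a fresh network ID prefixed with its own node ID.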
        return self._http_request(
            f"/controller/network/{node_id}______", method="POST", data=data
        )

    def get_network(self, id: str) -> dict[str, Any]:
        return self._http_request(f"/controller/network/{id}")


@contextmanager
def zerotier_controller() -> Iterator[ZerotierController]:
    # This check could be racy but it's unlikely in practice
    controller_port = find_free_port()
    if controller_port is None:
        raise ClanError("cannot find a free port for zerotier controller")
    with TemporaryDirectory() as d:
        tempdir = Path(d)
        home = tempdir / "zerotier-one"
        home.mkdir()
        cmd = [
            "fakeroot",
            "--",
            "zerotier-one",
            f"-p{controller_port}",
            str(home),
        ]
        with subprocess.Popen(cmd) as p:
            try:
                print(
                    f"waiting for the controller to start on 127.0.0.1:{controller_port}...",
                )
                while not try_connect_port(controller_port):
                    status = p.poll()
                    if status is not None:
                        raise ClanError(
                            f"zerotier-one has been terminated unexpectedly with {status}"
                        )
                    time.sleep(0.1)
                print()
                yield ZerotierController(controller_port, home)
            finally:
                p.terminate()
                p.wait()


# TODO: allow merging more network configuration here
def create_network() -> dict:
    with zerotier_controller() as controller:
        network = controller.create_network()
        return {
            "secret": controller.secret,
            "networkid": network["nwid"],
        }


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("network_id")
    parser.add_argument("identity_secret")
    args = parser.parse_args()
    zerotier = create_network()
    Path(args.network_id).write_text(zerotier["networkid"])
    Path(args.identity_secret).write_text(zerotier["secret"])


if __name__ == "__main__":
    main()

View File

@@ -1,6 +0,0 @@
{ ... }: {
flake.nixosModules = {
hidden-ssh-announce.imports = [ ./hidden-ssh-announce.nix ];
installer.imports = [ ./installer ];
};
}

View File

@@ -1,55 +0,0 @@
{ config
, lib
, pkgs
, ...
}: {
options.hidden-ssh-announce = {
enable = lib.mkEnableOption "hidden-ssh-announce";
script = lib.mkOption {
type = lib.types.package;
default = pkgs.writers.writeDash "test-output" "echo $1";
description = ''
        script to run once the hidden Tor service has started and the hostname is known.
        Takes the hostname as $1.
'';
};
};
config = lib.mkIf config.hidden-ssh-announce.enable {
services.openssh.enable = true;
services.tor = {
enable = true;
relay.onionServices.hidden-ssh = {
version = 3;
map = [
{
port = 22;
target.port = 22;
}
];
};
client.enable = true;
};
systemd.services.hidden-ssh-announce = {
description = "announce hidden ssh";
after = [ "tor.service" "network-online.target" ];
wants = [ "tor.service" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
# ${pkgs.tor}/bin/torify
ExecStart = pkgs.writers.writeDash "announce-hidden-service" ''
set -efu
until test -e ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname; do
echo "still waiting for ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname"
sleep 1
done
${config.hidden-ssh-announce.script} "$(cat ${config.services.tor.settings.DataDirectory}/onion/hidden-ssh/hostname)"
'';
PrivateTmp = "true";
User = "tor";
Type = "oneshot";
};
};
};
}

View File

@@ -1,82 +0,0 @@
{ lib
, pkgs
, modulesPath
, ...
}: {
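  # World-writable scratch directory; the tor user drops the onion hostname and QR codes here for the root console session.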
systemd.tmpfiles.rules = [
"d /var/shared 0777 root root - -"
];
imports = [
(modulesPath + "/profiles/installation-device.nix")
(modulesPath + "/profiles/all-hardware.nix")
(modulesPath + "/profiles/base.nix")
];
services.openssh.settings.PermitRootLogin = "yes";
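  # Generate a random root password at activation time and keep a copy in /var/shared so it can be embedded in the login QR code.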
system.activationScripts.root-password = ''
mkdir -p /var/shared
${pkgs.pwgen}/bin/pwgen -s 16 1 > /var/shared/root-password
echo "root:$(cat /var/shared/root-password)" | chpasswd
'';
hidden-ssh-announce = {
enable = true;
script = pkgs.writers.writeDash "write-hostname" ''
set -efu
mkdir -p /var/shared
echo "$1" > /var/shared/onion-hostname
${pkgs.jq}/bin/jq -nc \
--arg password "$(cat /var/shared/root-password)" \
--arg address "$(cat /var/shared/onion-hostname)" '{
password: $password, address: $address
}' > /var/shared/login.info
cat /var/shared/login.info |
${pkgs.qrencode}/bin/qrencode -t utf8 -o /var/shared/qrcode.utf8
cat /var/shared/login.info |
${pkgs.qrencode}/bin/qrencode -t png -o /var/shared/qrcode.png
'';
};
services.getty.autologinUser = lib.mkForce "root";
programs.bash.interactiveShellInit = ''
if [ "$(tty)" = "/dev/tty1" ]; then
echo 'waiting for tor to generate the hidden service'
until test -e /var/shared/qrcode.utf8; do echo .; sleep 1; done
cat /var/shared/qrcode.utf8
fi
'';
boot.loader.grub.efiInstallAsRemovable = true;
boot.loader.grub.efiSupport = true;
disko.devices = {
disk = {
stick = {
type = "disk";
device = "/vda";
imageSize = "3G";
content = {
type = "gpt";
partitions = {
boot = {
size = "1M";
type = "EF02"; # for grub MBR
};
ESP = {
size = "100M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
root = {
size = "100%";
content = {
type = "filesystem";
format = "ext4";
mountpoint = "/";
};
};
};
};
};
};
};
}

View File

@@ -1,7 +1,7 @@
 { ... }: {
   imports = [
     ./clan-cli/flake-module.nix
-    ./installer/flake-module.nix
     ./ui/flake-module.nix
     ./theme/flake-module.nix
   ];
@@ -9,7 +9,7 @@
   perSystem = { pkgs, config, ... }: {
     packages = {
       tea-create-pr = pkgs.callPackage ./tea-create-pr { };
-      zerotier-members = pkgs.callPackage ./zerotier-members { };
       merge-after-ci = pkgs.callPackage ./merge-after-ci {
         inherit (config.packages) tea-create-pr;
       };

View File

@@ -1,31 +0,0 @@
{ lib
, buildGoModule
, fetchFromGitHub
,
}:
buildGoModule rec {
pname = "go-ssb";
version = "0.2.1";
src = fetchFromGitHub {
owner = "ssbc";
repo = "go-ssb";
#rev = "v${version}";
rev = "d6db27d1852d5edff9c7e07d2a3419fe6b11a8db";
hash = "sha256-SewaIDNVrODWGxdvJjIg4oTdfGy8THNMlgv48KX8okE=";
};
vendorHash = "sha256-ZytuWFre7Cz6Qt01tLQoPEuNzDIyoC938OkdIrU8nZo=";
ldflags = [ "-s" "-w" ];
  # tests take a very long time
doCheck = false;
meta = with lib; {
description = "Go implementation of ssb (work in progress)";
homepage = "https://github.com/ssbc/go-ssb";
license = licenses.mit;
maintainers = with maintainers; [ ];
};
}

View File

@@ -1,19 +0,0 @@
{ self, lib, ... }:
let
installer = lib.nixosSystem {
pkgs = self.inputs.nixpkgs.legacyPackages.x86_64-linux;
system = "x86_64-linux";
modules = [
self.nixosModules.installer
self.nixosModules.hidden-ssh-announce
self.inputs.nixos-generators.nixosModules.all-formats
self.inputs.disko.nixosModules.disko
({ config, ... }: { system.stateVersion = config.system.nixos.version; })
];
};
in
{
flake.packages.x86_64-linux.install-iso = self.inputs.disko.lib.makeDiskImages { nixosConfig = installer; };
flake.apps.x86_64-linux.install-vm.program = installer.config.formats.vm.outPath;
flake.apps.x86_64-linux.install-vm-nogui.program = installer.config.formats.vm-nogui.outPath;
}

View File

@@ -1,14 +0,0 @@
{ stdenv, python3, lib }:
stdenv.mkDerivation {
name = "zerotier-members";
src = ./.;
buildInputs = [ python3 ];
installPhase = ''
install -Dm755 ${./zerotier-members.py} $out/bin/zerotier-members
'';
meta = with lib; {
description = "A tool to list/allow members of a ZeroTier network";
license = licenses.mit;
};
}

View File

@@ -1,78 +0,0 @@
#!/usr/bin/env python

import argparse
import http.client
import json
import sys
from pathlib import Path

ZEROTIER_STATE_DIR = Path("/var/lib/zerotier-one")


class ClanError(Exception):
    pass


# this is managed by the nixos module
def get_network_id() -> str:
    p = Path("/etc/zerotier/network-id")
    if not p.exists():
        raise ClanError(
            f"{p} file not found. Have you enabled the zerotier controller on this host?"
        )
    return p.read_text().strip()

def allow_member(args: argparse.Namespace) -> None:
member_id = args.member_id
network_id = get_network_id()
token = ZEROTIER_STATE_DIR.joinpath("authtoken.secret").read_text()
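    # The local zerotier-one daemon serves its controller API on 127.0.0.1:9993, authenticated via authtoken.secret.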
    conn = http.client.HTTPConnection("localhost", 9993)
    conn.request(
        "POST",
        f"/controller/network/{network_id}/member/{member_id}",
        '{"authorized": true}',
        {"X-ZT1-AUTH": token},
    )
    resp = conn.getresponse()
    if resp.status != 200:
        raise ClanError(
            f"the zerotier daemon returned this error: {resp.status} {resp.reason}"
        )
    print(resp.status, resp.reason)


def list_members(args: argparse.Namespace) -> None:
    network_id = get_network_id()
    networks = ZEROTIER_STATE_DIR / "controller.d" / "network" / network_id / "member"
    if not networks.exists():
        return
    for member in networks.iterdir():
        with member.open() as f:
            data = json.load(f)
        try:
            member_id = data["id"]
        except KeyError:
            raise ClanError(f"error: {member} does not contain an id")
        print(member_id)


def main() -> None:
    parser = argparse.ArgumentParser()
    subparser = parser.add_subparsers(dest="command")
    parser_allow = subparser.add_parser("allow", help="Allow a member to join")
    parser_allow.add_argument("member_id")
    parser_allow.set_defaults(func=allow_member)
    parser_list = subparser.add_parser("list", help="List members")
    parser_list.set_defaults(func=list_members)
    args = parser.parse_args()
    try:
        args.func(args)
    except ClanError as e:
        print(e)
        sys.exit(1)


if __name__ == "__main__":
    main()

View File

@@ -1,9 +0,0 @@
{ self, ... }: {
flake.templates = {
new-clan = {
description = "Initialize a new clan flake";
path = ./new-clan;
};
default = self.templates.new-clan;
};
}

View File

@@ -1,2 +0,0 @@
# DO NOT DELETE
# This file is used by the clan cli to discover a clan flake

View File

@@ -1,24 +0,0 @@
{
description = "<Put your description here>";
inputs.clan-core.url = "git+https://git.clan.lol/clan/clan-core";
outputs = { self, clan-core, ... }:
let
system = "x86_64-linux";
pkgs = clan-core.inputs.nixpkgs.legacyPackages.${system};
clan = clan-core.lib.buildClan {
directory = self;
};
in
{
# all machines managed by cLAN
inherit (clan) nixosConfigurations clanInternals;
# add the cLAN cli tool to the dev shell
devShells.${system}.default = pkgs.mkShell {
packages = [
clan-core.packages.${system}.clan-cli
];
};
};
}