Merge commit '10025f9fa5503865918cbae2af5366afe7fd7c54' as 'components/mycelium'

2025-08-16 21:12:34 +02:00
132 changed files with 50951 additions and 0 deletions


@@ -0,0 +1,31 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
version: 2
updates:
  - package-ecosystem: "cargo" # See documentation for possible values
    directory: "/" # Location of package manifests
    schedule:
      interval: "weekly"
    groups:
      mycelium:
        patterns:
          - "*"
  - package-ecosystem: "cargo" # See documentation for possible values
    directory: "/myceliumd" # Location of package manifests
    schedule:
      interval: "weekly"
    groups:
      myceliumd:
        patterns:
          - "*"
  - package-ecosystem: "cargo" # See documentation for possible values
    directory: "/myceliumd-private" # Location of package manifests
    schedule:
      interval: "weekly"
    groups:
      myceliumd-private:
        patterns:
          - "*"


@@ -0,0 +1,133 @@
name: ci

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

env:
  CARGO_TERM_COLOR: always

jobs:
  check_fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly
        with:
          components: rustfmt
      - uses: clechasseur/rs-fmt-check@v2

  clippy:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Set windows VCPKG_ROOT env variable
        run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
        if: runner.os == 'Windows'
      - name: Install windows openssl
        run: vcpkg install openssl:x64-windows-static-md
        if: runner.os == 'Windows'
      - uses: actions/checkout@v4
      - name: Run Clippy
        run: cargo clippy --all-features -- -Dwarnings

  check_library:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Set windows VCPKG_ROOT env variable
        run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
        if: runner.os == 'Windows'
      - name: Install windows openssl
        run: vcpkg install openssl:x64-windows-static-md
        if: runner.os == 'Windows'
      - uses: actions/checkout@v4
      - name: Build
        run: cargo build -p mycelium --all-features --verbose
      - name: Run tests
        run: cargo test -p mycelium --all-features --verbose

  check_ios_library:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v4
      - name: install ios target
        run: rustup target add aarch64-apple-ios
      - name: Cache cargo
        uses: actions/cache@v3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - name: Build
        run: cargo build --target aarch64-apple-ios
        working-directory: mobile

  check_android_library:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: install android target
        run: rustup target add aarch64-linux-android
      - name: Setup Java
        uses: actions/setup-java@v2
        with:
          distribution: 'adopt'
          java-version: '17'
      - name: Set up Android NDK
        uses: android-actions/setup-android@v3
      - name: Accept Android Licenses
        run: yes | sdkmanager --licenses || true
      - name: Cache cargo
        uses: actions/cache@v3
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
      - name: install cargo NDK
        run: cargo install cargo-ndk
      - name: Build
        run: cargo ndk -t arm64-v8a build
        working-directory: mobile

  check_binaries:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        binary: [myceliumd, myceliumd-private]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Set windows VCPKG_ROOT env variable
        run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
        if: runner.os == 'Windows'
      - name: Install windows openssl
        run: vcpkg install openssl:x64-windows-static-md
        if: runner.os == 'Windows'
      - uses: actions/checkout@v4
      # A standalone `run: cd ...` step does not persist into later steps, so each
      # cargo invocation sets working-directory to the binary under test instead.
      - name: Build
        run: cargo build --verbose
        working-directory: ${{ matrix.binary }}
      - name: Run tests
        run: cargo test --verbose
        working-directory: ${{ matrix.binary }}
      - name: Run Clippy
        run: cargo clippy --all-features -- -Dwarnings
        working-directory: ${{ matrix.binary }}

  check_flake:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest]
    runs-on: ${{ matrix.os }}
    permissions:
      id-token: "write"
      contents: "read"
    steps:
      - uses: actions/checkout@v4
      - uses: DeterminateSystems/nix-installer-action@main
      - uses: DeterminateSystems/flake-checker-action@main
      - name: Run `nix build`
        run: nix build .
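
The core of these checks can be reproduced locally before pushing. The following sketch approximates what the workflow runs on Linux/macOS (the fmt job uses a nightly toolchain via dtolnay/rust-toolchain, hence the `+nightly`):

```sh
# Formatting check (CI uses nightly rustfmt)
cargo +nightly fmt --all -- --check
# Lints, treating warnings as errors, as in the clippy job
cargo clippy --all-features -- -Dwarnings
# Library build and tests, as in the check_library job
cargo build -p mycelium --all-features --verbose
cargo test -p mycelium --all-features --verbose
```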


@@ -0,0 +1,121 @@
name: Release

permissions:
  contents: write

on:
  push:
    tags:
      - v[0-9]+.*

jobs:
  create-release:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: taiki-e/create-gh-release-action@v1
        with:
          changelog: CHANGELOG.md
          # (required) GitHub token for creating GitHub Releases.
          token: ${{ secrets.GITHUB_TOKEN }}

  upload-assets-mycelium:
    needs: create-release
    strategy:
      matrix:
        include:
          - target: aarch64-apple-darwin
            os: macos-latest
          - target: x86_64-unknown-linux-musl
            os: ubuntu-latest
          - target: x86_64-apple-darwin
            os: macos-latest
          - target: aarch64-unknown-linux-musl
            os: ubuntu-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: taiki-e/upload-rust-binary-action@v1
        with:
          # Name of the compiled binary, also name of the non-extension part of the produced file
          bin: mycelium
          # --target flag value, default is host
          target: ${{ matrix.target }}
          # Name of the archive when uploaded
          archive: $bin-$target
          # (required) GitHub token for uploading assets to GitHub Releases.
          token: ${{ secrets.GITHUB_TOKEN }}
          # Specify manifest since we are in a subdirectory
          manifest-path: myceliumd/Cargo.toml

  # TODO: Figure out the correct matrix setup to have this in a single action
  upload-assets-myceliumd-private:
    needs: create-release
    strategy:
      matrix:
        include:
          - target: aarch64-apple-darwin
            os: macos-latest
          - target: x86_64-unknown-linux-musl
            os: ubuntu-latest
          - target: x86_64-apple-darwin
            os: macos-latest
          - target: aarch64-unknown-linux-musl
            os: ubuntu-latest
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: taiki-e/upload-rust-binary-action@v1
        with:
          # Name of the compiled binary, also name of the non-extension part of the produced file
          bin: mycelium-private
          # Set the vendored-openssl flag for provided release builds
          features: vendored-openssl
          # --target flag value, default is host
          target: ${{ matrix.target }}
          # Name of the archive when uploaded
          archive: $bin-$target
          # (required) GitHub token for uploading assets to GitHub Releases.
          token: ${{ secrets.GITHUB_TOKEN }}
          # Specify manifest since we are in a subdirectory
          manifest-path: myceliumd-private/Cargo.toml

  build-msi:
    needs: create-release
    runs-on: windows-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Create .exe file
        shell: bash
        run: cd myceliumd && RUSTFLAGS="-C target-feature=+crt-static" cargo build --release && cd ..
      - name: Setup .NET Core SDK
        uses: actions/setup-dotnet@v4.0.0
      - name: Install WiX Toolset
        run: dotnet tool install --global wix
      - name: Add WixToolset.UI.wixext extension
        run: wix extension add WixToolset.UI.wixext
      - name: Download Wintun zip file
        run: curl -o wintun.zip https://www.wintun.net/builds/wintun-0.14.1.zip
      - name: Unzip Wintun
        run: unzip wintun.zip
      - name: Move .dll file to myceliumd directory
        run: move wintun\bin\amd64\wintun.dll myceliumd
      - name: Build MSI package
        run: wix build -loc installers\windows\wix\mycelium.en-us.wxl installers\windows\wix\mycelium.wxs -ext WixToolset.UI.wixext -arch x64 -dcl high -out mycelium_installer.msi
      - name: Upload MSI artifact
        uses: alexellis/upload-assets@0.4.0
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          asset_paths: '["mycelium_installer.msi"]'
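
This workflow only fires on tags matching `v[0-9]+.*`, so cutting a release amounts to pushing such a tag. A sketch (the version number below is purely illustrative):

```sh
git tag v0.0.0            # hypothetical version; use the real next release number
git push origin v0.0.0    # triggers create-release and the asset upload jobs
```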

components/mycelium/.gitignore (vendored)

@@ -0,0 +1,21 @@
/target
nodeconfig.toml
keys.txt
priv_key.bin
# Profile output
*.profraw
*.profdata
profile.json
# vscode settings, keep these locally
.vscode
# visual studio project stuff
.vs
# wintun.dll, windows tun driver in repo root for windows development
wintun.dll
result/
.idea


@@ -0,0 +1,614 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- New log format option `plain`, this option is the same as logfmt, but with colors
always disabled.
## [0.6.1] - 2025-05-14
### Added
- When a route is used which is about to expire, we now send a route request to
try and refresh its duration before it expires.
- We now track when a peer was first discovered and when we last connected to it.
This info is displayed in the CLI when listing peers.
- We now maintain a cache of recently sent route requests, so we can avoid spamming
peers with duplicate requests.
### Changed
- Only keep a record of retracted routes for 6 seconds instead of 60. We'll track
how this affects the route propagation before removing this altogether.
### Fixed
- Fixed an unsoundness issue in the routing table clone implementation.
- Clear dead peer buffer once peers have been removed from the routing table.
- Properly reply with an address unreachable ICMP when pinging an IP in the local
subnet which does not exist.
- Verify a packet has sufficient TTL to be routed before injecting it, and reply
with a TTL exceeded otherwise. This fixes an issue where packets with a TTL of
1 and 0 originating locally would not result in a proper ICMP reply. This happens
for instance when using `traceroute`.
- Check the local seqno request cache before sending a seqno request to a peer,
to avoid spamming in certain occasions.
- Don't accept packet for a destination if we only have fallback routes for said
destination.
## [0.6.0] - 2025-04-25
This is a breaking change, check the main README file for update info.
### Added
- Json-rpc based API, see the docs for more info.
- Message forwarding to unix sockets if configured.
- Config file support to handle messages, if this is enabled.
### Changed
- Routing has been reworked. We no longer advertise selected subnets (which aren't
our own). Now if a subnet is needed, we perform a route request for that subnet,
memorizing state and responses. The current implementation expires routes every 5
minutes but does not yet refresh active routes before they expire.
- Before we process a seqno request for a subnet, check the seqno cache to see if
we recently forwarded an entry for it.
- Discard Update TLV's if there are too many in the queue already. This bounds memory
usage but can cause nodes with a lot of load to not pick up routes immediately.
## [0.5.7] - 2024-11-31
### Fixed
- Properly set the interface ipv6 MTU on windows.
## [0.5.6] - 2024-10-03
### Fixed
- Fix a panic in the route cleanup task when a peer dies who is the last stored
announcer of a subnet.
## [0.5.5] - 2024-09-27
### Added
- Mycelium-ui, a standalone GUI which exposes (part of) the mycelium API. This
does **not** have a bundled mycelium node, so that needs to be run separately.
### Changed
- Default TUN name on Linux and Windows is now `mycelium`. On MacOS it is now `utun0`.
- TUN interface name validation on MacOS. If the user supplies an invalid or already
taken interface name, an available interface name will be automatically assigned.
### Fixed
- Release flow to create the Windows installer now properly extracts wintun
- Guard against a race condition when a route is deleted which could rarely
trigger a panic and subsequent memory leak.
## [0.5.4] - 2024-08-20
### Added
- Quic protocol can now be disabled using the `--disable-quic` flag
- Mycelium can now be started with a configuration file using `--config-file`.
If no configuration file is supplied, Mycelium will look in a default location
based on the OS. For more information see [README](/README.md#configuration)
- Windows installer for Mycelium. The `.msi` file can be downloaded from the release
assets.
- Added flag to specify how many update workers should be started, which governs
the amount of parallelism used for processing updates.
- Send a seqno request if we receive an unfeasible update for a subnet with no
routes, or if there is no selected route for the subnet.
- New public peers in US, India, and Singapore.
### Changed
- Increased the starting metric of a peer from 50 to 1000.
- Reworked the internals of the routing table, which should reduce memory consumption.
Additionally, it is now possible to apply updates in parallel
- Periodically reduce the allocated size of the seqno cache to avoid wasting some
memory which is not currently used by the cache but still allocated.
- Demote seqno cache warnings about duplicate seqno requests to debug level, as it
is valid to send duplicate requests if sufficient time passed.
- Skip route selection after an unfeasible update to a fallback route, as the (now
unfeasible) route won't be selected anyway.
- No longer refresh route timer after an unfeasible update. This allows routes
which have become unfeasible to gracefully be removed from the routing table
over time.
- Expired routes which aren't selected are now immediately removed from the routing
table.
- Changed how updates are sent to be more performant.
- A triggered update is no longer sent just because a route sequence number got
increased. We do still send the update to peers in the seqno request cache.
- Reduced log level when a route changes next-hop to debug from info.
### Fixed
- When running `mycelium` with a command, a keyfile was loaded (or created, if not
yet present). This was not necessary in that context.
- Limit the amount of time allowed for inbound quic connections to be set up, and
process multiple of them in parallel. This fixes a DOS vector against the quic
listener.
- We now update the source table even if we don't send an update because we are
sure the receiver won't select us as a next-hop anyway.
## [0.5.3] - 2024-06-07
### Added
- On Linux and macOS, a more descriptive error is printed when setting up the tun
device fails because a device with the same name already exists.
- Seqno request cache, to avoid spamming peers with duplicate seqno requests and
to make sure seqno's are forwarded to different peers.
- Added myceliumd-private binary, which contains private network functionality.
- Added API endpoint to retrieve the public key associated with an IP.
- The CLI can now be used to list, remove or add peers (see `mycelium peers --help`)
- The CLI can now be used to list selected and fallback routes (see
`mycelium routes --help`)
### Changed
- We now send seqno requests to all peers who advertised a subnet if the selected
route to it is lost as a result of the next-hop dying, or an update coming in
which causes no routes to be feasible anymore.
- Switched from the log to the tracing ecosystem.
- Only do the periodic route announcement every 5 minutes instead of every minute.
- Mycelium binary is no longer part of the workspace, and no longer contains private
network functionality.
- If a packet received from a peer can't be forwarded to the router, terminate the
connection to the peer.
### Fixed
- Manually implement Hash for Subnet, previously we could potentially have multiple
distinct entries in the source table for the same source key.
## [0.5.2] - 2024-05-03
### Added
- New CI workflow to build and test the mycelium library separately from the full
provided binary build.
### Changed
- Disabled the protobuf feature on prometheus, this removes protobuf related code
and significantly reduces the release binary size.
- Changed log level when sending a protocol message to a peer which is no longer
alive from error to trace in most instances.
- Improved performance of sending protocol messages to peers by queueing up multiple
packets at once (if multiple are ready).
- Before trying to send an update we now check if it makes sense to do so.
- If a peer died, fallback routes using it are no longer retained with an infinite
metric but removed immediately.
- No longer run route selection for subnets if a peer died and the route is not
selected.
- If routes are removed, shrink the capacity of the route list in the route table
if it is larger than required.
- Check if the originator of a TLV is still available before processing said TLV.
- The router now uses a dedicated task per TLV type to handle received TLV's from
peers.
- Statically linking openssl is now a feature flag when building yourself.
### Fixed
- If a peer died, unselect the selected routes which have it as next-hop if there
is no other feasible route.
- Properly unselect a route if a retraction update comes in and there is no other
feasible route.
- If the router bumps its seqno it now properly announces the local route to its
peers instead of the selected routes.
- Seqno bump requests for advertised local routes now properly bump the router
seqno.
## [0.5.1] - 2024-04-19
### Added
- The repo is now a workspace, and pure library code is separated out. This is mainly
done to make it easier to develop implementations on different platforms.
- Link local discovery will now send discovery beacons on every interface the process
listens on for remote beacons.
- Experimental private network support. See [the private network docs](/docs/private_network.md)
for more info.
- You can now optionally expose Prometheus compatible metrics about the system by
setting the --metrics-api-address flag.
- On Linux, you can now set an optional firewall mark by setting the --firewall-mark
flag.
- Added a nix flake to the repo.
### Changed
- We no longer create an outbound connection to a link local discovered IP if that
IP is already known (usually as inbound address) with potentially a different
port.
## [0.5.0] - 2024-04-04
### Changed
- Connection identifier is now included in the error log if we can't forward a
seqno request.
- Garbage collection time for source entries has been increased from 5 to 30 minutes
for now.
- The router implementation has been changed to use regular locks instead of an
always readable concurrency primitive for all but the actual routing table. This
should reduce the memory consumption a bit.
- Public key and shared secret for a destination are now saved on the router, instead
of maintaining a separate mapping for them. This slightly reduces memory consumption
of the router, and ensures stale data is properly cleaned up when all routes to
a subnet are removed.
- Hello packets now set the interval in which the next Hello will be sent properly
in centiseconds.
- IHU packets now set the interval properly in centiseconds.
- IHU packets now set an RX cost. For now this is the link cost, in the future
this will be set properly.
- Route expiration time is now calculated from the interval received in updates.
- Ip address derivation from public keys now uses the blake3 hash algorithm.
### Fixed
- Don't try to forward seqno requests to a peer if we know its connection is dead.
## [0.4.5] - 2024-03-26
### Changed
- Size of data packets is limited to 65535 bytes.
- Update interval is now expressed as centiseconds, in accordance with the babel
RFC.
- Update filters now allow retractions for a route from any router-id.
### Fixed
- The feasibility distance of an existing source key is no longer incorrectly updated
when the metric increases.
- Source key garbage collection timers are properly reset on update even if the
source key itself is not updated.
- Nodes now properly reply to route requests for a static route.
- A retraction is now sent as reply to a route request if the route is not known.
## [0.4.4] - 2024-03-22
### Changed
- The number of bytes read and written to a peer is no longer reset after
a reconnect (for outgoing connections).
- Renamed `connectionTxBytes` and `connectionRxBytes` on the peer stats struct
to `txBytes` and `rxBytes` to better express that they are no longer tied to
a single connection to the peer.
### Fixed
- When joining a link local multicast group on an interface returns a
`Address already in use` error, the error is now ignored and the interface is
considered to be joined.
- When sending an update to a peer, the source table is now correctly updated before
the update is sent, instead of doing a batched source table update afterward.
## [0.4.3] - 2024-03-15
### Added
- Feature flag for message subsystem. It is enabled by default, but a user can
make a custom build with `--no-default-features` which completely leaves out
the message related code, should they have no need for it.
- Link local discovery now periodically checks for new IPv6 enabled interfaces
and also joins the discovery multicast group on them.
- Trace logs are removed from release binaries at compile time, slightly reducing
binary size.
- New `--silent` flag which disables all logging except error logs.
### Changed
- Update GitHub CI action to use latest version of the checkout action.
- Update GitHub CI action to stop using deprecated actions-rs actions.
- Failing to join the link local discovery multicast group now logs as warning
instead of error.
- Failing to join any IPv6 multicast group for link local peer discovery will no
longer disable local peer discovery entirely.
### Fixed
- Add proper validation when receiving an OOB ICMP packet.
## [0.4.2] - 2024-02-28
### Fixed
- Make sure the HTTP API doesn't shut down immediately after startup.
## [0.4.1] - 2024-02-27
### Added
- Admin API
- Ability to see current peers and related info
- Ability to add a new peer
- Ability to remove an existing peer
- List current selected routes
- List current fallback routes
- General node info (for now just the node subnet)
### Changed
- The tokio_unstable config flag is no longer used when building.
- The key file is now created without read permissions for the group/world.
### Removed
- .cargo/config.toml aarch64-linux target specific entries. Cross compilation for
these platforms can use `cross` or entries in the global .cargo/config.toml of
the developer instead.
- Sending SIGUSR1 to the process on unix based systems no longer dumps internal
state, this can be accessed with the admin API instead.
## [0.4.0] - 2024-02-22
### Added
- Support for windows tunnels. While this works, there are no windows
packages yet, so this is still a "developer experience".
- Validation on source IP when sending packets over TUN interface.
### Changed
- Overlay network is now hosted in 400::/7 instead of 200::/7.
- Key file is no longer created if it does not exist when the
inspect command is run.
- Packets with destination outside the global subnet now return
a proper ICMP instead of being silently dropped.
### Fixed
- Log the endpoint when a Quic connection can't be established.
## [0.3.2] - 2024-01-31
### Added
- If the router notices a Peer is dead, the connection is now forcibly terminated.
- Example Systemd file.
### Changed
- The multicast discovery group is now joined from all available
interfaces. This should increase the resilience of local peer
discovery.
- Setup of the node is now done completely in the library.
- Route selection now accounts for the link cost of peers when
considering if it should switch to the new route.
- Hop count of data packets is now decremented on the first
hop as well. As a result the sending node will show up in
traceroute results.
### Fixed
- Inbound peers now replace existing peers in the peer manager. This should fix
an issue where Quic could leave zombie connections.
## [0.3.1] - 2024-01-23
### Added
- You can now check the version of the current binary with the --version flag.
- Bandwidth usage is now tracked per peer.
### Changed
- Prefix decoding is now more resilient to bad prefix lengths.
- The `-k/--key-file` flag is now global, allowing you to specify it for (inspect)
sub commands.
- Log the actual endpoint when we can't connect to it
## [0.3.0] - 2024-01-17
### Added
- Nodes can now explicitly request selected route(s) from connected peers by
sending a Route Request Tlv.
- The router can now inform a Peer that the connection is seemingly
dead. This should improve the reconnect speed on connection types
which can't tell themselves if they died.
### Changed
- Locally discovered peers are now forgotten if we fail to connect to them 3
times.
- Duration between periodic events has been increased, this should
reduce bandwidth when idle to maintain the system.
- Address encoding in update packets is now in-line with address
encoding as described by the babel RFC.
### Fixed
- TLV bodies of unknown type are now properly skipped. Previously, the
calculation of the body size was off by one, causing the connection to
the peer to always die. Now, these packets should be properly ignored.
- We are a bit more active now and no longer sleep for a second when we
need to remove an expired route entry.
## [0.2.3] - 2024-01-04
### Added
- Added automatic release builds for aarch64-linux.
### Changed
- Reduce the Quic keep-alive timeout.
## [0.2.2] - 2024-01-03
### Changed
- Changed default multicast peer discovery port to 9650.
## [0.2.1] - 2024-01-03
### Added
- Experimental Quic transport. The same socket is used for sending and
receiving. This transport is experimental and breaking changes are
possible which won't be covered by semver guarantees for now.
## [0.2.0] - 2023-12-29
### Added
- Link local peer discovery over IPv6. The system will automatically detect peers on the same LAN and try to connect to them.
This makes sure peers on the same network don't needlessly use bandwidth on external "hop" peers.
- Data packets now carry a Hop Limit field as part of the header. Every node decrements this value, and if it is decremented
to zero, the packet is discarded.
- Intermediate nodes can now send ICMP packets back to the source in
reply to a dropped packet. This is useful if a hop does not have route
to forward a packet, or the hop count for a packet reaches 0.
- Local node now returns an ICMP Destination unreachable - no route if a
packet is sent on the TUN interface and there is no key for the remote
address (so the user data can't be encrypted).
- Peers connected over IPv4 now incur a higher processing cost, causing
IPv6 connections to be preferred for forwarding data.
- Peer addresses now include a protocol specifier, so multiple underlay
connections can be specified in the future.
### Changed
- The peer manager now tracks sufficient info of each connected peer to
avoid going through the router every time to see if it needs to
reconnect to said peers.
- We don't send the receiver node's IP address in IHU packets anymore, as the packet is sent over a unicast link.
This is valid per the babel rfc.
- Setting the peers on the CLI now requires specifying the protocol to use.
For now only TCP is supported.
- Change `--port` flag to `--tcp-listen-port` to more accurately reflect
what it controls, as well as the fact that it is only for TCP.
### Removed
- Handshake when connecting to a new peer has been removed.
## [0.1.3] - 2023-11-22
### Added
- Add info log when next hop of a peer changes.
- Add windows builds to CI.
### Changed
- When printing the connected peers, print the underlay IP instead of the overlay IP.
- The link cost of a peer is now the smoothed average. This makes sure a single short latency spike doesn't disrupt routing.
- On Linux, set the TUN ip as /7 and avoid setting a /64 route. This brings it in line with OSX.
- When selecting the best route for a subnet, consider the currently
installed route and only switch if it is significantly better, or
directly connected.
- Increase the static link cost component of a peer. This will increase
the value of a hop in the metric of a route, in turn increasing the
impact of multiple hops on route selection. The route selection will
thus be more inclined to find a path with fewer hops toward a
destination. Ultimately, if multiple paths to a destination exist with
roughly the same latency, the one with fewer hops should be
preferred, since this avoids putting unnecessary pressure on multiple
nodes in the network.
- IHU packets now include the underlay IP instead of the overlay IP.
- When a new peer connects the underlay IP is logged instead of the
overlay IP.
### Fixed
- Ignore retraction updates if the route table has an existing but retracted route already. This fixes
an issue where retracted routes would not be flushed from the routing table.
### Removed
- All uses of the exchanged overlay IP in the peer handshake are fully
removed. Handshake is still performed to stay backwards compatible
until the next breaking release.
## [0.1.2] - 2023-11-21
### Changed
- Allow routes with infinite metric for a subnet to be selected. They will only be selected if no feasible route
with finite metric exists. They are also still ignored when looking up a route to a subnet.
### Fixed
- Don't trigger an update when a route retraction comes in for a subnet where the route is already retracted.
This fixes a potential storm of retraction requests in the network.
## [0.1.1] - 2023-11-21
### Added
- CHANGELOG.md file to keep track of notable additions, changes, fixes, deprecations, and removals.
- A peer can now detect if it is no longer useable in most cases, allowing it to notify the router if it died. This
allows near instant retraction of routes if a connection dies, decreasing the amount of time needed to find a
suitable alternative route.
- Add CHANGELOG.md entries when creating a release.
- When sending SIGUSR1 to the process, the routing table dump will now include a list of the public key derived IP
for every currently known subnet.
- You can now set the name of the TUN interface on the command line with the --tun-name flag.
- Added support for TUN devices on OSX.
### Changed
- When a peer is found to be dead, routes which use it as next-hop now have their metric set to infinity.
If the route is selected, route selection for the subnet is run again and if needed a triggered update is sent.
This will allow downstream peers to receive a timely update informing them of a potentially retracted route,
instead of having to wait for route expiration.
- Account for the link with the peer of a route when performing route selection. This was not the case previously,
and could theoretically lead to a case where a route was selected with a non-optimal path, because the lower metric
was offset by a high link cost of the peer.
### Fixed
- Remove trailing 'e' character from release archive names
## [0.1.0] - 2023-11-15
### Added
- Initial working setup with end-to-end encryption of traffic.
- Ability to specify peers to connect to
- Message subsystem, including API to use it.
- CLI options to send and receive messages with the API.
[0.1.1]: https://github.com/threefoldtech/mycelium/compare/v0.1.0...v0.1.1
[0.1.2]: https://github.com/threefoldtech/mycelium/compare/v0.1.1...v0.1.2
[0.1.3]: https://github.com/threefoldtech/mycelium/compare/v0.1.2...v0.1.3
[0.2.0]: https://github.com/threefoldtech/mycelium/compare/v0.1.3...v0.2.0
[0.2.1]: https://github.com/threefoldtech/mycelium/compare/v0.2.0...v0.2.1
[0.2.2]: https://github.com/threefoldtech/mycelium/compare/v0.2.1...v0.2.2
[0.2.3]: https://github.com/threefoldtech/mycelium/compare/v0.2.2...v0.2.3
[0.3.0]: https://github.com/threefoldtech/mycelium/compare/v0.2.3...v0.3.0
[0.3.1]: https://github.com/threefoldtech/mycelium/compare/v0.3.0...v0.3.1
[0.3.2]: https://github.com/threefoldtech/mycelium/compare/v0.3.1...v0.3.2
[0.4.0]: https://github.com/threefoldtech/mycelium/compare/v0.3.2...v0.4.0
[0.4.1]: https://github.com/threefoldtech/mycelium/compare/v0.4.0...v0.4.1
[0.4.2]: https://github.com/threefoldtech/mycelium/compare/v0.4.1...v0.4.2
[0.4.3]: https://github.com/threefoldtech/mycelium/compare/v0.4.2...v0.4.3
[0.4.4]: https://github.com/threefoldtech/mycelium/compare/v0.4.3...v0.4.4
[0.4.5]: https://github.com/threefoldtech/mycelium/compare/v0.4.4...v0.4.5
[0.5.0]: https://github.com/threefoldtech/mycelium/compare/v0.4.5...v0.5.0
[0.5.1]: https://github.com/threefoldtech/mycelium/compare/v0.5.0...v0.5.1
[0.5.2]: https://github.com/threefoldtech/mycelium/compare/v0.5.1...v0.5.2
[0.5.3]: https://github.com/threefoldtech/mycelium/compare/v0.5.2...v0.5.3
[0.5.4]: https://github.com/threefoldtech/mycelium/compare/v0.5.3...v0.5.4
[0.5.5]: https://github.com/threefoldtech/mycelium/compare/v0.5.4...v0.5.5
[0.5.6]: https://github.com/threefoldtech/mycelium/compare/v0.5.5...v0.5.6
[0.5.7]: https://github.com/threefoldtech/mycelium/compare/v0.5.6...v0.5.7
[0.6.0]: https://github.com/threefoldtech/mycelium/compare/v0.5.7...v0.6.0
[0.6.1]: https://github.com/threefoldtech/mycelium/compare/v0.6.0...v0.6.1
[unreleased]: https://github.com/threefoldtech/mycelium/compare/v0.6.1...HEAD


@@ -0,0 +1,2 @@
# global code owners
* @leesmet

components/mycelium/Cargo.lock (generated)

File diff suppressed because it is too large.


@@ -0,0 +1,9 @@
[workspace]
members = ["mycelium", "mycelium-metrics", "mycelium-api", "mycelium-cli"]
exclude = ["myceliumd", "myceliumd-private", "mycelium-ui", "mobile"]
resolver = "2"
[profile.release]
lto = "fat"
codegen-units = 1

components/mycelium/LICENSE

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright TF Tech NV
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,229 @@
# Mycelium
Mycelium is an IPv6 overlay network written in Rust. Each node that joins the overlay
network will receive an overlay network IP in the 400::/7 range.
## Features
- Mycelium is locality aware; it will look for the shortest path between nodes
- All traffic between the nodes is end-to-end encrypted
- Traffic can be routed over the nodes of friends, in a location-aware way
- If a physical link goes down, Mycelium will automatically reroute your traffic
- The IP address is IPv6 and linked to the private key
- A simple, reliable message bus is implemented on top of Mycelium
- Mycelium supports multiple transports (QUIC, TCP, ...), and we are working on hole punching for QUIC, which means P2P traffic without middlemen for NATted networks, e.g. most homes
- Scalability is very important to us; we tried many overlay networks before and got stuck on all of them, so we are trying to design a network which scales to a planetary level
- You can run Mycelium without TUN and only use it as a reliable message bus.
> We are looking for lots of testers to push the system
> [see here for docs](https://github.com/threefoldtech/mycelium/tree/master/docs)
## Running
> Currently, Linux, macOS and Windows are supported.
### Linux and macOS
Get a usable binary, either by downloading [an artifact from a release](https://github.com/threefoldtech/mycelium/releases),
or by [checking out and building the code yourself](#developing).
### Windows
Download the [mycelium_installer.msi](https://github.com/threefoldtech/mycelium/releases/latest/download/mycelium_installer.msi) and run the installer.
### Run Mycelium
Once you have a usable binary, simply start it. If you want to connect to other
nodes, you can specify their listening address as part of the command (combined
with the protocol they are listening on, usually TCP). Check the next section if
you want to connect to hosted public nodes.
```sh
mycelium --peers tcp://188.40.132.242:9651 quic://185.69.166.8:9651
# another example, with a different TUN interface in case utun3 (the default) is already in use
# we also use sudo here, e.g. on OSX
sudo mycelium --peers tcp://188.40.132.242:9651 quic://185.69.166.8:9651 --tun-name utun9
```
By default, the node will listen on port `9651`, though this can be overwritten
with the `-p` flag.
To check your own node info:
```bash
mycelium inspect --json
{
"publicKey": "abd16194646defe7ad2318a0f0a69eb2e3fe939c3b0b51cf0bb88bb8028ecd1d",
"address": "5c4:c176:bf44:b2ab:5e7e:f6a:b7e2:11ca"
}
# test that network works, ping to anyone in the network
ping6 54b:83ab:6cb5:7b38:44ae:cd14:53f3:a907
```
The node uses an `x25519` key pair from which its identity is derived. The private key of this key pair
is saved in a local file (32 bytes in binary format). You can specify the path to this file with the
`-k` flag. By default, the file is saved in the current working directory as `priv_key.bin`.
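For example, to keep the key in a dedicated location instead of the working directory (the path below is purely illustrative):

```sh
mycelium --key-file ~/.config/mycelium/priv_key.bin --peers tcp://188.40.132.242:9651
```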
### Running without TUN interface
It is possible to run the system without creating a TUN interface, by starting with the `--no-tun` flag.
Obviously, this means that your node won't be able to send or receive L3 traffic. There is no interface
to send packets on, and consequently no interface to send received packets out of. From the point of view
of other nodes, your node will simply drop all incoming L3 traffic destined for it. The node **will still
route traffic** as normal. It takes part in routing, exchanges route info, and forwards packets not
intended for itself.
The node also still allows access to the [message subsystem](#message-system).
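For example, a relay-style node that participates in routing and the message subsystem but never handles local L3 traffic could be started as:

```sh
mycelium --no-tun --peers tcp://188.40.132.242:9651 quic://185.69.166.8:9651
```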
## Configuration
Mycelium can be started with an **optional** configuration file using the `--config-file`
option, which offers the same capabilities as the command line arguments.
If no configuration file is specified with `--config-file`, Mycelium will search for one
in a default location based on the operating system:
- Linux: $HOME/.config/mycelium.toml
- Windows: %APPDATA%/ThreeFold Tech/Mycelium/mycelium.toml
- Mac OS: $HOME/Library/Application Support/ThreeFold Tech/Mycelium/mycelium.toml
Command line arguments will override any settings found in the configuration file.
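As a quick illustration of this precedence rule, the sketch below starts a node from an explicit config file while overriding one of its settings on the command line (the file path is illustrative):

```sh
# settings in mycelium.toml apply, but the CLI value for the TCP listen port wins
mycelium --config-file ./mycelium.toml --tcp-listen-port 9652
```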
## Hosted public nodes v0.6.x
A couple of public nodes are provided, which can be freely connected to. This allows
anyone to join the global network. These are hosted in multiple geographic regions,
on both IPv4 and IPv6, and support both the TCP and QUIC protocols. The nodes
are the following:
| Node ID | Region | IPv4 | IPv6 | Tcp port | Quic port | Mycelium IP |
| ------- | ------- | -------------- | --------------------------------- | -------- | --------- | -------------------------------------- |
| 01 | DE | 188.40.132.242 | 2a01:4f8:221:1e0b::2 | 9651 | 9651 | 54b:83ab:6cb5:7b38:44ae:cd14:53f3:a907 |
| 02 | DE | 136.243.47.186 | 2a01:4f8:212:fa6::2 | 9651 | 9651 | 40a:152c:b85b:9646:5b71:d03a:eb27:2462 |
| 03 | BE | 185.69.166.7 | 2a02:1802:5e:0:ec4:7aff:fe51:e80d | 9651 | 9651 | 597:a4ef:806:b09:6650:cbbf:1b68:cc94 |
| 04 | BE | 185.69.166.8 | 2a02:1802:5e:0:ec4:7aff:fe51:e36b | 9651 | 9651 | 549:8bce:fa45:e001:cbf8:f2e2:2da6:a67c |
| 05 | FI | 65.21.231.58 | 2a01:4f9:6a:1dc5::2 | 9651 | 9651 | 410:2778:53bf:6f41:af28:1b60:d7c0:707a |
| 06 | FI | 65.109.18.113 | 2a01:4f9:5a:1042::2 | 9651 | 9651 | 488:74ac:8a31:277b:9683:c8e:e14f:79a7 |
| 07 | US-EAST | 209.159.146.190 | 2604:a00:50:17b:9e6b:ff:fe1f:e054 | 9651 | 9651 | 4ab:a385:5a4e:ef8f:92e0:1605:7cb6:24b2 |
| 08 | US-WEST | 5.78.122.16 | 2a01:4ff:1f0:8859::1 | 9651 | 9651 | 4de:b695:3859:8234:d04c:5de6:8097:c27c |
| 09 | SG | 5.223.43.251 | 2a01:4ff:2f0:3621::1 | 9651 | 9651 | 5eb:c711:f9ab:eb24:ff26:e392:a115:1c0e |
| 10 | IND | 142.93.217.194 | 2400:6180:100:d0::841:2001 | 9651 | 9651 | 445:465:fe81:1e2b:5420:a029:6b0:9f61 |
These nodes are all interconnected, so 2 peers who each connect to a different node
(or set of disjoint nodes) will still be able to reach each other. For optimal performance,
however, it is recommended to connect to all of the above at once. An example connection
string could be:
`--peers tcp://188.40.132.242:9651 "quic://[2a01:4f8:212:fa6::2]:9651" tcp://185.69.166.7:9651 "quic://[2a02:1802:5e:0:ec4:7aff:fe51:e36b]:9651" tcp://65.21.231.58:9651 "quic://[2a01:4f9:5a:1042::2]:9651" "tcp://[2604:a00:50:17b:9e6b:ff:fe1f:e054]:9651" quic://5.78.122.16:9651 "tcp://[2a01:4ff:2f0:3621::1]:9651" quic://142.93.217.194:9651`
It is up to the user to decide which peers they want to use, and over which protocol.
Note that quoting may or may not be required, depending on which shell is being
used. IPv6 addresses should of course only be used if your ISP provides you with
IPv6 connectivity.
### Private network
Mycelium supports running a private network, in which you must know the network name
and a PSK (pre shared key) to connect to nodes in the network. For more info, check
out [the relevant docs](/docs/private_network.md).
## API
The node starts an HTTP API, which by default listens on `localhost:8989`. A different
listening address can be specified on the CLI when starting the system through the
`--api-addr` flag. The API allows access to [send and receive messages](#message-system),
and will later be expanded to allow admin functionality on the system. Note that
messages are sent using the identity of the node, and a future admin API can be
used to change the system behavior. As such, care should be taken that this API
is not accessible to unauthorized users.
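For instance, two of the admin endpoints from the API spec (see docs/api.yaml below) can be queried with plain curl:

```sh
# general node info, e.g. the node subnet, as JSON
curl http://localhost:8989/api/v1/admin
# list known peers and their connection state
curl http://localhost:8989/api/v1/admin/peers
```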
## Message system
A message system is provided which allows users to send a message, which is essentially just "some data"
to a remote. Since the system is end-to-end encrypted, a receiver of a message is sure of the authenticity
and confidentiality of the content. The system does not interpret the data in any way and handles it
as an opaque block of bytes. Messages are sent with a deadline. This means the system continuously
tries to send (part of) the message, until it either succeeds, or the deadline expires. This happens
similarly to the way TCP handles data. Messages are transmitted in chunks, which are embedded in the
same data stream used by L3 packets. As such, intermediate nodes can't distinguish between regular L3
and message data.
The primary way to interact with the message system is through [the API](#api). The message API is
documented in [an OpenAPI spec in the docs folder](docs/api.yaml). For some more info about how to
use the message system, see [the message docs](/docs/message.md).
Messages can be categorized by topics, which can be configured with whitelisted subnets and socket forwarding paths.
For detailed information on how to configure topics, see the [Topic Configuration Guide](/docs/topic_configuration.md).
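As a minimal sketch of the subsystem, the CLI wraps the message API; the exact subcommands and flags are documented in the message docs referenced above, so treat the invocation below as illustrative:

```sh
# on the receiving node: wait for an incoming message (flags per docs/message.md)
mycelium message receive --timeout 60
# on the sending node: send some data to a destination overlay IP
mycelium message send 54b:83ab:6cb5:7b38:44ae:cd14:53f3:a907 'hello mycelium'
```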
## Inspecting node keys
Using the `inspect` subcommand, you can view the address associated with a public key. If no public key is provided, the node will show
its own public key. In either case, the derived address is also printed. You can specify the path to the private key with the `-k` flag.
If the file does not exist, a new private key will be generated. The optional `--json` flag can be used to print the information in json
format.
```sh
mycelium inspect a47c1d6f2a15b2c670d3a88fbe0aeb301ced12f7bcb4c8e3aa877b20f8559c02
Public key: a47c1d6f2a15b2c670d3a88fbe0aeb301ced12f7bcb4c8e3aa877b20f8559c02
Address: 47f:b2c5:a944:4dad:9cb1:da4:8bf7:7e65
```
```sh
mycelium inspect --json
{
"publicKey": "955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b",
"address": "54f:b680:ba6e:7ced:355f:346f:d97b:eecb"
}
```
## Developing
This project is built in Rust, and you must have a rust compiler to build the code
yourself. Please refer to [the official rust documentation](https://www.rust-lang.org/)
for information on how to install `rustc` and `cargo`. This project is a workspace,
however the binaries (`myceliumd` and `myceliumd-private`) are explicitly _not_
part of this workspace. The reason for this is the way the cargo resolver unifies
features. Making both binaries part of the workspace would make the library build
for the regular binary include the code for the private network, and since that
is internal code it won't be removed at link time.
First make sure you have cloned the repo
```sh
git clone https://github.com/threefoldtech/mycelium.git
cd mycelium
```
If you only want to build the library, you can do so from the root of the repo
```sh
cargo build
```
If you instead want to build a binary, that must be done from the appropriate subdirectory
```sh
cd myceliumd
cargo build
```
Refer to the README files in those directories for more info.
In case a release build is required, the `--release` flag can be added to the cargo
command (`cargo build --release`).
## Cross compilation
For cross compilation, it is advised to use the [`cross`](https://github.com/cross-rs/cross)
project. Alternatively, the standard way of cross compiling in rust can be used
(by specifying the `--target` flag in the `cargo build` command). This might require
setting some environment variables or local cargo config. On top of this, you should
also provide the `vendored-openssl` feature flag to build and statically link a copy
of openssl.
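For example, a static Linux binary could be cross compiled from the `myceliumd` directory roughly as follows (targets illustrative; `cross` requires a working container runtime):

```sh
# using cross, which handles the target toolchain for you
cross build --release --target aarch64-unknown-linux-musl
# or plain cargo, statically linking openssl via the feature flag
cargo build --release --target x86_64-unknown-linux-musl --features vendored-openssl
```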
## Remarks
- The overlay network uses some of the core principles of the Babel routing protocol (<https://www.irif.fr/~jch/software/babel/>).


@@ -0,0 +1,22 @@
# REMOVE/ADD COMMENT TO ENABLE/DISABLE LINE
peers = [
"tcp://188.40.132.242:9651",
"quic://[2a01:4f8:212:fa6::2]:9651",
"quic://185.69.166.7:9651",
"tcp://[2a02:1802:5e:0:8c9e:7dff:fec9:f0d2]:9651",
"quic://65.21.231.58:9651",
"tcp://[2a01:4f9:5a:1042::2]:9651",
]
api_addr = "127.0.0.1:8989"
tcp_listen_port = 9651
quic_listen_port = 9651
tun_name = "mycelium"
disable_peer_discovery = false
no_tun = false
#metrics_api_address = "0.0.0.0:9999"
#firewall_mark = 30
## Options below only apply when myceliumd-private is used
#network_name = "private network name"
#network_key_file = "path_to_key_file"


@@ -0,0 +1,14 @@
(
  import
    (
      let
        lock = builtins.fromJSON (builtins.readFile ./flake.lock);
      in
        fetchTarball {
          url = lock.nodes.flake-compat.locked.url or "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
          sha256 = lock.nodes.flake-compat.locked.narHash;
        }
    )
    {src = ./.;}
)
.defaultNix
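
This is the standard flake-compat shim: it lets non-flake tooling build the flake's default package, so both invocations below should produce the same result (a sketch):

```sh
nix build .   # flake-based build, as used in CI
nix-build     # legacy build through this default.nix shim
```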


@@ -0,0 +1,990 @@
openapi: 3.0.2
info:
version: "1.0.0"
title: Mycelium management
contact:
url: "https://github.com/threefoldtech/mycelium"
license:
name: Apache 2.0
url: "https://github.com/threefoldtech/mycelium/blob/master/LICENSE"
description: |
This is the specification of the **mycelium** management API. It is used to perform admin tasks on the system, and
to perform administrative duties.
externalDocs:
description: For full documentation, check out the mycelium github repo.
url: "https://github.com/threefoldtech/mycelium"
tags:
- name: Admin
description: Administrative operations
- name: Peer
description: Operations related to peer management
- name: Route
description: Operations related to network routes
- name: Message
description: Operations on the embedded message subsystem
- name: Topic
description: Operations related to message topic configuration
servers:
- url: "http://localhost:8989"
paths:
"/api/v1/admin":
get:
tags:
- Admin
summary: Get general info about the node
description: |
Get general info about the node, which is not related to other more specific functionality
operationId: getInfo
responses:
"200":
description: Success
content:
application/json:
schema:
$ref: "#/components/schemas/Info"
"/api/v1/admin/peers":
get:
tags:
- Admin
- Peer
summary: List known peers
description: |
List all peers known in the system, and info about their connection.
This includes the endpoint, how we know about the peer, the connection state, and, if the connection is alive, the amount
of bytes we've sent to and received from the peer.
operationId: getPeers
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/PeerStats"
post:
tags:
- Admin
- Peer
summary: Add a new peer
description: |
Add a new peer identified by the provided endpoint.
The peer is added to the list of known peers. It will eventually be connected
to by the standard connection loop of the peer manager. This means that a peer
which can't be connected to will stay in the system, as it might be reachable
later on.
operationId: addPeer
responses:
"204":
description: Peer added
"400":
description: Malformed endpoint
content:
text/plain:
schema:
type: string
description: Details about why the endpoint is not valid
"409":
description: Peer already exists
content:
text/plain:
schema:
type: string
description: message saying we already know this peer
"/api/v1/admin/peers/{endpoint}":
delete:
tags:
- Admin
- Peer
summary: Remove an existing peer
description: |
Remove an existing peer identified by the provided endpoint.
The peer is removed from the list of known peers. If a connection to it
is currently active, it will be closed.
operationId: deletePeer
responses:
"204":
description: Peer removed
"400":
description: Malformed endpoint
content:
text/plain:
schema:
type: string
description: Details about why the endpoint is not valid
"404":
description: Peer doesn't exist
content:
text/plain:
schema:
type: string
description: message saying we don't know this peer
"/api/v1/admin/routes/selected":
get:
tags:
- Admin
- Route
summary: List all selected routes
description: |
List all selected routes in the system, and their next hop identifier, metric and sequence number.
It is possible for a route to be selected and have an infinite metric. Such a route will, however, not forward packets.
operationId: getSelectedRoutes
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Route"
"/api/v1/admin/routes/fallback":
get:
tags:
- Admin
- Route
summary: List all active fallback routes
description: |
List all fallback routes in the system, and their next hop identifier, metric and sequence number.
These routes are available to be selected in case the selected route for a destination suddenly fails, or gets retracted.
operationId: getFallbackRoutes
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/Route"
"/api/v1/admin/routes/queried":
get:
tags:
- Admin
- Route
summary: List all currently queried subnets
description: |
List all currently queried subnets in the system, and the amount of seconds until the query expires.
These subnets are actively being probed in the network. If no route to them is discovered before the query expires,
they will be marked as not reachable temporarily.
operationId: getQueriedSubnets
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/QueriedSubnet"
"/api/v1/admin/routes/no_route":
get:
tags:
- Admin
- Route
summary: List all subnets which are explicitly marked as no route
description: |
List all subnets in the system which are marked no route, and the amount of seconds until the query expires.
These subnets have recently been probed in the network, and no route for them was discovered in time. No more
route requests will be sent for these subnets until the entry expires.
operationId: getNoRouteEntries
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
$ref: "#/components/schemas/NoRouteSubnet"
"/api/v1/messages":
get:
tags:
- Message
summary: Get a message from the inbound message queue
description: |
Get a message from the inbound message queue. By default, the message is removed from the queue and won't be shown again.
If the peek query parameter is set to true, the message will be peeked, and the next call to this endpoint will show the same message.
This method returns immediately by default: a message is returned if one is ready, and nothing is returned otherwise. If the timeout
query parameter is set, this call blocks for up to the given amount of seconds, returning early once a message is received.
operationId: popMessage
parameters:
- in: query
name: peek
required: false
schema:
type: boolean
description: Whether to peek the message or not. If this is true, the message won't be removed from the inbound queue when it is read
example: true
- in: query
name: timeout
required: false
schema:
type: integer
format: int64
minimum: 0
description: |
Amount of seconds to wait for a message to arrive if one is not available. Setting this to 0 is valid and will return
a message if one is present, or return immediately if there isn't one
example: 60
- in: query
name: topic
required: false
schema:
type: string
format: byte
minLength: 0
maxLength: 340
description: |
Optional filter for loading messages. If set, the system checks if the message has the given string at the start. This way
a topic can be encoded.
example: example.topic
responses:
"200":
description: Message retrieved
content:
application/json:
schema:
$ref: "#/components/schemas/InboundMessage"
"204":
description: No message ready
post:
tags:
- Message
summary: Submit a new message to the system.
description: |
Push a new message to the system's outbound message queue. The system will continuously attempt to send the message until
it is either fully transmitted, or the send deadline has expired.
operationId: pushMessage
parameters:
- in: query
name: reply_timeout
required: false
schema:
type: integer
format: int64
minimum: 0
description: |
Amount of seconds to wait for a reply to this message to come in. If not set, the system won't wait for a reply and will return
the ID of the message, which can be used later. If set, the system will wait for at most the given amount of seconds for a reply
to come in. If a reply arrives, it is returned to the client. If not, the message ID is returned for later use.
example: 120
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/PushMessageBody"
responses:
"200":
description: We received a reply within the specified timeout
content:
application/json:
schema:
$ref: "#/components/schemas/InboundMessage"
"201":
description: Message pushed successfully, and not waiting for a reply
content:
application/json:
schema:
$ref: "#/components/schemas/PushMessageResponseId"
"408":
description: The system timed out waiting for a reply to the message
content:
application/json:
schema:
$ref: "#/components/schemas/PushMessageResponseId"
"/api/v1/messages/reply/{id}":
post:
tags:
- Message
summary: Reply to a message with the given ID
description: |
Submits a reply message to the system, where `id` is the ID of a previously received message. If the sender is waiting
for a reply, it will bypass the queue of open messages.
operationId: pushMessageReply
parameters:
- in: path
name: id
required: true
schema:
type: string
format: hex
minLength: 16
maxLength: 16
example: abcdef0123456789
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/PushMessageBody"
responses:
"204":
description: successfully submitted the reply
"/api/v1/messages/status/{id}":
get:
tags:
- Message
summary: Get the status of an outbound message
description: |
Get information about the current state of an outbound message. This can be used to check the transmission
state, size and destination of the message.
operationId: getMessageInfo
parameters:
- in: path
name: id
required: true
schema:
type: string
format: hex
minLength: 16
maxLength: 16
example: abcdef0123456789
responses:
"200":
description: Success
content:
application/json:
schema:
$ref: "#/components/schemas/MessageStatusResponse"
"404":
description: Message not found
"/api/v1/messages/topics/default":
get:
tags:
- Message
- Topic
summary: Get the default topic action
description: |
Get the default action for topics that are not explicitly configured (accept or reject).
operationId: getDefaultTopicAction
responses:
"200":
description: Success
content:
application/json:
schema:
$ref: "#/components/schemas/DefaultTopicActionResponse"
put:
tags:
- Message
- Topic
summary: Set the default topic action
description: |
Set the default action for topics that are not explicitly configured (accept or reject).
operationId: setDefaultTopicAction
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/DefaultTopicActionRequest"
responses:
"204":
description: Default topic action set successfully
"/api/v1/messages/topics":
get:
tags:
- Message
- Topic
summary: Get all configured topics
description: |
Get all topics that are explicitly configured in the system.
operationId: getTopics
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
type: string
format: byte
description: Base64 encoded topic identifier
post:
tags:
- Message
- Topic
summary: Add a new topic
description: |
Add a new topic to the system's whitelist.
operationId: addTopic
requestBody:
content:
application/json:
schema:
type: string
format: byte
description: The topic to add
responses:
"201":
description: Topic added successfully
"/api/v1/messages/topics/{topic}":
delete:
tags:
- Message
- Topic
summary: Remove a topic
description: |
Remove a topic from the system's whitelist.
operationId: removeTopic
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to remove (base64 encoded)
responses:
"204":
description: Topic removed successfully
"400":
description: Invalid topic format
"/api/v1/messages/topics/{topic}/sources":
get:
tags:
- Message
- Topic
summary: Get sources for a topic
description: |
Get all sources (subnets) that are allowed to send messages for a specific topic.
operationId: getTopicSources
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to get sources for (base64 encoded)
responses:
"200":
description: Success
content:
application/json:
schema:
type: array
items:
type: string
description: Subnet in CIDR notation
"400":
description: Invalid topic format
post:
tags:
- Message
- Topic
summary: Add a source to a topic
description: |
Add a source (subnet) that is allowed to send messages for a specific topic.
operationId: addTopicSource
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to add a source to (base64 encoded)
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/TopicSourceRequest"
responses:
"204":
description: Source added successfully
"400":
description: Invalid topic format or subnet
"/api/v1/messages/topics/{topic}/sources/{subnet}":
delete:
tags:
- Message
- Topic
summary: Remove a source from a topic
description: |
Remove a source (subnet) that is allowed to send messages for a specific topic.
operationId: removeTopicSource
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to remove a source from (base64 encoded)
- in: path
name: subnet
required: true
schema:
type: string
description: The subnet to remove as a source
responses:
"204":
description: Source removed successfully
"400":
description: Invalid topic format or subnet
"/api/v1/messages/topics/{topic}/forward":
get:
tags:
- Message
- Topic
summary: Get the forward socket for a topic
description: |
Get the socket path where messages for a specific topic are forwarded to.
operationId: getTopicForwardSocket
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to get the forward socket for (base64 encoded)
responses:
"200":
description: Success
content:
application/json:
schema:
type: string
nullable: true
description: The socket path where messages are forwarded to
"400":
description: Invalid topic format
put:
tags:
- Message
- Topic
summary: Set the forward socket for a topic
description: |
Set the socket path where messages for a specific topic should be forwarded to.
operationId: setTopicForwardSocket
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to set the forward socket for (base64 encoded)
requestBody:
content:
application/json:
schema:
$ref: "#/components/schemas/TopicForwardSocketRequest"
responses:
"204":
description: Forward socket set successfully
"400":
description: Invalid topic format
delete:
tags:
- Message
- Topic
summary: Remove the forward socket for a topic
description: |
Remove the socket path where messages for a specific topic are forwarded to.
operationId: removeTopicForwardSocket
parameters:
- in: path
name: topic
required: true
schema:
type: string
format: byte
description: The topic to remove the forward socket for (base64 encoded)
responses:
"204":
description: Forward socket removed successfully
"400":
description: Invalid topic format
"/api/v1/pubkey/{mycelium_ip}":
get:
summary: Get the pubkey from node ip
description: |
Get the node's public key from its IP address.
operationId: getPublicKeyFromIp
parameters:
- in: path
name: mycelium_ip
required: true
schema:
type: string
format: ipv6
example: 5fd:7636:b80:9ad0::1
responses:
"200":
description: Success
content:
application/json:
schema:
$ref: "#/components/schemas/PublicKeyResponse"
"404":
description: Public key not found
components:
schemas:
Info:
description: General information about a node
type: object
properties:
nodeSubnet:
description: The subnet owned by the node and advertised to peers
type: string
example: 54f:b680:ba6e:7ced::/64
nodePubkey:
description: The public key of the node
type: string
format: hex
minLength: 64
maxLength: 64
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
Endpoint:
description: Identification to connect to a peer
type: object
properties:
proto:
description: Protocol used
type: string
enum:
- "tcp"
- "quic"
example: tcp
socketAddr:
description: The socket address used
type: string
example: 192.0.2.6:9651
PeerStats:
description: Info about a peer
type: object
properties:
endpoint:
$ref: "#/components/schemas/Endpoint"
type:
description: How we know about this peer
type: string
enum:
- "static"
- "inbound"
- "linkLocalDiscovery"
example: static
connectionState:
description: The current state of the connection to the peer
type: string
enum:
- "alive"
- "connecting"
- "dead"
example: alive
txBytes:
description: The amount of bytes transmitted to this peer
type: integer
format: int64
minimum: 0
example: 464531564
rxBytes:
description: The amount of bytes received from this peer
type: integer
format: int64
minimum: 0
example: 64645089
Route:
description: Information about a route
type: object
properties:
subnet:
description: The overlay subnet for which this is the route
type: string
example: 469:1348:ab0c:a1d8::/64
nextHop:
description: A way to identify the next hop of the route, where forwarded packets will be sent
type: string
example: TCP 203.0.113.2:60128 <-> 198.51.100.27:9651
metric:
description: The metric of the route, an estimation of how long the packet will take to arrive at its final destination
oneOf:
- description: A finite metric value
type: integer
format: int32
minimum: 0
maximum: 65534
example: 13
- description: An infinite (unreachable) metric. This is always `infinite`
type: string
example: infinite
seqno:
description: the sequence number advertised with this route by the source
type: integer
format: int32
minimum: 0
maximum: 65535
example: 1
QueriedSubnet:
description: Information about a subnet currently being queried
type: object
properties:
subnet:
description: The overlay subnet which we are currently querying
type: string
example: 503:5478:df06:d79a::/64
expiration:
description: The amount of seconds until the query expires
type: string
example: "37"
NoRouteSubnet:
description: Information about a subnet which is marked as no route
type: object
properties:
subnet:
description: The overlay subnet which is marked
type: string
example: 503:5478:df06:d79a::/64
expiration:
description: The amount of seconds until the entry expires
type: string
example: "37"
InboundMessage:
description: A message received by the system
type: object
properties:
id:
description: Id of the message, hex encoded
type: string
format: hex
minLength: 16
maxLength: 16
example: 0123456789abcdef
srcIp:
description: Sender overlay IP address
type: string
format: ipv6
example: 449:abcd:0123:defa::1
srcPk:
description: Sender public key, hex encoded
type: string
format: hex
minLength: 64
maxLength: 64
example: fedbca9876543210fedbca9876543210fedbca9876543210fedbca9876543210
dstIp:
description: Receiver overlay IP address
type: string
format: ipv6
example: 34f:b680:ba6e:7ced:355f:346f:d97b:eecb
dstPk:
description: Receiver public key, hex encoded. This is the public key of the system
type: string
format: hex
minLength: 64
maxLength: 64
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
topic:
description: An optional message topic
type: string
format: byte
minLength: 0
maxLength: 340
example: hpV+
payload:
description: The message payload, encoded in standard alphabet base64
type: string
format: byte
example: xuV+
PushMessageBody:
description: A message to send to a given receiver
type: object
properties:
dst:
$ref: "#/components/schemas/MessageDestination"
topic:
description: An optional message topic
type: string
format: byte
minLength: 0
maxLength: 340
example: hpV+
payload:
description: The message to send, base64 encoded
type: string
format: byte
example: xuV+
MessageDestination:
oneOf:
- description: An IP in the subnet of the receiver node
type: object
properties:
ip:
description: The target IP of the message
format: ipv6
example: 449:abcd:0123:defa::1
- description: The hex encoded public key of the receiver node
type: object
properties:
pk:
description: The hex encoded public key of the target node
type: string
minLength: 64
maxLength: 64
example: bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32
PushMessageResponseId:
description: The ID generated for a message after pushing it to the system
type: object
properties:
id:
description: Id of the message, hex encoded
type: string
format: hex
minLength: 16
maxLength: 16
example: 0123456789abcdef
MessageStatusResponse:
description: Information about an outbound message
type: object
properties:
dst:
description: IP address of the receiving node
type: string
format: ipv6
example: 449:abcd:0123:defa::1
state:
$ref: "#/components/schemas/TransmissionState"
created:
description: Unix timestamp of when this message was created
type: integer
format: int64
example: 1649512789
deadline:
description: Unix timestamp of when this message will expire. If the message is not received before this, the system will give up
type: integer
format: int64
example: 1649513089
msgLen:
description: Length of the message in bytes
type: integer
minimum: 0
example: 27
TransmissionState:
description: The state of an outbound message in its lifetime
oneOf:
- type: string
enum: ["pending", "received", "read", "aborted"]
example: "received"
- type: object
properties:
sending:
type: object
properties:
pending:
type: integer
minimum: 0
example: 5
sent:
type: integer
minimum: 0
example: 17
acked:
type: integer
minimum: 0
example: 3
example: "received"
PublicKeyResponse:
description: Public key requested based on a node's IP
type: object
properties:
NodePubKey:
type: string
format: hex
minLength: 64
maxLength: 64
example: 02468ace13579bdf02468ace13579bdf02468ace13579bdf02468ace13579bdf
DefaultTopicActionResponse:
description: Response for the default topic action
type: object
properties:
accept:
description: Whether unconfigured topics are accepted by default
type: boolean
example: true
DefaultTopicActionRequest:
description: Request to set the default topic action
type: object
properties:
accept:
description: Whether to accept unconfigured topics by default
type: boolean
example: true
TopicInfo:
description: Information about a configured topic
type: object
properties:
topic:
description: The topic identifier (base64 encoded)
type: string
format: byte
example: "ZXhhbXBsZS50b3BpYw=="
sources:
description: List of subnets that are allowed to send messages for this topic
type: array
items:
type: string
example: "503:5478:df06:d79a::/64"
forward_socket:
description: Optional socket path where messages for this topic are forwarded to
type: string
nullable: true
example: "/var/run/mycelium/topic_socket"
TopicSourceRequest:
description: Request to add a source to a topic whitelist
type: object
properties:
subnet:
description: The subnet to add as a source in CIDR notation
type: string
example: "503:5478:df06:d79a::/64"
TopicForwardSocketRequest:
description: Request to set a forward socket for a topic
type: object
properties:
socket_path:
description: The socket path where messages should be forwarded to
type: string
example: "/var/run/mycelium/topic_socket"

View File

@@ -0,0 +1,54 @@
# Data packet
A `data packet` contains user-specified data. This can be any data, as long as the sender and receiver
both understand what it is, without further help. Intermediate hops, which route the data, have sufficient
information in the header to know where to forward the packet. In practice, the data will be encrypted
to avoid eavesdropping by intermediate hops.
## Packet header
The packet header has a fixed size of 36 bytes, with the following layout:
```
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Reserved | Length | Hop Limit |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Source IP +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Destination IP +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
```
The first 8 bits are reserved and must be set to 0.
The next 16 bits are used to specify the length of the body. It is expected that
the actual length of a packet body does not exceed 65K right now, and overhead related
to encryption should be handled by the client before sending the packet.
The next byte is the hop-limit. Every node decrements this value by 1 before sending
the packet. If a node decrements this value to 0, the packet is discarded.
The next 16 bytes contain the sender IP address.
The final 16 bytes contain the destination IP address.
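As a sketch, this header could be parsed in Rust as follows (struct and field names are illustrative, and network byte order is assumed for the length field):
```rust
/// Illustrative view of the fixed 36-byte header described above.
struct DataPacketHeader {
    body_length: u16,
    hop_limit: u8,
    src_ip: [u8; 16],
    dst_ip: [u8; 16],
}

fn parse_header(buf: &[u8; 36]) -> DataPacketHeader {
    DataPacketHeader {
        // byte 0 is reserved; bytes 1-2 hold the body length (big-endian assumed)
        body_length: u16::from_be_bytes([buf[1], buf[2]]),
        // byte 3 is the hop limit, decremented at every hop
        hop_limit: buf[3],
        // bytes 4..20 are the source IP, bytes 20..36 the destination IP
        src_ip: buf[4..20].try_into().expect("slice is 16 bytes"),
        dst_ip: buf[20..36].try_into().expect("slice is 16 bytes"),
    }
}
```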
## Body
Following the header is a variable length body. The protocol does not have any requirements for the
body, and the only requirement imposed is that the body is as long as specified in the header length
field. It is technically legal according to the protocol to transmit a data packet without a body,
i.e. a body length of 0. This is useless however, as there will not be any data to interpret.

View File

@@ -0,0 +1,161 @@
# Message subsystem
The message subsystem can be used to send arbitrary length messages to receivers. A receiver is any
other node in the network. It can be identified either by its public key, or by an IP address in its announced
range. The message subsystem can be interacted with either via the HTTP API, which is
[documented here](./api.yaml), or via the `mycelium` binary. By default, the system does not interpret
the message data in any way. When using the binary, the message is slightly modified to include an optional
topic at the start of the message. Note that in the HTTP API, all messages are encoded in base64. This
might make it difficult to consume these messages without additional tooling.
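As a sketch, a payload field returned by the API could be decoded in Rust like this (assuming the third-party `base64` crate, which is not part of mycelium itself):
```rust
use base64::{engine::general_purpose::STANDARD, Engine as _};

fn main() {
    // `payload` as found in an InboundMessage returned by the API
    let payload = "xuV+";
    let bytes = STANDARD.decode(payload).expect("valid base64");
    println!("decoded {} raw bytes", bytes.len());
}
```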
Messages can be categorized by topics, which can be configured with whitelisted subnets and socket forwarding paths.
For detailed information on how to configure topics, see the [Topic Configuration Guide](./topic_configuration.md).
## JSON-RPC API Examples
These examples assume you have at least 2 nodes running, and that they are both part of the same network.
Send a message on node1, waiting up to 2 minutes for a possible reply:
```json
{
"jsonrpc": "2.0",
"method": "pushMessage",
"params": [
{
"dst": {"pk": "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32"},
"payload": "xuV+"
},
120
],
"id": 1
}
```
Using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "pushMessage",
"params": [
{
"dst": {"pk": "bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32"},
"payload": "xuV+"
},
120
],
"id": 1
}'
```
Listen for a message on node2. Note that messages received while nothing is listening are added to
a queue for later consumption. Wait for up to 1 minute.
```json
{
"jsonrpc": "2.0",
"method": "popMessage",
"params": [false, 60, null],
"id": 1
}
```
Using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "popMessage",
"params": [false, 60, null],
"id": 1
}'
```
The system will (immediately) receive our previously sent message:
```json
{"id":"e47b25063912f4a9","srcIp":"34f:b680:ba6e:7ced:355f:346f:d97b:eecb","srcPk":"955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b","dstIp":"2e4:9ace:9252:630:beee:e405:74c0:d876","dstPk":"bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32","payload":"xuV+"}
```
To send a reply, we can post a message on the reply path, with the received message `id` (still on
node2):
```json
{
"jsonrpc": "2.0",
"method": "pushMessageReply",
"params": [
"e47b25063912f4a9",
{
"dst": {"pk":"955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b"},
"payload": "xuC+"
}
],
"id": 1
}
```
Using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "pushMessageReply",
"params": [
"e47b25063912f4a9",
{
"dst": {"pk":"955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b"},
"payload": "xuC+"
}
],
"id": 1
}'
```
If you did this fast enough, the initial sender (node1) will now receive the reply.
## Mycelium binary examples
As explained above, while using the binary the message is slightly modified to insert the optional
topic. As such, when using the binary to send messages, it is suggested to make sure the receiver is
also using the binary to listen for messages. The options discussed here do not cover all possibilities;
use the `--help` flag (`mycelium message send --help` and `mycelium message receive --help`) for a
full overview.
Once again, send a message. This time using a topic (example.topic). Note that there are no constraints
on what a valid topic is, other than that it is valid UTF-8, and at most 255 bytes in size. The `--wait`
flag can be used to indicate that we are waiting for a reply. If it is set, we can also use an additional
`--timeout` flag to govern exactly how long (in seconds) to wait for. The default is to wait forever.
```bash
mycelium message send 2e4:9ace:9252:630:beee:e405:74c0:d876 'this is a message' -t example.topic --wait
```
On the second node, listen for messages with this topic. If a different topic is used, the previous
message won't be received. If no topic is set, all messages are received. An optional timeout flag
can be specified, which indicates how long to wait for. Absence of this flag will cause the binary
to wait forever.
```bash
mycelium message receive -t example.topic
```
Again, if the previous command was executed, a message will be received immediately:
```json
{"id":"4a6c956e8d36381f","topic":"example.topic","srcIp":"34f:b680:ba6e:7ced:355f:346f:d97b:eecb","srcPk":"955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b","dstIp":"2e4:9ace:9252:630:beee:e405:74c0:d876","dstPk":"bb39b4a3a4efd70f3e05e37887677e02efbda14681d0acd3882bc0f754792c32","payload":"this is a message"}
```
And once again, we can use the ID from this message to reply to the original sender, who might be waiting
for this reply (notice we used the hex encoded public key to identify the receiver here, rather than an IP):
```bash
mycelium message send 955bf6bea5e1150fd8e270c12e5b2fc08f08f7c5f3799d10550096cc137d671b "this is a reply" --reply-to 4a6c956e8d36381f
```

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
# Packet
A `Packet` is the largest communication object between established `peers`. All communication is done
via these `packets`. The `packet` itself consists of a fixed size header, and a variable size body.
The body contains a more specific type of data.
## Packet header
The packet header has a fixed size of 4 bytes, with the following layout:
```
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version | Type | Reserved |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
```
The first byte is used to indicate the version of the protocol. Currently, only version 1 is supported
(0x01). The next byte is used to indicate the type of the body. `0x00` indicates a data packet, while
`0x01` indicates a control packet. The remaining 16 bits are currently reserved, and should be set to
all 0.
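A minimal sketch of validating this header in Rust (type and function names are illustrative):
```rust
/// The two body types currently defined by the protocol.
enum BodyType {
    Data,    // 0x00
    Control, // 0x01
}

fn parse_packet_header(buf: [u8; 4]) -> Option<BodyType> {
    // first byte: protocol version, only 0x01 is currently supported
    if buf[0] != 0x01 {
        return None;
    }
    // bytes 2 and 3 are reserved and should be all zero
    match buf[1] {
        0x00 => Some(BodyType::Data),
        0x01 => Some(BodyType::Control),
        _ => None,
    }
}
```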

View File

@@ -0,0 +1,35 @@
# Private network
> Private network functionality is currently in an experimental stage
While traffic is end-to-end encrypted in mycelium, any node in the network learns
every available connected subnet (and can derive the associated default address
in that subnet). As a result, running a mycelium node adds what is effectively
a public interface to your computer, so everyone can send traffic to it. On top
of this, the routing table consumes memory in proportion to the number of nodes in
the network. To remedy this, people can opt to run a "private network". By configuring
a pre-shared key (and network name), only nodes which know the key associated with
the name can connect to your network.
## Implementation
Private networks are implemented entirely in the connection layer (no specific
protocol logic is implemented to support this). This relies on the pre-shared key
functionality of TLS 1.3. As such, you need both a `network name` (an `identity`),
and the `PSK` itself. On top of the limitations in the protocol, we currently further
limit the network name and PSK as follows:
- Network name must be a UTF-8 encoded string of 2 to 64 bytes.
- PSK must be exactly 32 bytes.
Not all cipher suites defined in TLS 1.3 are supported. At present, _at least_
`TLS_AES_128_GCM_SHA256` and `TLS_CHACHA20_POLY1305_SHA256` are supported.
## Enable private network
In order to use the private network implementation of `mycelium`, a separate `mycelium-private`
binary is available. Private network functionality can be enabled by setting both
the `network-name` and `network-key-file` flags on the command line. All nodes that
wish to join the network must use the same values for both flags.
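For example, a hypothetical invocation (the key file must contain the raw 32-byte PSK):
```sh
# generate a random 32-byte pre-shared key, to be shared out-of-band with all members
head -c 32 /dev/urandom > network.key
mycelium-private --network-name mynet --network-key-file network.key
```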
> ⚠️ Network name is public, do not put any confidential data here.

View File

@@ -0,0 +1,211 @@
# Topic Configuration Guide
This document explains how to configure message topics in Mycelium, including how to add new topics, configure socket forwarding paths, and manage whitelisted subnets.
## Overview
Mycelium's messaging system uses topics to categorize and route messages. Each topic can be configured with:
- **Whitelisted subnets**: IP subnets that are allowed to send messages to this topic
- **Forward socket**: A Unix domain socket path where messages for this topic will be forwarded
When a message is received with a topic that has a configured socket path, the content of the message is pushed to the socket, and the system waits for a reply from the socket, which is then sent back to the original sender.
## Configuration Using JSON-RPC API
The JSON-RPC API provides a comprehensive set of methods for managing topics, socket forwarding paths, and whitelisted subnets.
## Adding a New Topic
### Using the JSON-RPC API
```json
{
"jsonrpc": "2.0",
"method": "addTopic",
"params": ["dGVzdC10b3BpYw=="], // base64 encoding of "test-topic"
"id": 1
}
```
Example using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "addTopic",
"params": ["dGVzdC10b3BpYw=="],
"id": 1
}'
```
## Configuring a Socket Forwarding Path
When a topic is configured with a socket forwarding path, messages for that topic will be forwarded to the specified Unix domain socket instead of being pushed to the message queue.
### Using the JSON-RPC API
```json
{
"jsonrpc": "2.0",
"method": "setTopicForwardSocket",
"params": ["dGVzdC10b3BpYw==", "/path/to/socket"],
"id": 1
}
```
Example using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "setTopicForwardSocket",
"params": ["dGVzdC10b3BpYw==", "/path/to/socket"],
"id": 1
}'
```
## Adding a Whitelisted Subnet
Whitelisted subnets control which IP addresses are allowed to send messages to a specific topic. If a message is received from an IP that is not in the whitelist, it will be dropped.
### Using the JSON-RPC API
```json
{
"jsonrpc": "2.0",
"method": "addTopicSource",
"params": ["dGVzdC10b3BpYw==", "192.168.1.0/24"],
"id": 1
}
```
Example using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "addTopicSource",
"params": ["dGVzdC10b3BpYw==", "192.168.1.0/24"],
"id": 1
}'
```
## Setting the Default Topic Action
You can configure the default action to take for topics that don't have explicit whitelist configurations:
### Using the JSON-RPC API
```json
{
"jsonrpc": "2.0",
"method": "setDefaultTopicAction",
"params": [true],
"id": 1
}
```
Example using curl:
```bash
curl -X POST http://localhost:8990/rpc \
-H "Content-Type: application/json" \
-d '{
"jsonrpc": "2.0",
"method": "setDefaultTopicAction",
"params": [true],
"id": 1
}'
```
## Socket Protocol
When a message is forwarded to a socket, the raw message data is sent to the socket. The socket is expected to process the message and send a reply, which will be forwarded back to the original sender.
The socket protocol is simple:
1. The message data is written to the socket
2. The system waits for a reply from the socket (with a configurable timeout)
3. The reply data is read from the socket and sent back to the original sender
## Example: Creating a Socket Server
Here's an example of a simple socket server that echoes back the received data:
```rust
use std::{
io::{Read, Write},
os::unix::net::UnixListener,
path::Path,
thread,
};
fn main() {
let socket_path = "/tmp/mycelium-socket";
// Remove the socket file if it already exists
if Path::new(socket_path).exists() {
std::fs::remove_file(socket_path).unwrap();
}
// Create the Unix domain socket
let listener = UnixListener::bind(socket_path).unwrap();
println!("Socket server listening on {}", socket_path);
// Accept connections in a loop
for stream in listener.incoming() {
match stream {
Ok(mut stream) => {
// Spawn a thread to handle the connection
thread::spawn(move || {
// Read the data
let mut buffer = Vec::new();
stream.read_to_end(&mut buffer).unwrap();
println!("Received {} bytes", buffer.len());
// Process the data (in this case, just echo it back)
// In a real application, you would parse and process the message here
// Send the reply
stream.write_all(&buffer).unwrap();
println!("Sent reply");
});
}
Err(e) => {
eprintln!("Error accepting connection: {}", e);
}
}
}
}
```
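To exercise this server manually, a hypothetical client can connect to the same socket. Note that the server above reads until EOF, so the client must shut down its write half before waiting for the reply:
```rust
use std::io::{Read, Write};
use std::net::Shutdown;
use std::os::unix::net::UnixStream;

fn main() -> std::io::Result<()> {
    let mut stream = UnixStream::connect("/tmp/mycelium-socket")?;
    stream.write_all(b"hello")?;
    // signal EOF so the server's read_to_end returns
    stream.shutdown(Shutdown::Write)?;
    let mut reply = Vec::new();
    stream.read_to_end(&mut reply)?;
    println!("echo: {}", String::from_utf8_lossy(&reply));
    Ok(())
}
```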
## Troubleshooting
### Message Not Being Forwarded to Socket
1. Check that the topic is correctly configured with a socket path
2. Verify that the socket server is running and the socket file exists
3. Ensure that the sender's IP is in the whitelisted subnets for the topic
4. Check the logs for any socket connection or timeout errors
### Socket Server Not Receiving Messages
1. Verify that the socket path is correct and accessible
2. Check that the socket server has the necessary permissions to read/write to the socket
3. Ensure that the socket server is properly handling the connection
### Reply Not Being Sent Back
1. Verify that the socket server is sending a reply
2. Check for any timeout errors in the logs
3. Ensure that the original sender is still connected and able to receive the reply

107
components/mycelium/flake.lock generated Normal file
View File

@@ -0,0 +1,107 @@
{
"nodes": {
"crane": {
"locked": {
"lastModified": 1742317686,
"narHash": "sha256-ScJYnUykEDhYeCepoAWBbZWx2fpQ8ottyvOyGry7HqE=",
"owner": "ipetkov",
"repo": "crane",
"rev": "66cb0013f9a99d710b167ad13cbd8cc4e64f2ddb",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"flake-compat": {
"locked": {
"lastModified": 1733328505,
"narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
"rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
"revCount": 69,
"type": "tarball",
"url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://flakehub.com/f/edolstra/flake-compat/1.tar.gz"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"nix-filter": {
"locked": {
"lastModified": 1731533336,
"narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1742288794,
"narHash": "sha256-Txwa5uO+qpQXrNG4eumPSD+hHzzYi/CdaM80M9XRLCo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "b6eaf97c6960d97350c584de1b6dcff03c9daf42",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,156 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
crane.url = "github:ipetkov/crane";
flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.tar.gz";
nix-filter.url = "github:numtide/nix-filter";
};
outputs =
{ self
, crane
, flake-utils
, nix-filter
, ...
}@inputs:
{
overlays.default = final: prev:
let
inherit (final) lib stdenv darwin;
craneLib = crane.mkLib final;
in
{
myceliumd =
let
cargoToml = ./myceliumd/Cargo.toml;
cargoLock = ./myceliumd/Cargo.lock;
manifest = craneLib.crateNameFromCargoToml { inherit cargoToml; };
in
lib.makeOverridable craneLib.buildPackage {
src = nix-filter {
root = ./.;
# If no include is passed, it will include all the paths.
include = [
./Cargo.toml
./Cargo.lock
./mycelium
./mycelium-api
./mycelium-cli
./mycelium-metrics
./myceliumd
./myceliumd-private
./mobile
./docs
];
};
inherit (manifest) pname version;
inherit cargoToml cargoLock;
sourceRoot = "source/myceliumd";
doCheck = false;
nativeBuildInputs = [
final.pkg-config
# openssl base library
final.openssl
# required by openssl-sys
final.perl
];
buildInputs = lib.optionals stdenv.isDarwin [
darwin.apple_sdk.frameworks.Security
darwin.apple_sdk.frameworks.SystemConfiguration
final.libiconv
];
meta = {
mainProgram = "mycelium";
};
};
myceliumd-private =
let
cargoToml = ./myceliumd-private/Cargo.toml;
cargoLock = ./myceliumd-private/Cargo.lock;
manifest = craneLib.crateNameFromCargoToml { inherit cargoToml; };
in
lib.makeOverridable craneLib.buildPackage {
src = nix-filter {
root = ./.;
include = [
./Cargo.toml
./Cargo.lock
./mycelium
./mycelium-api
./mycelium-cli
./mycelium-metrics
./myceliumd
./myceliumd-private
./mobile
./docs
];
};
inherit (manifest) pname version;
inherit cargoToml cargoLock;
sourceRoot = "source/myceliumd-private";
doCheck = false;
nativeBuildInputs = [
final.pkg-config
# openssl base library
final.openssl
# required by openssl-sys
final.perl
];
buildInputs = lib.optionals stdenv.isDarwin [
darwin.apple_sdk.frameworks.Security
darwin.apple_sdk.frameworks.SystemConfiguration
final.libiconv
];
meta = {
mainProgram = "mycelium-private";
};
};
};
} //
flake-utils.lib.eachSystem
[
flake-utils.lib.system.x86_64-linux
flake-utils.lib.system.aarch64-linux
flake-utils.lib.system.x86_64-darwin
flake-utils.lib.system.aarch64-darwin
]
(system:
let
craneLib = crane.mkLib pkgs;
pkgs = import inputs.nixpkgs {
inherit system;
overlays = [ self.overlays.default ];
};
in
{
devShells.default = craneLib.devShell {
packages = [
pkgs.rust-analyzer
];
RUST_SRC_PATH = "${pkgs.rustPlatform.rustLibSrc}";
};
packages = {
default = self.packages.${system}.myceliumd;
inherit (pkgs) myceliumd myceliumd-private;
};
});
}

View File

@@ -0,0 +1,206 @@
{\rtf1\ansi\ansicpg1252\deff0\nouicompat\deflang1033{\fonttbl{\f0\fnil\fcharset0 Calibri;}}
{\colortbl ;\red0\green0\blue255;}
{\*\generator Riched20 10.0.22621}\viewkind4\uc1
\pard\sa200\sl276\slmult1\f0\fs22\lang9 Apache License\par
Version 2.0, January 2004\par
{{\field{\*\fldinst{HYPERLINK http://www.apache.org/licenses/ }}{\fldrslt{http://www.apache.org/licenses/\ul0\cf0}}}}\f0\fs22\par
\par
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\par
\par
1. Definitions.\par
\par
"License" shall mean the terms and conditions for use, reproduction,\par
and distribution as defined by Sections 1 through 9 of this document.\par
\par
"Licensor" shall mean the copyright owner or entity authorized by\par
the copyright owner that is granting the License.\par
\par
"Legal Entity" shall mean the union of the acting entity and all\par
other entities that control, are controlled by, or are under common\par
control with that entity. For the purposes of this definition,\par
"control" means (i) the power, direct or indirect, to cause the\par
direction or management of such entity, whether by contract or\par
otherwise, or (ii) ownership of fifty percent (50%) or more of the\par
outstanding shares, or (iii) beneficial ownership of such entity.\par
\par
"You" (or "Your") shall mean an individual or Legal Entity\par
exercising permissions granted by this License.\par
\par
"Source" form shall mean the preferred form for making modifications,\par
including but not limited to software source code, documentation\par
source, and configuration files.\par
\par
"Object" form shall mean any form resulting from mechanical\par
transformation or translation of a Source form, including but\par
not limited to compiled object code, generated documentation,\par
and conversions to other media types.\par
\par
"Work" shall mean the work of authorship, whether in Source or\par
Object form, made available under the License, as indicated by a\par
copyright notice that is included in or attached to the work\par
(an example is provided in the Appendix below).\par
\par
"Derivative Works" shall mean any work, whether in Source or Object\par
form, that is based on (or derived from) the Work and for which the\par
editorial revisions, annotations, elaborations, or other modifications\par
represent, as a whole, an original work of authorship. For the purposes\par
of this License, Derivative Works shall not include works that remain\par
separable from, or merely link (or bind by name) to the interfaces of,\par
the Work and Derivative Works thereof.\par
\par
"Contribution" shall mean any work of authorship, including\par
the original version of the Work and any modifications or additions\par
to that Work or Derivative Works thereof, that is intentionally\par
submitted to Licensor for inclusion in the Work by the copyright owner\par
or by an individual or Legal Entity authorized to submit on behalf of\par
the copyright owner. For the purposes of this definition, "submitted"\par
means any form of electronic, verbal, or written communication sent\par
to the Licensor or its representatives, including but not limited to\par
communication on electronic mailing lists, source code control systems,\par
and issue tracking systems that are managed by, or on behalf of, the\par
Licensor for the purpose of discussing and improving the Work, but\par
excluding communication that is conspicuously marked or otherwise\par
designated in writing by the copyright owner as "Not a Contribution."\par
\par
"Contributor" shall mean Licensor and any individual or Legal Entity\par
on behalf of whom a Contribution has been received by Licensor and\par
subsequently incorporated within the Work.\par
\par
2. Grant of Copyright License. Subject to the terms and conditions of\par
this License, each Contributor hereby grants to You a perpetual,\par
worldwide, non-exclusive, no-charge, royalty-free, irrevocable\par
copyright license to reproduce, prepare Derivative Works of,\par
publicly display, publicly perform, sublicense, and distribute the\par
Work and such Derivative Works in Source or Object form.\par
\par
3. Grant of Patent License. Subject to the terms and conditions of\par
this License, each Contributor hereby grants to You a perpetual,\par
worldwide, non-exclusive, no-charge, royalty-free, irrevocable\par
(except as stated in this section) patent license to make, have made,\par
use, offer to sell, sell, import, and otherwise transfer the Work,\par
where such license applies only to those patent claims licensable\par
by such Contributor that are necessarily infringed by their\par
Contribution(s) alone or by combination of their Contribution(s)\par
with the Work to which such Contribution(s) was submitted. If You\par
institute patent litigation against any entity (including a\par
cross-claim or counterclaim in a lawsuit) alleging that the Work\par
or a Contribution incorporated within the Work constitutes direct\par
or contributory patent infringement, then any patent licenses\par
granted to You under this License for that Work shall terminate\par
as of the date such litigation is filed.\par
\par
4. Redistribution. You may reproduce and distribute copies of the\par
Work or Derivative Works thereof in any medium, with or without\par
modifications, and in Source or Object form, provided that You\par
meet the following conditions:\par
\par
(a) You must give any other recipients of the Work or\par
Derivative Works a copy of this License; and\par
\par
(b) You must cause any modified files to carry prominent notices\par
stating that You changed the files; and\par
\par
(c) You must retain, in the Source form of any Derivative Works\par
that You distribute, all copyright, patent, trademark, and\par
attribution notices from the Source form of the Work,\par
excluding those notices that do not pertain to any part of\par
the Derivative Works; and\par
\par
(d) If the Work includes a "NOTICE" text file as part of its\par
distribution, then any Derivative Works that You distribute must\par
include a readable copy of the attribution notices contained\par
within such NOTICE file, excluding those notices that do not\par
pertain to any part of the Derivative Works, in at least one\par
of the following places: within a NOTICE text file distributed\par
as part of the Derivative Works; within the Source form or\par
documentation, if provided along with the Derivative Works; or,\par
within a display generated by the Derivative Works, if and\par
wherever such third-party notices normally appear. The contents\par
of the NOTICE file are for informational purposes only and\par
do not modify the License. You may add Your own attribution\par
notices within Derivative Works that You distribute, alongside\par
or as an addendum to the NOTICE text from the Work, provided\par
that such additional attribution notices cannot be construed\par
as modifying the License.\par
\par
You may add Your own copyright statement to Your modifications and\par
may provide additional or different license terms and conditions\par
for use, reproduction, or distribution of Your modifications, or\par
for any such Derivative Works as a whole, provided Your use,\par
reproduction, and distribution of the Work otherwise complies with\par
the conditions stated in this License.\par
\par
5. Submission of Contributions. Unless You explicitly state otherwise,\par
any Contribution intentionally submitted for inclusion in the Work\par
by You to the Licensor shall be under the terms and conditions of\par
this License, without any additional terms or conditions.\par
Notwithstanding the above, nothing herein shall supersede or modify\par
the terms of any separate license agreement you may have executed\par
with Licensor regarding such Contributions.\par
\par
6. Trademarks. This License does not grant permission to use the trade\par
names, trademarks, service marks, or product names of the Licensor,\par
except as required for reasonable and customary use in describing the\par
origin of the Work and reproducing the content of the NOTICE file.\par
\par
7. Disclaimer of Warranty. Unless required by applicable law or\par
agreed to in writing, Licensor provides the Work (and each\par
Contributor provides its Contributions) on an "AS IS" BASIS,\par
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\par
implied, including, without limitation, any warranties or conditions\par
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\par
PARTICULAR PURPOSE. You are solely responsible for determining the\par
appropriateness of using or redistributing the Work and assume any\par
risks associated with Your exercise of permissions under this License.\par
\par
8. Limitation of Liability. In no event and under no legal theory,\par
whether in tort (including negligence), contract, or otherwise,\par
unless required by applicable law (such as deliberate and grossly\par
negligent acts) or agreed to in writing, shall any Contributor be\par
liable to You for damages, including any direct, indirect, special,\par
incidental, or consequential damages of any character arising as a\par
result of this License or out of the use or inability to use the\par
Work (including but not limited to damages for loss of goodwill,\par
work stoppage, computer failure or malfunction, or any and all\par
other commercial damages or losses), even if such Contributor\par
has been advised of the possibility of such damages.\par
\par
9. Accepting Warranty or Additional Liability. While redistributing\par
the Work or Derivative Works thereof, You may choose to offer,\par
and charge a fee for, acceptance of support, warranty, indemnity,\par
or other liability obligations and/or rights consistent with this\par
License. However, in accepting such obligations, You may act only\par
on Your own behalf and on Your sole responsibility, not on behalf\par
of any other Contributor, and only if You agree to indemnify,\par
defend, and hold each Contributor harmless for any liability\par
incurred by, or claims asserted against, such Contributor by reason\par
of your accepting any such warranty or additional liability.\par
\par
END OF TERMS AND CONDITIONS\par
\par
APPENDIX: How to apply the Apache License to your work.\par
\par
To apply the Apache License to your work, attach the following\par
boilerplate notice, with the fields enclosed by brackets "[]"\par
replaced with your own identifying information. (Don't include\par
the brackets!) The text should be enclosed in the appropriate\par
comment syntax for the file format. We also recommend that a\par
file or class name and description of purpose be included on the\par
same "printed page" as the copyright notice for easier\par
identification within third-party archives.\par
\par
Copyright TF Tech NV\par
\par
Licensed under the Apache License, Version 2.0 (the "License");\par
you may not use this file except in compliance with the License.\par
You may obtain a copy of the License at\par
\par
{{\field{\*\fldinst{HYPERLINK http://www.apache.org/licenses/LICENSE-2.0 }}{\fldrslt{http://www.apache.org/licenses/LICENSE-2.0\ul0\cf0}}}}\f0\fs22\par
\par
Unless required by applicable law or agreed to in writing, software\par
distributed under the License is distributed on an "AS IS" BASIS,\par
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\par
See the License for the specific language governing permissions and\par
limitations under the License.\par
}

View File

@@ -0,0 +1,8 @@
<!--
This file contains the declaration of all the localizable strings.
-->
<WixLocalization xmlns="http://wixtoolset.org/schemas/v4/wxl" Culture="en-US">
<String Id="DowngradeError" Value="A newer version of Mycelium is already installed." />
</WixLocalization>

View File

@@ -0,0 +1,93 @@
<?xml version="1.0" encoding="UTF-8"?>
<Wix xmlns="http://wixtoolset.org/schemas/v4/wxs" xmlns:ui="http://wixtoolset.org/schemas/v4/wxs/ui">
<Package
Name="Mycelium"
Version="0.5.4"
Language="1033"
Manufacturer="ThreeFold Tech"
Scope="perMachine"
UpgradeCode="BAF27FEF-C391-4F8C-9CF3-2C4C4DBC7B57" >
<!--Embed the cabinet file into the installer-->
<Media Id="1" Cabinet="MyceliumCabinet" EmbedCab="yes" />
<!--Do not install when having more recent version installed already-->
<MajorUpgrade DowngradeErrorMessage="!(loc.DowngradeError)" />
<Feature Id="Main">
<ComponentGroupRef Id="MyceliumComponentGroup" />
<ComponentRef Id="ConfigFileComponent" />
</Feature>
<UI>
<ui:WixUI Id="WixUI_InstallDir" InstallDirectory="INSTALL_LOC"/>
</UI>
<WixVariable Id="WixUILicenseRtf" Value="installers\windows\wix\LICENSE.rtf"/>
</Package>
<Fragment>
<StandardDirectory Id="AppDataFolder">
<Directory Id="ThreeFoldAppDataFolder" Name="ThreeFold Tech" >
<Directory Id="MyceliumAppDataFolder" Name="Mycelium" />
</Directory>
</StandardDirectory>
<!-- We use our own install location instead of the default INSTALLFOLDER,
as this would result in C:\Program Files\ThreeFold Tech Mycelium instead of just C:\Program Files\Mycelium -->
<StandardDirectory Id="ProgramFiles64Folder">
<Directory Id="INSTALL_LOC" Name="Mycelium"/>
</StandardDirectory>
<ComponentGroup Id="MyceliumComponentGroup" Directory="INSTALL_LOC">
<Component
Id="MainExecutable"
Guid="7A0BF6C6-3DD6-4A10-9DAC-F36A276856EF"
Bitness="always64"
>
<File
Id="mycelium_exe"
Source="myceliumd\target\release\mycelium.exe"
KeyPath="yes"
Vital="yes" />
<ServiceInstall
Name="mycelium"
Description="Mycelium IPv6 overlay network service"
ErrorControl="normal"
Interactive="no"
Vital="yes"
/>
<!--Adds the INSTALLFOLDER to the PATH environment variable-->
<Environment
Id="MyceliumPath"
Name="PATH"
Value="[INSTALL_LOC]"
Permanent="no"
Part="last"
Action="set"
System="yes"
/>
</Component>
<Component
Id="WintunLibrary"
Guid="5DA22A56-9C44-4A1E-89CC-19E7A52F5E8B"
Bitness="always64"
Directory="System64Folder"
>
<File
Id="wintun_dll"
Source="myceliumd\wintun.dll"
KeyPath="yes"
Vital="yes" />
</Component>
</ComponentGroup>
<Component Id="ConfigFileComponent" Guid="*" Directory="MyceliumAppDataFolder">
<File
Id="ConfigFile"
Name="mycelium.toml"
Source="config_example.toml"
KeyPath="yes" />
</Component>
</Fragment>
</Wix>

1
components/mycelium/mobile/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
target/

3426
components/mycelium/mobile/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,26 @@
[package]
name = "mobile"
version = "0.1.0"
edition = "2021"
[features]
mactunfd = ["mycelium/mactunfd"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
mycelium = { path = "../mycelium", features = ["vendored-openssl"] }
tokio = { version = "1.44.0", features = ["signal", "rt-multi-thread"] }
thiserror = "2.0.12"
tracing = { version = "0.1.41", features = ["release_max_level_debug"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
once_cell = "1.21.1"
[target.'cfg(target_os = "android")'.dependencies]
tracing-android = "0.2.0"
[target.'cfg(target_os = "ios")'.dependencies]
tracing-oslog = "0.2.0"
[target.'cfg(target_os = "macos")'.dependencies]
tracing-oslog = "0.2.0"

View File

@@ -0,0 +1,3 @@
# mobile crate
This crate is called from Dart/Flutter, Kotlin (Android), or Swift (iOS).
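
The exported surface is small: key generation, address derivation, and blocking start/stop/status entry points. Below is a minimal host-side sketch, written in Rust for brevity (a real integration goes through the Dart/Kotlin/Swift bindings; the peer endpoint and the `tun_fd` value of `-1` are placeholder assumptions, not values from this repository):

```rust
use std::{thread, time::Duration};

fn main() {
    // Generate a key and derive the node's overlay address up front.
    let key = mobile::generate_secret_key();
    println!("address: {}", mobile::address_from_secret_key(key.clone()));

    // start_mycelium blocks on its own tokio runtime, so run it on a thread.
    let peers = vec!["tcp://[2a01:4f8:212:fa6::2]:9651".to_string()];
    let node = thread::spawn(move || mobile::start_mycelium(peers, -1, key));

    thread::sleep(Duration::from_secs(1)); // give the node a moment to start
    println!("peers: {:?}", mobile::get_peer_status());
    println!("stop: {}", mobile::stop_mycelium());
    node.join().unwrap();
}
```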

View File

@@ -0,0 +1,284 @@
use std::convert::TryFrom;
use std::io;
use tracing::{error, info};
use metrics::Metrics;
use mycelium::endpoint::Endpoint;
use mycelium::{crypto, metrics, Config, Node};
use once_cell::sync::Lazy;
use tokio::sync::{mpsc, Mutex};
use tokio::time::{sleep, timeout, Duration};
const CHANNEL_MSG_OK: &str = "ok";
const CHANNEL_TIMEOUT: u64 = 2;
#[cfg(target_os = "android")]
fn setup_logging() {
use tracing::level_filters::LevelFilter;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let targets = Targets::new()
.with_default(LevelFilter::INFO)
.with_target("mycelium::router", LevelFilter::WARN);
tracing_subscriber::registry()
.with(tracing_android::layer("mycelium").expect("failed to setup logger"))
.with(targets)
.init();
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
fn setup_logging() {
use tracing::level_filters::LevelFilter;
use tracing_oslog::OsLogger;
use tracing_subscriber::filter::Targets;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
let targets = Targets::new()
.with_default(LevelFilter::INFO)
.with_target("mycelium::router", LevelFilter::WARN);
tracing_subscriber::registry()
.with(OsLogger::new("mycelium", "default"))
.with(targets)
.init();
}
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
static INIT_LOG: Lazy<()> = Lazy::new(|| {
setup_logging();
});
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
fn setup_logging_once() {
// Accessing the Lazy value will ensure setup_logging is called exactly once
let _ = &*INIT_LOG;
}
// Declare the channels globally so we can use them in the start & stop mycelium functions
type CommandChannelType = (Mutex<mpsc::Sender<Cmd>>, Mutex<mpsc::Receiver<Cmd>>);
static COMMAND_CHANNEL: Lazy<CommandChannelType> = Lazy::new(|| {
let (tx_cmd, rx_cmd) = mpsc::channel::<Cmd>(1);
(Mutex::new(tx_cmd), Mutex::new(rx_cmd))
});
type ResponseChannelType = (
Mutex<mpsc::Sender<Response>>,
Mutex<mpsc::Receiver<Response>>,
);
static RESPONSE_CHANNEL: Lazy<ResponseChannelType> = Lazy::new(|| {
let (tx_resp, rx_resp) = mpsc::channel::<Response>(1);
(Mutex::new(tx_resp), Mutex::new(rx_resp))
});
#[tokio::main]
#[allow(unused_variables)] // because tun_fd is only used on android, ios, and macos with the mactunfd feature
pub async fn start_mycelium(peers: Vec<String>, tun_fd: i32, priv_key: Vec<u8>) {
#[cfg(any(target_os = "android", target_os = "ios", target_os = "macos"))]
setup_logging_once();
info!("starting mycelium");
let endpoints: Vec<Endpoint> = peers
.into_iter()
.filter_map(|peer| peer.parse().ok())
.collect();
    let secret_key = match build_secret_key(priv_key).await {
        Ok(key) => key,
        Err(err) => {
            error!("failed to build secret key: {err}");
            return;
        }
    };
let config = Config {
node_key: secret_key,
peers: endpoints,
no_tun: false,
tcp_listen_port: DEFAULT_TCP_LISTEN_PORT,
quic_listen_port: None,
peer_discovery_port: None, // disable multicast discovery
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
tun_name: "tun0".to_string(),
metrics: NoMetrics,
private_network_config: None,
firewall_mark: None,
#[cfg(any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "mactunfd"),
))]
tun_fd: Some(tun_fd),
update_workers: 1,
cdn_cache: None,
};
    let node = match Node::new(config).await {
Ok(node) => {
info!("node successfully created");
node
}
Err(err) => {
error!("failed to create mycelium node: {err}");
return;
}
};
let mut rx = COMMAND_CHANNEL.1.lock().await;
loop {
tokio::select! {
_ = tokio::signal::ctrl_c() => {
info!("Received SIGINT, stopping mycelium node");
break;
}
cmd = rx.recv() => {
match cmd.unwrap().cmd {
CmdType::Stop => {
info!("Received stop command, stopping mycelium node");
send_response(vec![CHANNEL_MSG_OK.to_string()]).await;
break;
}
CmdType::Status => {
let mut vec: Vec<String> = Vec::new();
                        for info in node.peer_info() {
                            vec.push(format!("{},{},{}", info.endpoint.proto(), info.endpoint.address(), info.connection_state));
}
send_response(vec).await;
}
}
}
}
}
info!("mycelium stopped");
}
struct Cmd {
cmd: CmdType,
}
enum CmdType {
Stop,
Status,
}
struct Response {
response: Vec<String>,
}
// stop_mycelium returns a string with the status of the command
#[tokio::main]
pub async fn stop_mycelium() -> String {
if let Err(e) = send_command(CmdType::Stop).await {
return e.to_string();
}
match recv_response().await {
Ok(_) => CHANNEL_MSG_OK.to_string(),
Err(e) => e.to_string(),
}
}
// get_peer_status returns a vector of strings:
// the first element is always the status of the command (ok or error),
// the following elements are the peer statuses
#[tokio::main]
pub async fn get_peer_status() -> Vec<String> {
if let Err(e) = send_command(CmdType::Status).await {
return vec![e.to_string()];
}
match recv_response().await {
Ok(mut resp) => {
resp.insert(0, CHANNEL_MSG_OK.to_string());
resp
}
Err(e) => vec![e.to_string()],
}
}
#[tokio::main]
pub async fn get_status() -> Result<String, NodeError> {
Err(NodeError::NodeDead)
}
use thiserror::Error;
#[derive(Error, Debug)]
pub enum NodeError {
#[error("err_node_dead")]
NodeDead,
#[error("err_node_timeout")]
NodeTimeout,
}
async fn send_command(cmd_type: CmdType) -> Result<(), NodeError> {
let tx = COMMAND_CHANNEL.0.lock().await;
tokio::select! {
_ = sleep(Duration::from_secs(CHANNEL_TIMEOUT)) => {
Err(NodeError::NodeTimeout)
}
result = tx.send(Cmd { cmd: cmd_type }) => {
match result {
Ok(_) => Ok(()),
Err(_) => Err(NodeError::NodeDead)
}
}
}
}
async fn send_response(resp: Vec<String>) {
let tx = RESPONSE_CHANNEL.0.lock().await;
tokio::select! {
_ = sleep(Duration::from_secs(CHANNEL_TIMEOUT)) => {
error!("send_response timeout");
}
result = tx.send(Response { response: resp }) => {
match result {
Ok(_) => {},
Err(_) =>{error!("send_response failed");},
}
}
}
}
async fn recv_response() -> Result<Vec<String>, NodeError> {
let mut rx = RESPONSE_CHANNEL.1.lock().await;
let duration = Duration::from_secs(CHANNEL_TIMEOUT);
match timeout(duration, rx.recv()).await {
Ok(result) => match result {
Some(resp) => Ok(resp.response),
None => Err(NodeError::NodeDead),
},
Err(_) => Err(NodeError::NodeTimeout),
}
}
#[derive(Clone)]
pub struct NoMetrics;
impl Metrics for NoMetrics {}
/// The default port on the underlay to listen on for incoming TCP connections.
const DEFAULT_TCP_LISTEN_PORT: u16 = 9651;
fn convert_slice_to_array32(slice: &[u8]) -> Result<[u8; 32], std::array::TryFromSliceError> {
<[u8; 32]>::try_from(slice)
}
async fn build_secret_key<T>(bin: Vec<u8>) -> Result<T, io::Error>
where
    T: From<[u8; 32]>,
{
    // Surface an invalid key length as an io::Error instead of panicking.
    convert_slice_to_array32(bin.as_slice())
        .map(T::from)
        .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
}
/// Generate a secret key.
/// It is used by the android & ios apps.
pub fn generate_secret_key() -> Vec<u8> {
crypto::SecretKey::new().as_bytes().into()
}
/// Generate the node address from a secret key.
pub fn address_from_secret_key(data: Vec<u8>) -> String {
let data = <[u8; 32]>::try_from(data.as_slice()).unwrap();
let secret_key = crypto::SecretKey::from(data);
crypto::PublicKey::from(&secret_key).address().to_string()
}
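
The control plane above is a single-slot request/response scheme: one bounded(1) command channel and one bounded(1) response channel, each access wrapped in a timeout so a dead node thread surfaces as `NodeTimeout` instead of hanging the mobile caller. A stripped-down sketch of the same pattern (the names and the `u32` command type are illustrative only; assumes tokio with the rt, sync, time, and macros features):

```rust
use once_cell::sync::Lazy;
use tokio::sync::{mpsc, Mutex};
use tokio::time::{timeout, Duration};

// One global, single-slot command channel, as in the crate above.
static CHANNEL: Lazy<(Mutex<mpsc::Sender<u32>>, Mutex<mpsc::Receiver<u32>>)> =
    Lazy::new(|| {
        let (tx, rx) = mpsc::channel(1);
        (Mutex::new(tx), Mutex::new(rx))
    });

// Send a command, but surface a dead or stalled consumer as an error
// instead of blocking the caller forever.
async fn request(cmd: u32) -> Result<(), &'static str> {
    let tx = CHANNEL.0.lock().await;
    timeout(Duration::from_secs(2), tx.send(cmd))
        .await
        .map_err(|_| "timeout")? // consumer too slow: channel slot still full
        .map_err(|_| "node dead") // receiver dropped
}

#[tokio::main]
async fn main() {
    assert_eq!(request(1).await, Ok(()));
    // Slot now full and nobody is receiving: the next send times out.
    assert_eq!(request(2).await, Err("timeout"));
}
```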

View File

@@ -0,0 +1,37 @@
[package]
name = "mycelium-api"
version = "0.6.1"
edition = "2021"
license-file = "../LICENSE"
readme = "../README.md"
[features]
message = ["mycelium/message"]
[dependencies]
axum = { version = "0.8.4", default-features = false, features = [
"http1",
"http2",
"json",
"query",
"tokio",
] }
base64 = "0.22.1"
jsonrpsee = { version = "0.25.1", features = [
"server",
"macros",
"jsonrpsee-types",
] }
serde_json = "1.0.140"
tracing = "0.1.41"
tokio = { version = "1.46.1", default-features = false, features = [
"net",
"rt",
] }
mycelium = { path = "../mycelium" }
mycelium-metrics = { path = "../mycelium-metrics", features = ["prometheus"] }
serde = { version = "1.0.219", features = ["derive"] }
async-trait = "0.1.88"
[dev-dependencies]
serde_json = "1.0.140"

View File

@@ -0,0 +1,492 @@
use core::fmt;
use std::{net::IpAddr, net::SocketAddr, str::FromStr, sync::Arc};
use axum::{
extract::{Path, State},
http::StatusCode,
routing::{delete, get},
Json, Router,
};
use serde::{de, Deserialize, Deserializer, Serialize};
use tokio::{sync::Mutex, time::Instant};
use tracing::{debug, error};
use mycelium::{
crypto::PublicKey,
endpoint::Endpoint,
metrics::Metrics,
peer_manager::{PeerExists, PeerNotFound, PeerStats},
};
const INFINITE_STR: &str = "infinite";
#[cfg(feature = "message")]
mod message;
#[cfg(feature = "message")]
pub use message::{MessageDestination, MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
pub use rpc::JsonRpc;
// JSON-RPC API implementation
pub mod rpc;
/// Http API server handle. The server is spawned in a background task. If this handle is dropped,
/// the server is terminated.
pub struct Http {
/// Channel to send cancellation to the http api server. We just keep a reference to it since
/// dropping it will also cancel the receiver and thus the server.
_cancel_tx: tokio::sync::oneshot::Sender<()>,
}
#[derive(Clone)]
/// Shared state accessible in HTTP endpoint handlers.
pub struct ServerState<M> {
    /// Access to the [`node`](mycelium::Node) state.
pub node: Arc<Mutex<mycelium::Node<M>>>,
}
impl Http {
/// Spawns a new HTTP API server on the provided listening address.
pub fn spawn<M>(node: Arc<Mutex<mycelium::Node<M>>>, listen_addr: SocketAddr) -> Self
where
M: Metrics + Clone + Send + Sync + 'static,
{
let server_state = ServerState { node };
let admin_routes = Router::new()
.route("/admin", get(get_info))
.route("/admin/peers", get(get_peers).post(add_peer))
.route("/admin/peers/{endpoint}", delete(delete_peer))
.route("/admin/routes/selected", get(get_selected_routes))
.route("/admin/routes/fallback", get(get_fallback_routes))
.route("/admin/routes/queried", get(get_queried_routes))
.route("/admin/routes/no_route", get(get_no_route_entries))
.route("/pubkey/{ip}", get(get_pubk_from_ip))
.with_state(server_state.clone());
let app = Router::new().nest("/api/v1", admin_routes);
#[cfg(feature = "message")]
let app = app.nest("/api/v1", message::message_router_v1(server_state));
let (_cancel_tx, cancel_rx) = tokio::sync::oneshot::channel();
tokio::spawn(async move {
let listener = match tokio::net::TcpListener::bind(listen_addr).await {
Ok(listener) => listener,
Err(e) => {
error!(err=%e, "Failed to bind listener for Http Api server");
error!("API disabled");
return;
}
};
let server =
axum::serve(listener, app.into_make_service()).with_graceful_shutdown(async {
cancel_rx.await.ok();
});
if let Err(e) = server.await {
error!(err=%e, "Http API server error");
}
});
Http { _cancel_tx }
}
}
/// Get the stats of the current known peers
async fn get_peers<M>(State(state): State<ServerState<M>>) -> Json<Vec<PeerStats>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Fetching peer stats");
Json(state.node.lock().await.peer_info())
}
/// Payload of an add_peer request
#[derive(Deserialize, Serialize)]
pub struct AddPeer {
/// The endpoint used to connect to the peer
pub endpoint: String,
}
/// Add a new peer to the system
async fn add_peer<M>(
State(state): State<ServerState<M>>,
Json(payload): Json<AddPeer>,
) -> Result<StatusCode, (StatusCode, String)>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(
peer.endpoint = payload.endpoint,
"Attempting to add peer to the system"
);
let endpoint = match Endpoint::from_str(&payload.endpoint) {
Ok(endpoint) => endpoint,
Err(e) => return Err((StatusCode::BAD_REQUEST, e.to_string())),
};
match state.node.lock().await.add_peer(endpoint) {
Ok(()) => Ok(StatusCode::NO_CONTENT),
Err(PeerExists) => Err((
StatusCode::CONFLICT,
"A peer identified by that endpoint already exists".to_string(),
)),
}
}
/// remove an existing peer from the system
async fn delete_peer<M>(
State(state): State<ServerState<M>>,
Path(endpoint): Path<String>,
) -> Result<StatusCode, (StatusCode, String)>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(peer.endpoint=%endpoint, "Attempting to remove peer from the system");
let endpoint = match Endpoint::from_str(&endpoint) {
Ok(endpoint) => endpoint,
Err(e) => return Err((StatusCode::BAD_REQUEST, e.to_string())),
};
match state.node.lock().await.remove_peer(endpoint) {
Ok(()) => Ok(StatusCode::NO_CONTENT),
Err(PeerNotFound) => Err((
StatusCode::NOT_FOUND,
"A peer identified by that endpoint does not exist".to_string(),
)),
}
}
/// Serializable counterpart of the core `Metric` type, used in the API.
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord)]
pub enum Metric {
/// Finite metric
Value(u16),
/// Infinite metric
Infinite,
}
/// Info about a route. This uses base types only to avoid having to introduce too many Serialize
/// bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct Route {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// Next hop of the route, in the underlay.
pub next_hop: String,
/// Computed metric of the route.
pub metric: Metric,
/// Sequence number of the route.
pub seqno: u16,
}
/// List all currently selected routes.
async fn get_selected_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<Route>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading selected routes");
let routes = state
.node
.lock()
.await
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Json(routes)
}
/// List all active fallback routes.
async fn get_fallback_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<Route>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading fallback routes");
let routes = state
.node
.lock()
.await
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Json(routes)
}
/// Info about a queried subnet. This uses base types only to avoid having to introduce too
/// many Serialize bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct QueriedSubnet {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// The amount of time left before the query expires.
pub expiration: String,
}
async fn get_queried_routes<M>(State(state): State<ServerState<M>>) -> Json<Vec<QueriedSubnet>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading queried subnets");
let queries = state
.node
.lock()
.await
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(Instant::now())
.as_secs()
.to_string(),
})
.collect();
Json(queries)
}
/// Info about a subnet with no route. This uses base types only to avoid having to introduce too
/// many Serialize bounds in the core types.
#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, PartialOrd, Eq, Ord)]
#[serde(rename_all = "camelCase")]
pub struct NoRouteSubnet {
/// We convert the [`subnet`](Subnet) to a string to avoid introducing a bound on the actual
/// type.
pub subnet: String,
/// The amount of time left before the query expires.
pub expiration: String,
}
async fn get_no_route_entries<M>(State(state): State<ServerState<M>>) -> Json<Vec<NoRouteSubnet>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Loading queried subnets");
    let entries = state
.node
.lock()
.await
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(Instant::now())
.as_secs()
.to_string(),
})
.collect();
    Json(entries)
}
/// General info about a node.
#[derive(Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Info {
/// The overlay subnet in use by the node.
pub node_subnet: String,
/// The public key of the node
pub node_pubkey: PublicKey,
}
/// Get general info about the node.
async fn get_info<M>(State(state): State<ServerState<M>>) -> Json<Info>
where
M: Metrics + Clone + Send + Sync + 'static,
{
let info = state.node.lock().await.info();
Json(Info {
node_subnet: info.node_subnet.to_string(),
node_pubkey: info.node_pubkey,
})
}
/// Public key from a node.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PubKey {
/// The public key from the node
pub public_key: PublicKey,
}
/// Get public key from IP.
async fn get_pubk_from_ip<M>(
State(state): State<ServerState<M>>,
Path(ip): Path<IpAddr>,
) -> Result<Json<PubKey>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
match state.node.lock().await.get_pubkey_from_ip(ip) {
Some(pubkey) => Ok(Json(PubKey { public_key: pubkey })),
None => Err(StatusCode::NOT_FOUND),
}
}
impl Serialize for Metric {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
match self {
Self::Infinite => serializer.serialize_str(INFINITE_STR),
Self::Value(v) => serializer.serialize_u16(*v),
}
}
}
impl<'de> Deserialize<'de> for Metric {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MetricVisitor;
impl serde::de::Visitor<'_> for MetricVisitor {
type Value = Metric;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a string or a u16")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
match value {
INFINITE_STR => Ok(Metric::Infinite),
_ => Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(value),
&format!("expected '{INFINITE_STR}'").as_str(),
)),
}
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
if value <= u16::MAX as u64 {
Ok(Metric::Value(value as u16))
} else {
Err(E::invalid_value(
de::Unexpected::Unsigned(value),
&"expected a non-negative integer within the range of u16",
))
}
}
}
deserializer.deserialize_any(MetricVisitor)
}
}
impl fmt::Display for Metric {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Value(val) => write!(f, "{val}"),
Self::Infinite => write!(f, "{INFINITE_STR}"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use serde_json::json;
#[test]
fn finite_metric_serialization() {
let metric = super::Metric::Value(10);
let s = serde_json::to_string(&metric).expect("can encode finite metric");
assert_eq!("10", s);
}
#[test]
fn infinite_metric_serialization() {
let metric = super::Metric::Infinite;
let s = serde_json::to_string(&metric).expect("can encode infinite metric");
assert_eq!(format!("\"{INFINITE_STR}\""), s);
}
#[test]
fn test_deserialize_metric() {
// Test deserialization of a Metric::Value
let json_value = json!(20);
let metric: Metric = serde_json::from_value(json_value).unwrap();
assert_eq!(metric, Metric::Value(20));
// Test deserialization of a Metric::Infinite
let json_infinite = json!(INFINITE_STR);
let metric: Metric = serde_json::from_value(json_infinite).unwrap();
assert_eq!(metric, Metric::Infinite);
// Test deserialization of an invalid metric
let json_invalid = json!("invalid");
let result: Result<Metric, _> = serde_json::from_value(json_invalid);
assert!(result.is_err());
}
#[test]
fn test_deserialize_route() {
let json_data = r#"
[
{"subnet":"406:1d77:2438:aa7c::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":20,"seqno":0},
{"subnet":"407:8458:dbf5:4ed7::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":174,"seqno":0},
{"subnet":"408:7ba3:3a4d:808a::/64","nextHop":"TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651","metric":"infinite","seqno":0}
]
"#;
let routes: Vec<Route> = serde_json::from_str(json_data).unwrap();
assert_eq!(routes[0], Route {
subnet: "406:1d77:2438:aa7c::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Value(20),
seqno: 0
});
assert_eq!(routes[1], Route {
subnet: "407:8458:dbf5:4ed7::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Value(174),
seqno: 0
});
assert_eq!(routes[2], Route {
subnet: "408:7ba3:3a4d:808a::/64".to_string(),
next_hop: "TCP [2a02:1811:d584:7400:c503:ff39:de03:9e44]:45694 <-> [2a01:4f8:212:fa6::2]:9651".to_string(),
metric: Metric::Infinite,
seqno: 0
});
}
}
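
Putting the pieces together, spawning the HTTP API only requires a node wrapped in `Arc<Mutex<_>>` and a listen address, and the returned handle must be kept alive. A minimal sketch, assuming `mycelium` and `mycelium-api` as dependencies and an already-constructed node (the `[::1]:8989` address is an arbitrary choice):

```rust
use std::net::SocketAddr;
use std::sync::Arc;

use tokio::sync::Mutex;

use mycelium::metrics::Metrics;

fn serve_api<M>(node: mycelium::Node<M>) -> mycelium_api::Http
where
    M: Metrics + Clone + Send + Sync + 'static,
{
    // Http::spawn uses tokio::spawn internally, so this must be called
    // from within a running tokio runtime. Dropping the returned handle
    // shuts the server down.
    let listen_addr: SocketAddr = "[::1]:8989".parse().expect("valid listen address");
    mycelium_api::Http::spawn(Arc::new(Mutex::new(node)), listen_addr)
}
```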

View File

@@ -0,0 +1,652 @@
use std::{net::IpAddr, ops::Deref, time::Duration};
use axum::{
extract::{Path, Query, State},
http::StatusCode,
routing::{delete, get, post},
Json, Router,
};
use serde::{Deserialize, Serialize};
use tracing::debug;
use mycelium::{
crypto::PublicKey,
message::{MessageId, MessageInfo},
metrics::Metrics,
subnet::Subnet,
};
use std::path::PathBuf;
use super::ServerState;
/// Default amount of time to try and send a message if it is not explicitly specified.
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
/// Return a router which has message endpoints and their handlers mounted.
pub fn message_router_v1<M>(server_state: ServerState<M>) -> Router
where
M: Metrics + Clone + Send + Sync + 'static,
{
Router::new()
.route("/messages", get(get_message).post(push_message))
.route("/messages/status/{id}", get(message_status))
.route("/messages/reply/{id}", post(reply_message))
// Topic configuration endpoints
.route(
"/messages/topics/default",
get(get_default_topic_action).put(set_default_topic_action),
)
.route("/messages/topics", get(get_topics).post(add_topic))
.route("/messages/topics/{topic}", delete(remove_topic))
.route(
"/messages/topics/{topic}/sources",
get(get_topic_sources).post(add_topic_source),
)
.route(
"/messages/topics/{topic}/sources/{subnet}",
delete(remove_topic_source),
)
.route(
"/messages/topics/{topic}/forward",
get(get_topic_forward_socket)
.put(set_topic_forward_socket)
.delete(remove_topic_forward_socket),
)
.with_state(server_state)
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageSendInfo {
pub dst: MessageDestination,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
pub topic: Option<Vec<u8>>,
#[serde(with = "base64::binary")]
pub payload: Vec<u8>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum MessageDestination {
Ip(IpAddr),
Pk(PublicKey),
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageReceiveInfo {
pub id: MessageId,
pub src_ip: IpAddr,
pub src_pk: PublicKey,
pub dst_ip: IpAddr,
pub dst_pk: PublicKey,
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
pub topic: Option<Vec<u8>>,
#[serde(with = "base64::binary")]
pub payload: Vec<u8>,
}
impl MessageDestination {
/// Get the IP address of the destination.
fn ip(self) -> IpAddr {
match self {
MessageDestination::Ip(ip) => ip,
MessageDestination::Pk(pk) => IpAddr::V6(pk.address()),
}
}
}
#[derive(Deserialize)]
struct GetMessageQuery {
peek: Option<bool>,
timeout: Option<u64>,
/// Optional filter for start of the message, base64 encoded.
#[serde(default)]
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "base64::optional_binary")]
topic: Option<Vec<u8>>,
}
impl GetMessageQuery {
/// Did the query indicate we should peek the message instead of pop?
fn peek(&self) -> bool {
matches!(self.peek, Some(true))
}
    /// Amount of seconds to wait while trying to get a message.
fn timeout_secs(&self) -> u64 {
self.timeout.unwrap_or(0)
}
}
async fn get_message<M>(
State(state): State<ServerState<M>>,
Query(query): Query<GetMessageQuery>,
) -> Result<Json<MessageReceiveInfo>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(
"Attempt to get message, peek {}, timeout {} seconds",
query.peek(),
query.timeout_secs()
);
    // A timeout of 0 seconds essentially means get a message if there is one, and return
    // immediately if there isn't. This is the result of the implementation of Timeout, which does a
    // poll of the internal future first, before polling the delay.
tokio::time::timeout(
Duration::from_secs(query.timeout_secs()),
state
.node
.lock()
.await
.get_message(!query.peek(), query.topic),
)
.await
.or(Err(StatusCode::NO_CONTENT))
.map(|m| {
Json(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() {
None
} else {
Some(m.topic)
},
payload: m.data,
})
})
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct MessageIdReply {
pub id: MessageId,
}
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(untagged)]
pub enum PushMessageResponse {
Reply(MessageReceiveInfo),
Id(MessageIdReply),
}
#[derive(Clone, Deserialize)]
struct PushMessageQuery {
reply_timeout: Option<u64>,
}
impl PushMessageQuery {
    /// Whether the user requested to wait for the reply.
fn await_reply(&self) -> bool {
self.reply_timeout.is_some()
}
/// Amount of seconds to wait for the reply.
fn timeout(&self) -> u64 {
self.reply_timeout.unwrap_or(0)
}
}
async fn push_message<M>(
State(state): State<ServerState<M>>,
Query(query): Query<PushMessageQuery>,
Json(message_info): Json<MessageSendInfo>,
) -> Result<(StatusCode, Json<PushMessageResponse>), StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
let dst = message_info.dst.ip();
debug!(
message.dst=%dst,
message.len=message_info.payload.len(),
"Pushing new message to stack",
);
let (id, sub) = match state.node.lock().await.push_message(
dst,
message_info.payload,
message_info.topic,
DEFAULT_MESSAGE_TRY_DURATION,
query.await_reply(),
) {
Ok((id, sub)) => (id, sub),
Err(_) => {
return Err(StatusCode::BAD_REQUEST);
}
};
if !query.await_reply() {
// If we don't wait for the reply just return here.
return Ok((
StatusCode::CREATED,
Json(PushMessageResponse::Id(MessageIdReply { id })),
));
}
let mut sub = sub.unwrap();
tokio::select! {
sub_res = sub.changed() => {
match sub_res {
Ok(_) => {
if let Some(m) = sub.borrow().deref() {
Ok((StatusCode::OK, Json(PushMessageResponse::Reply(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
payload: m.data.clone(),
}))))
} else {
                        // This happens if a `None` value is sent, which should not happen.
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
Err(_) => {
// This happens if the sender drops, which should not happen.
Err(StatusCode::INTERNAL_SERVER_ERROR)
}
}
},
_ = tokio::time::sleep(Duration::from_secs(query.timeout())) => {
// Timeout expired while waiting for reply
Ok((StatusCode::REQUEST_TIMEOUT, Json(PushMessageResponse::Id(MessageIdReply { id }))))
}
}
}
async fn reply_message<M>(
State(state): State<ServerState<M>>,
Path(id): Path<MessageId>,
Json(message_info): Json<MessageSendInfo>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
let dst = message_info.dst.ip();
debug!(
message.id=id.as_hex(),
message.dst=%dst,
message.len=message_info.payload.len(),
"Pushing new reply to message stack",
);
state.node.lock().await.reply_message(
id,
dst,
message_info.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
StatusCode::NO_CONTENT
}
async fn message_status<M>(
State(state): State<ServerState<M>>,
Path(id): Path<MessageId>,
) -> Result<Json<MessageInfo>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(message.id=%id.as_hex(), "Fetching message status");
state
.node
.lock()
.await
.message_status(id)
.ok_or(StatusCode::NOT_FOUND)
.map(Json)
}
/// Module to implement base64 decoding and encoding
/// Sourced from https://users.rust-lang.org/t/serialize-a-vec-u8-to-json-as-base64/57781, with some
/// adaptations to work with the new version of the base64 crate
mod base64 {
use base64::engine::{GeneralPurpose, GeneralPurposeConfig};
use base64::{alphabet, Engine};
const B64ENGINE: GeneralPurpose = base64::engine::general_purpose::GeneralPurpose::new(
&alphabet::STANDARD,
GeneralPurposeConfig::new(),
);
pub fn encode(input: &[u8]) -> String {
B64ENGINE.encode(input)
}
pub fn decode(input: &[u8]) -> Result<Vec<u8>, base64::DecodeError> {
B64ENGINE.decode(input)
}
pub mod binary {
use super::B64ENGINE;
use base64::Engine;
use serde::{Deserialize, Serialize};
use serde::{Deserializer, Serializer};
pub fn serialize<S: Serializer>(v: &Vec<u8>, s: S) -> Result<S::Ok, S::Error> {
let base64 = B64ENGINE.encode(v);
String::serialize(&base64, s)
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Vec<u8>, D::Error> {
let base64 = String::deserialize(d)?;
B64ENGINE
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
}
}
pub mod optional_binary {
use super::B64ENGINE;
use base64::Engine;
use serde::{Deserialize, Serialize};
use serde::{Deserializer, Serializer};
pub fn serialize<S: Serializer>(v: &Option<Vec<u8>>, s: S) -> Result<S::Ok, S::Error> {
if let Some(v) = v {
let base64 = B64ENGINE.encode(v);
String::serialize(&base64, s)
} else {
<Option<String>>::serialize(&None, s)
}
}
pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Vec<u8>>, D::Error> {
if let Some(base64) = <Option<String>>::deserialize(d)? {
B64ENGINE
.decode(base64.as_bytes())
.map_err(serde::de::Error::custom)
.map(Option::Some)
} else {
Ok(None)
}
}
}
}
// Topic configuration API
/// Response for the default topic action
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DefaultTopicActionResponse {
accept: bool,
}
/// Request to set the default topic action
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DefaultTopicActionRequest {
accept: bool,
}
/// Request to add a source to a topic whitelist
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TopicSourceRequest {
subnet: String,
}
/// Request to set a forward socket for a topic
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct TopicForwardSocketRequest {
socket_path: String,
}
/// Get the default topic action (accept or reject)
async fn get_default_topic_action<M>(
State(state): State<ServerState<M>>,
) -> Json<DefaultTopicActionResponse>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting default topic action");
let accept = state.node.lock().await.unconfigure_topic_action();
Json(DefaultTopicActionResponse { accept })
}
/// Set the default topic action (accept or reject)
async fn set_default_topic_action<M>(
State(state): State<ServerState<M>>,
Json(request): Json<DefaultTopicActionRequest>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(accept=%request.accept, "Setting default topic action");
state
.node
.lock()
.await
.accept_unconfigured_topic(request.accept);
StatusCode::NO_CONTENT
}
/// Get all whitelisted topics
async fn get_topics<M>(State(state): State<ServerState<M>>) -> Json<Vec<String>>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting all whitelisted topics");
let node = state.node.lock().await;
// Get the whitelist from the node
let topics = node.topics();
    // Base64-encode the raw topics for the JSON response
let topics: Vec<String> = topics.iter().map(|topic| base64::encode(topic)).collect();
Json(topics)
}
/// Add a topic to the whitelist
async fn add_topic<M>(
State(state): State<ServerState<M>>,
Json(topic_info): Json<Vec<u8>>,
) -> StatusCode
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Adding topic to whitelist");
state.node.lock().await.add_topic_whitelist(topic_info);
StatusCode::CREATED
}
/// Remove a topic from the whitelist
async fn remove_topic<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing topic from whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state.node.lock().await.remove_topic_whitelist(topic_bytes);
Ok(StatusCode::NO_CONTENT)
}
/// Get all sources for a topic
async fn get_topic_sources<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<Json<Vec<String>>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting sources for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let node = state.node.lock().await;
// Get the whitelist from the node
let sources = node.topic_allowed_sources(&topic_bytes);
    // Return the sources if the topic is configured
if let Some(sources) = sources {
let sources = sources.into_iter().map(|s| s.to_string()).collect();
Ok(Json(sources))
} else {
Err(StatusCode::NOT_FOUND)
}
}
/// Add a source to a topic whitelist
async fn add_topic_source<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
Json(request): Json<TopicSourceRequest>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Adding source to topic whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
// Parse the subnet
let subnet = match request.subnet.parse::<Subnet>() {
Ok(subnet) => subnet,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.add_topic_whitelist_src(topic_bytes, subnet);
Ok(StatusCode::CREATED)
}
/// Remove a source from a topic whitelist
async fn remove_topic_source<M>(
State(state): State<ServerState<M>>,
Path((topic, subnet_str)): Path<(String, String)>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing source from topic whitelist");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
// Parse the subnet
let subnet = match subnet_str.parse::<Subnet>() {
Ok(subnet) => subnet,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.remove_topic_whitelist_src(topic_bytes, subnet);
Ok(StatusCode::NO_CONTENT)
}
/// Get the forward socket for a topic
async fn get_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<Json<Option<String>>, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Getting forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let node = state.node.lock().await;
let socket_path = node
.get_topic_forward_socket(&topic_bytes)
.map(|p| p.to_string_lossy().to_string());
Ok(Json(socket_path))
}
/// Set the forward socket for a topic
async fn set_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
Json(request): Json<TopicForwardSocketRequest>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Setting forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
let socket_path = PathBuf::from(request.socket_path);
state
.node
.lock()
.await
.set_topic_forward_socket(topic_bytes, socket_path);
Ok(StatusCode::NO_CONTENT)
}
/// Remove the forward socket for a topic
async fn remove_topic_forward_socket<M>(
State(state): State<ServerState<M>>,
Path(topic): Path<String>,
) -> Result<StatusCode, StatusCode>
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!("Removing forward socket for topic");
// Decode the base64 topic
let topic_bytes = match base64::decode(topic.as_bytes()) {
Ok(bytes) => bytes,
Err(_) => return Err(StatusCode::BAD_REQUEST),
};
state
.node
.lock()
.await
.delete_topic_forward_socket(topic_bytes);
Ok(StatusCode::NO_CONTENT)
}

View File

@@ -0,0 +1,752 @@
//! JSON-RPC API implementation for Mycelium
mod spec;
use std::net::SocketAddr;
#[cfg(feature = "message")]
use std::ops::Deref;
use std::str::FromStr;
use std::sync::Arc;
#[cfg(feature = "message")]
use base64::Engine;
use jsonrpsee::core::RpcResult;
use jsonrpsee::proc_macros::rpc;
use jsonrpsee::server::{ServerBuilder, ServerHandle};
use jsonrpsee::types::{ErrorCode, ErrorObject};
#[cfg(feature = "message")]
use mycelium::subnet::Subnet;
#[cfg(feature = "message")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "message")]
use std::path::PathBuf;
use tokio::sync::Mutex;
#[cfg(feature = "message")]
use tokio::time::Duration;
use tracing::debug;
use crate::{Info, Metric, NoRouteSubnet, QueriedSubnet, Route, ServerState};
use mycelium::crypto::PublicKey;
use mycelium::endpoint::Endpoint;
use mycelium::metrics::Metrics;
use mycelium::peer_manager::{PeerExists, PeerNotFound, PeerStats};
use self::spec::OPENRPC_SPEC;
// Topic configuration struct for RPC API
#[cfg(feature = "message")]
#[derive(Clone, Serialize, Deserialize)]
struct TopicInfo {
topic: String,
sources: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
forward_socket: Option<String>,
}
// Define the base RPC API trait using jsonrpsee macros
#[rpc(server)]
pub trait MyceliumApi {
// Admin methods
#[method(name = "getInfo")]
async fn get_info(&self) -> RpcResult<Info>;
#[method(name = "getPublicKeyFromIp")]
async fn get_pubkey_from_ip(&self, ip: String) -> RpcResult<PublicKey>;
// Peer methods
#[method(name = "getPeers")]
async fn get_peers(&self) -> RpcResult<Vec<PeerStats>>;
#[method(name = "addPeer")]
async fn add_peer(&self, endpoint: String) -> RpcResult<bool>;
#[method(name = "deletePeer")]
async fn delete_peer(&self, endpoint: String) -> RpcResult<bool>;
// Route methods
#[method(name = "getSelectedRoutes")]
async fn get_selected_routes(&self) -> RpcResult<Vec<Route>>;
#[method(name = "getFallbackRoutes")]
async fn get_fallback_routes(&self) -> RpcResult<Vec<Route>>;
#[method(name = "getQueriedSubnets")]
async fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>>;
#[method(name = "getNoRouteEntries")]
async fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>>;
// OpenRPC discovery
#[method(name = "rpc.discover")]
async fn discover(&self) -> RpcResult<serde_json::Value>;
}
// Define a separate message API trait that is only compiled when the message feature is enabled
#[cfg(feature = "message")]
#[rpc(server)]
pub trait MyceliumMessageApi {
// Message methods
#[method(name = "popMessage")]
async fn pop_message(
&self,
peek: Option<bool>,
timeout: Option<u64>,
topic: Option<String>,
) -> RpcResult<crate::message::MessageReceiveInfo>;
#[method(name = "pushMessage")]
async fn push_message(
&self,
message: crate::message::MessageSendInfo,
reply_timeout: Option<u64>,
) -> RpcResult<crate::message::PushMessageResponse>;
#[method(name = "pushMessageReply")]
async fn push_message_reply(
&self,
id: String,
message: crate::message::MessageSendInfo,
) -> RpcResult<bool>;
#[method(name = "getMessageInfo")]
async fn get_message_info(&self, id: String) -> RpcResult<mycelium::message::MessageInfo>;
// Topic configuration methods
#[method(name = "getDefaultTopicAction")]
async fn get_default_topic_action(&self) -> RpcResult<bool>;
#[method(name = "setDefaultTopicAction")]
async fn set_default_topic_action(&self, accept: bool) -> RpcResult<bool>;
#[method(name = "getTopics")]
async fn get_topics(&self) -> RpcResult<Vec<String>>;
#[method(name = "addTopic")]
async fn add_topic(&self, topic: String) -> RpcResult<bool>;
#[method(name = "removeTopic")]
async fn remove_topic(&self, topic: String) -> RpcResult<bool>;
#[method(name = "getTopicSources")]
async fn get_topic_sources(&self, topic: String) -> RpcResult<Vec<String>>;
#[method(name = "addTopicSource")]
async fn add_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool>;
#[method(name = "removeTopicSource")]
async fn remove_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool>;
#[method(name = "getTopicForwardSocket")]
async fn get_topic_forward_socket(&self, topic: String) -> RpcResult<Option<String>>;
#[method(name = "setTopicForwardSocket")]
async fn set_topic_forward_socket(&self, topic: String, socket_path: String)
-> RpcResult<bool>;
#[method(name = "removeTopicForwardSocket")]
async fn remove_topic_forward_socket(&self, topic: String) -> RpcResult<bool>;
}
// Implement the API trait
#[derive(Clone)]
struct RPCApi<M> {
state: Arc<ServerState<M>>,
}
// Implement the base API trait
#[async_trait::async_trait]
impl<M> MyceliumApiServer for RPCApi<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
async fn get_info(&self) -> RpcResult<Info> {
debug!("Getting node info via RPC");
let node_info = self.state.node.lock().await.info();
Ok(Info {
node_subnet: node_info.node_subnet.to_string(),
node_pubkey: node_info.node_pubkey,
})
}
async fn get_pubkey_from_ip(&self, ip_str: String) -> RpcResult<PublicKey> {
debug!(ip = %ip_str, "Getting public key from IP via RPC");
let ip = std::net::IpAddr::from_str(&ip_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32007)))?;
let pubkey = self.state.node.lock().await.get_pubkey_from_ip(ip);
match pubkey {
Some(pk) => Ok(pk),
None => Err(ErrorObject::from(ErrorCode::from(-32008))),
}
}
async fn get_peers(&self) -> RpcResult<Vec<PeerStats>> {
debug!("Fetching peer stats via RPC");
let peers = self.state.node.lock().await.peer_info();
Ok(peers)
}
async fn add_peer(&self, endpoint_str: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint_str,
"Attempting to add peer to the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32009)))?;
match self.state.node.lock().await.add_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerExists) => Err(ErrorObject::from(ErrorCode::from(-32010))),
}
}
async fn delete_peer(&self, endpoint_str: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint_str,
"Attempting to remove peer from the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint_str)
.map_err(|_| ErrorObject::from(ErrorCode::from(-32012)))?;
match self.state.node.lock().await.remove_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerNotFound) => Err(ErrorObject::from(ErrorCode::from(-32011))),
}
}
async fn get_selected_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading selected routes via RPC");
let routes = self
.state
.node
.lock()
.await
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
async fn get_fallback_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading fallback routes via RPC");
let routes = self
.state
.node
.lock()
.await
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
async fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>> {
debug!("Loading queried subnets via RPC");
let queries = self
.state
.node
.lock()
.await
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(queries)
}
async fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>> {
debug!("Loading no route entries via RPC");
let entries = self
.state
.node
.lock()
.await
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(entries)
}
async fn discover(&self) -> RpcResult<serde_json::Value> {
let spec = serde_json::from_str::<serde_json::Value>(OPENRPC_SPEC)
.expect("Failed to parse OpenRPC spec");
Ok(spec)
}
}
// Implement the message API trait only when the message feature is enabled
#[cfg(feature = "message")]
#[async_trait::async_trait]
impl<M> MyceliumMessageApiServer for RPCApi<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
async fn pop_message(
&self,
peek: Option<bool>,
timeout: Option<u64>,
topic: Option<String>,
) -> RpcResult<crate::message::MessageReceiveInfo> {
debug!(
"Attempt to get message via RPC, peek {}, timeout {} seconds",
peek.unwrap_or(false),
timeout.unwrap_or(0)
);
let topic_bytes = if let Some(topic_str) = topic {
Some(
base64::engine::general_purpose::STANDARD
.decode(topic_str.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32013)))?,
)
} else {
None
};
// A timeout of 0 seconds essentially means get a message if there is one, and return
// immediately if there isn't.
let result = tokio::time::timeout(
Duration::from_secs(timeout.unwrap_or(0)),
self.state
.node
.lock()
.await
.get_message(!peek.unwrap_or(false), topic_bytes),
)
.await;
match result {
Ok(m) => Ok(crate::message::MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() {
None
} else {
Some(m.topic)
},
payload: m.data,
}),
_ => Err(ErrorObject::from(ErrorCode::from(-32014))),
}
}
async fn push_message(
&self,
message: crate::message::MessageSendInfo,
reply_timeout: Option<u64>,
) -> RpcResult<crate::message::PushMessageResponse> {
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
let result = self.state.node.lock().await.push_message(
dst,
message.payload,
message.topic,
DEFAULT_MESSAGE_TRY_DURATION,
reply_timeout.is_some(),
);
let (id, sub) = match result {
Ok((id, sub)) => (id, sub),
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32015)));
}
};
if reply_timeout.is_none() {
// If we don't wait for the reply just return here.
return Ok(crate::message::PushMessageResponse::Id(
crate::message::MessageIdReply { id },
));
}
let mut sub = sub.unwrap();
// Wait for reply with timeout
tokio::select! {
sub_res = sub.changed() => {
match sub_res {
Ok(_) => {
if let Some(m) = sub.borrow().deref() {
Ok(crate::message::PushMessageResponse::Reply(crate::message::MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
payload: m.data.clone(),
}))
} else {
                        // This happens if a `None` value is sent, which should not happen.
Err(ErrorObject::from(ErrorCode::from(-32016)))
}
}
Err(_) => {
// This happens if the sender drops, which should not happen.
Err(ErrorObject::from(ErrorCode::from(-32017)))
}
}
},
_ = tokio::time::sleep(Duration::from_secs(reply_timeout.unwrap_or(0))) => {
// Timeout expired while waiting for reply
Ok(crate::message::PushMessageResponse::Id(crate::message::MessageIdReply { id }))
}
}
}
async fn push_message_reply(
&self,
id: String,
message: crate::message::MessageSendInfo,
) -> RpcResult<bool> {
let message_id = match mycelium::message::MessageId::from_hex(id.as_bytes()) {
Ok(id) => id,
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32018)));
}
};
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.id=id,
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new reply to message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
self.state.node.lock().await.reply_message(
message_id,
dst,
message.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
Ok(true)
}
async fn get_message_info(&self, id: String) -> RpcResult<mycelium::message::MessageInfo> {
let message_id = match mycelium::message::MessageId::from_hex(id.as_bytes()) {
Ok(id) => id,
Err(_) => {
return Err(ErrorObject::from(ErrorCode::from(-32020)));
}
};
debug!(message.id=%id, "Fetching message status via RPC");
let result = self.state.node.lock().await.message_status(message_id);
match result {
Some(info) => Ok(info),
None => Err(ErrorObject::from(ErrorCode::from(-32019))),
}
}
// Topic configuration methods implementation
async fn get_default_topic_action(&self) -> RpcResult<bool> {
debug!("Getting default topic action via RPC");
let accept = self.state.node.lock().await.unconfigure_topic_action();
Ok(accept)
}
async fn set_default_topic_action(&self, accept: bool) -> RpcResult<bool> {
debug!(accept=%accept, "Setting default topic action via RPC");
self.state
.node
.lock()
.await
.accept_unconfigured_topic(accept);
Ok(true)
}
async fn get_topics(&self) -> RpcResult<Vec<String>> {
debug!("Getting all whitelisted topics via RPC");
let topics = self
.state
.node
.lock()
.await
.topics()
.into_iter()
.map(|topic| base64::engine::general_purpose::STANDARD.encode(&topic))
.collect();
Ok(topics)
}
async fn add_topic(&self, topic: String) -> RpcResult<bool> {
debug!("Adding topic to whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.add_topic_whitelist(topic_bytes);
Ok(true)
}
async fn remove_topic(&self, topic: String) -> RpcResult<bool> {
debug!("Removing topic from whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.remove_topic_whitelist(topic_bytes);
Ok(true)
}
async fn get_topic_sources(&self, topic: String) -> RpcResult<Vec<String>> {
debug!("Getting sources for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let subnets = self
.state
.node
.lock()
.await
.topic_allowed_sources(&topic_bytes)
.ok_or(ErrorObject::from(ErrorCode::from(-32030)))?
.into_iter()
.map(|subnet| subnet.to_string())
.collect();
Ok(subnets)
}
async fn add_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool> {
debug!("Adding source to topic whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
// Parse the subnet
let subnet_obj = subnet
.parse::<Subnet>()
.map_err(|_| ErrorObject::from(ErrorCode::from(-32023)))?;
self.state
.node
.lock()
.await
.add_topic_whitelist_src(topic_bytes, subnet_obj);
Ok(true)
}
async fn remove_topic_source(&self, topic: String, subnet: String) -> RpcResult<bool> {
debug!("Removing source from topic whitelist via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
// Parse the subnet
let subnet_obj = subnet
.parse::<Subnet>()
.map_err(|_| ErrorObject::from(ErrorCode::from(-32023)))?;
self.state
.node
.lock()
.await
.remove_topic_whitelist_src(topic_bytes, subnet_obj);
Ok(true)
}
async fn get_topic_forward_socket(&self, topic: String) -> RpcResult<Option<String>> {
debug!("Getting forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let node = self.state.node.lock().await;
let socket_path = node
.get_topic_forward_socket(&topic_bytes)
.map(|p| p.to_string_lossy().to_string());
Ok(socket_path)
}
async fn set_topic_forward_socket(
&self,
topic: String,
socket_path: String,
) -> RpcResult<bool> {
debug!("Setting forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
let path = PathBuf::from(socket_path);
self.state
.node
.lock()
.await
.set_topic_forward_socket(topic_bytes, path);
Ok(true)
}
async fn remove_topic_forward_socket(&self, topic: String) -> RpcResult<bool> {
debug!("Removing forward socket for topic via RPC");
// Decode the base64 topic
let topic_bytes = base64::engine::general_purpose::STANDARD
.decode(topic.as_bytes())
.map_err(|_| ErrorObject::from(ErrorCode::from(-32021)))?;
self.state
.node
.lock()
.await
.delete_topic_forward_socket(topic_bytes);
Ok(true)
}
}
/// JSON-RPC API server handle. The server is spawned in a background task. If this handle is dropped,
/// the server is terminated.
pub struct JsonRpc {
/// JSON-RPC server handle
_server: ServerHandle,
}
impl JsonRpc {
/// Spawns a new JSON-RPC API server on the provided listening address.
///
/// # Arguments
///
/// * `node` - The Mycelium node to use for the JSON-RPC API
/// * `listen_addr` - The address to listen on for JSON-RPC requests
///
/// # Returns
///
/// A `JsonRpc` instance that will be dropped when the server is terminated
pub async fn spawn<M>(node: Arc<Mutex<mycelium::Node<M>>>, listen_addr: SocketAddr) -> Self
where
M: Metrics + Clone + Send + Sync + 'static,
{
debug!(%listen_addr, "Starting JSON-RPC server");
let server_state = Arc::new(ServerState { node });
// Create the server builder
let server = ServerBuilder::default()
.build(listen_addr)
.await
.expect("Failed to build JSON-RPC server");
// Create the API implementation
let api = RPCApi {
state: server_state,
};
// Register the API implementation
// Create the RPC module
#[allow(unused_mut)]
let mut methods = MyceliumApiServer::into_rpc(api.clone());
// When the message feature is enabled, merge the message RPC module
#[cfg(feature = "message")]
{
let message_methods = MyceliumMessageApiServer::into_rpc(api);
methods
.merge(message_methods)
.expect("Can merge message API into base API");
}
// Start the server with the appropriate module(s)
let handle = server.start(methods);
debug!(%listen_addr, "JSON-RPC server started successfully");
JsonRpc { _server: handle }
}
}
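
Once spawned, the server speaks plain JSON-RPC 2.0 over HTTP. A sketch of a request body for `addPeer`, built with `serde_json` (the endpoint string is a placeholder; POST the body to the configured listen address with any HTTP client):

```rust
use serde_json::json;

fn main() {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "addPeer",
        "params": ["tcp://[2a01:4f8:212:fa6::2]:9651"],
    });
    // A duplicate peer is rejected with application error code -32010,
    // an unparseable endpoint with -32009 (see add_peer above).
    println!("{request}");
}
```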

View File

@@ -0,0 +1,64 @@
//! Admin-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::net::IpAddr;
use std::str::FromStr;
use tracing::debug;
use mycelium::crypto::PublicKey;
use mycelium::metrics::Metrics;
use crate::HttpServerState;
use crate::Info;
use crate::rpc::models::error_codes;
use crate::rpc::traits::AdminApi;
/// Implementation of Admin-related JSON-RPC methods
pub struct AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new AdminRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> AdminApi for AdminRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_info(&self) -> RpcResult<Info> {
debug!("Getting node info via RPC");
let info = self.state.node.blocking_lock().info();
Ok(Info {
node_subnet: info.node_subnet.to_string(),
node_pubkey: info.node_pubkey,
})
}
fn get_pubkey_from_ip(&self, mycelium_ip: String) -> RpcResult<PublicKey> {
debug!(ip = %mycelium_ip, "Getting public key from IP via RPC");
let ip = IpAddr::from_str(&mycelium_ip).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: format!("Invalid IP address: {}", e),
data: None,
})?;
match self.state.node.blocking_lock().get_pubkey_from_ip(ip) {
Some(pubkey) => Ok(pubkey),
None => Err(Error {
code: ErrorCode::ServerError(error_codes::PUBKEY_NOT_FOUND),
message: "Public key not found".to_string(),
data: None,
}),
}
}
}

View File

@@ -0,0 +1,263 @@
//! Message-related JSON-RPC methods for the Mycelium API
use base64::Engine;
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::ops::Deref;
use std::time::Duration;
use tracing::debug;
use mycelium::metrics::Metrics;
use mycelium::message::{MessageId, MessageInfo};
use crate::HttpServerState;
use crate::message::{MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
use crate::rpc::models::error_codes;
use crate::rpc::traits::MessageApi;
/// Implementation of Message-related JSON-RPC methods
pub struct MessageRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> MessageRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new MessageRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
/// Convert a base64 string to bytes
fn decode_base64(&self, s: &str) -> Result<Vec<u8>, Error> {
base64::engine::general_purpose::STANDARD.decode(s.as_bytes())
.map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: format!("Invalid base64 encoding: {}", e),
data: None,
})
}
}
impl<M> MessageApi for MessageRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn pop_message(&self, peek: Option<bool>, timeout: Option<u64>, topic: Option<String>) -> RpcResult<MessageReceiveInfo> {
debug!(
"Attempt to get message via RPC, peek {}, timeout {} seconds",
peek.unwrap_or(false),
timeout.unwrap_or(0)
);
let topic_bytes = if let Some(topic_str) = topic {
Some(self.decode_base64(&topic_str)?)
} else {
None
};
// A timeout of 0 seconds essentially means get a message if there is one, and return
// immediately if there isn't.
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
tokio::time::timeout(
Duration::from_secs(timeout.unwrap_or(0)),
self.state
.node
.lock()
.await
.get_message(!peek.unwrap_or(false), topic_bytes),
)
.await
})
});
match result {
Ok(Ok(m)) => Ok(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() {
None
} else {
Some(m.topic)
},
payload: m.data,
}),
_ => Err(Error {
code: ErrorCode::ServerError(error_codes::NO_MESSAGE_READY),
message: "No message ready".to_string(),
data: None,
}),
}
}
fn push_message(&self, message: MessageSendInfo, reply_timeout: Option<u64>) -> RpcResult<PushMessageResponse> {
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
self.state.node.lock().await.push_message(
dst,
message.payload,
message.topic,
DEFAULT_MESSAGE_TRY_DURATION,
reply_timeout.is_some(),
)
})
});
let (id, sub) = match result {
Ok((id, sub)) => (id, sub),
Err(_) => {
return Err(Error {
code: ErrorCode::InvalidParams,
message: "Failed to push message".to_string(),
data: None,
});
}
};
if reply_timeout.is_none() {
// If we don't wait for the reply, just return here.
return Ok(PushMessageResponse::Id(crate::message::MessageIdReply { id }));
}
let mut sub = sub.unwrap();
// Wait for reply with timeout
let reply_result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
tokio::select! {
sub_res = sub.changed() => {
match sub_res {
Ok(_) => {
if let Some(m) = sub.borrow().deref() {
Ok(PushMessageResponse::Reply(MessageReceiveInfo {
id: m.id,
src_ip: m.src_ip,
src_pk: m.src_pk,
dst_ip: m.dst_ip,
dst_pk: m.dst_pk,
topic: if m.topic.is_empty() { None } else { Some(m.topic.clone()) },
payload: m.data.clone(),
}))
} else {
// This happens if a `None` value is sent, which should not happen.
Err(Error {
code: ErrorCode::InternalError,
message: "Internal error while waiting for reply".to_string(),
data: None,
})
}
}
Err(_) => {
// This happens if the sender drops, which should not happen.
Err(Error {
code: ErrorCode::InternalError,
message: "Internal error while waiting for reply".to_string(),
data: None,
})
}
}
},
_ = tokio::time::sleep(Duration::from_secs(reply_timeout.unwrap_or(0))) => {
// Timeout expired while waiting for reply
Ok(PushMessageResponse::Id(crate::message::MessageIdReply { id }))
}
}
})
});
match reply_result {
Ok(response) => Ok(response),
Err(e) => Err(e),
}
}
fn push_message_reply(&self, id: String, message: MessageSendInfo) -> RpcResult<bool> {
let message_id = match MessageId::from_hex(&id) {
Ok(id) => id,
Err(_) => {
return Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid message ID".to_string(),
data: None,
});
}
};
let dst = match message.dst {
crate::message::MessageDestination::Ip(ip) => ip,
crate::message::MessageDestination::Pk(pk) => pk.address().into(),
};
debug!(
message.id=id,
message.dst=%dst,
message.len=message.payload.len(),
"Pushing new reply to message via RPC",
);
// Default message try duration
const DEFAULT_MESSAGE_TRY_DURATION: Duration = Duration::from_secs(60 * 5);
tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
self.state.node.lock().await.reply_message(
message_id,
dst,
message.payload,
DEFAULT_MESSAGE_TRY_DURATION,
);
})
});
Ok(true)
}
fn get_message_info(&self, id: String) -> RpcResult<MessageInfo> {
let message_id = match MessageId::from_hex(&id) {
Ok(id) => id,
Err(_) => {
return Err(Error {
code: ErrorCode::InvalidParams,
message: "Invalid message ID".to_string(),
data: None,
});
}
};
debug!(message.id=%id, "Fetching message status via RPC");
let result = tokio::task::block_in_place(|| {
tokio::runtime::Handle::current().block_on(async {
self.state.node.lock().await.message_status(message_id)
})
});
match result {
Some(info) => Ok(info),
None => Err(Error {
code: ErrorCode::ServerError(error_codes::MESSAGE_NOT_FOUND),
message: "Message not found".to_string(),
data: None,
}),
}
}
}

View File

@@ -0,0 +1,30 @@
//! Models for the Mycelium JSON-RPC API
use serde::{Deserialize, Serialize};
// Define any additional models needed for the JSON-RPC API
// Most models can be reused from the existing REST API
/// Error codes for the JSON-RPC API
pub mod error_codes {
/// Invalid parameters error code
pub const INVALID_PARAMS: i64 = -32602;
/// Peer already exists error code
pub const PEER_EXISTS: i64 = 409;
/// Peer not found error code
pub const PEER_NOT_FOUND: i64 = 404;
/// Message not found error code
pub const MESSAGE_NOT_FOUND: i64 = 404;
/// Public key not found error code
pub const PUBKEY_NOT_FOUND: i64 = 404;
/// No message ready error code
pub const NO_MESSAGE_READY: i64 = 204;
/// Timeout waiting for reply error code
pub const TIMEOUT_WAITING_FOR_REPLY: i64 = 408;
}
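// Illustrative sketch, not part of the original diff: mapping one of the
// constants above into a `jsonrpc_core::Error`, mirroring how the handler
// modules construct their error responses.
#[allow(dead_code)]
fn example_peer_not_found() -> jsonrpc_core::Error {
    jsonrpc_core::Error {
        code: jsonrpc_core::ErrorCode::ServerError(error_codes::PEER_NOT_FOUND),
        message: "A peer identified by that endpoint does not exist".to_string(),
        data: None,
    }
}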

View File

@@ -0,0 +1,86 @@
//! Peer-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::{Error, ErrorCode, Result as RpcResult};
use std::str::FromStr;
use tracing::debug;
use mycelium::endpoint::Endpoint;
use mycelium::metrics::Metrics;
use mycelium::peer_manager::{PeerExists, PeerNotFound, PeerStats};
use crate::rpc::models::error_codes;
use crate::rpc::traits::PeerApi;
use crate::HttpServerState;
/// Implementation of Peer-related JSON-RPC methods
pub struct PeerRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> PeerRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new PeerRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> PeerApi for PeerRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_peers(&self) -> RpcResult<Vec<PeerStats>> {
debug!("Fetching peer stats via RPC");
Ok(self.state.node.blocking_lock().peer_info())
}
fn add_peer(&self, endpoint: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint,
"Attempting to add peer to the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: e.to_string(),
data: None,
})?;
match self.state.node.blocking_lock().add_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerExists) => Err(Error {
code: ErrorCode::ServerError(error_codes::PEER_EXISTS),
message: "A peer identified by that endpoint already exists".to_string(),
data: None,
}),
}
}
fn delete_peer(&self, endpoint: String) -> RpcResult<bool> {
debug!(
peer.endpoint = endpoint,
"Attempting to remove peer from the system via RPC"
);
let endpoint = Endpoint::from_str(&endpoint).map_err(|e| Error {
code: ErrorCode::InvalidParams,
message: e.to_string(),
data: None,
})?;
match self.state.node.blocking_lock().remove_peer(endpoint) {
Ok(()) => Ok(true),
Err(PeerNotFound) => Err(Error {
code: ErrorCode::ServerError(error_codes::PEER_NOT_FOUND),
message: "A peer identified by that endpoint does not exist".to_string(),
data: None,
}),
}
}
}

View File

@@ -0,0 +1,120 @@
//! Route-related JSON-RPC methods for the Mycelium API
use jsonrpc_core::Result as RpcResult;
use tracing::debug;
use mycelium::metrics::Metrics;
use crate::HttpServerState;
use crate::Route;
use crate::QueriedSubnet;
use crate::NoRouteSubnet;
use crate::Metric;
use crate::rpc::traits::RouteApi;
/// Implementation of Route-related JSON-RPC methods
pub struct RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
state: HttpServerState<M>,
}
impl<M> RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Create a new RouteRpc instance
pub fn new(state: HttpServerState<M>) -> Self {
Self { state }
}
}
impl<M> RouteApi for RouteRpc<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
fn get_selected_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading selected routes via RPC");
let routes = self.state
.node
.blocking_lock()
.selected_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
fn get_fallback_routes(&self) -> RpcResult<Vec<Route>> {
debug!("Loading fallback routes via RPC");
let routes = self.state
.node
.blocking_lock()
.fallback_routes()
.into_iter()
.map(|sr| Route {
subnet: sr.source().subnet().to_string(),
next_hop: sr.neighbour().connection_identifier().clone(),
metric: if sr.metric().is_infinite() {
Metric::Infinite
} else {
Metric::Value(sr.metric().into())
},
seqno: sr.seqno().into(),
})
.collect();
Ok(routes)
}
fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>> {
debug!("Loading queried subnets via RPC");
let queries = self.state
.node
.blocking_lock()
.queried_subnets()
.into_iter()
.map(|qs| QueriedSubnet {
subnet: qs.subnet().to_string(),
expiration: qs
.query_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(queries)
}
fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>> {
debug!("Loading no route entries via RPC");
let entries = self.state
.node
.blocking_lock()
.no_route_entries()
.into_iter()
.map(|nrs| NoRouteSubnet {
subnet: nrs.subnet().to_string(),
expiration: nrs
.entry_expires()
.duration_since(tokio::time::Instant::now())
.as_secs()
.to_string(),
})
.collect();
Ok(entries)
}
}

View File

@@ -0,0 +1,4 @@
//! OpenRPC specification for the Mycelium JSON-RPC API
/// The OpenRPC specification for the Mycelium JSON-RPC API
pub const OPENRPC_SPEC: &str = include_str!("../../../docs/openrpc.json");

View File

@@ -0,0 +1,80 @@
//! RPC trait definitions for the Mycelium JSON-RPC API
use jsonrpc_core::Result as RpcResult;
use jsonrpc_derive::rpc;
use crate::Info;
use crate::Route;
use crate::QueriedSubnet;
use crate::NoRouteSubnet;
use mycelium::crypto::PublicKey;
use mycelium::peer_manager::PeerStats;
use mycelium::message::{MessageId, MessageInfo};
// Admin-related RPC methods
#[rpc]
pub trait AdminApi {
/// Get general info about the node
#[rpc(name = "getInfo")]
fn get_info(&self) -> RpcResult<Info>;
/// Get the pubkey from node ip
#[rpc(name = "getPublicKeyFromIp")]
fn get_pubkey_from_ip(&self, mycelium_ip: String) -> RpcResult<PublicKey>;
}
// Peer-related RPC methods
#[rpc]
pub trait PeerApi {
/// List known peers
#[rpc(name = "getPeers")]
fn get_peers(&self) -> RpcResult<Vec<PeerStats>>;
/// Add a new peer
#[rpc(name = "addPeer")]
fn add_peer(&self, endpoint: String) -> RpcResult<bool>;
/// Remove an existing peer
#[rpc(name = "deletePeer")]
fn delete_peer(&self, endpoint: String) -> RpcResult<bool>;
}
// Route-related RPC methods
#[rpc]
pub trait RouteApi {
/// List all selected routes
#[rpc(name = "getSelectedRoutes")]
fn get_selected_routes(&self) -> RpcResult<Vec<Route>>;
/// List all active fallback routes
#[rpc(name = "getFallbackRoutes")]
fn get_fallback_routes(&self) -> RpcResult<Vec<Route>>;
/// List all currently queried subnets
#[rpc(name = "getQueriedSubnets")]
fn get_queried_subnets(&self) -> RpcResult<Vec<QueriedSubnet>>;
/// List all subnets which are explicitly marked as no route
#[rpc(name = "getNoRouteEntries")]
fn get_no_route_entries(&self) -> RpcResult<Vec<NoRouteSubnet>>;
}
// Message-related RPC methods
#[rpc]
pub trait MessageApi {
/// Get a message from the inbound message queue
#[rpc(name = "popMessage")]
fn pop_message(&self, peek: Option<bool>, timeout: Option<u64>, topic: Option<String>) -> RpcResult<crate::message::MessageReceiveInfo>;
/// Submit a new message to the system
#[rpc(name = "pushMessage")]
fn push_message(&self, message: crate::message::MessageSendInfo, reply_timeout: Option<u64>) -> RpcResult<crate::message::PushMessageResponse>;
/// Reply to a message with the given ID
#[rpc(name = "pushMessageReply")]
fn push_message_reply(&self, id: String, message: crate::message::MessageSendInfo) -> RpcResult<bool>;
/// Get the status of an outbound message
#[rpc(name = "getMessageInfo")]
fn get_message_info(&self, id: String) -> RpcResult<MessageInfo>;
}
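// Illustrative client-side sketch, not part of the original diff: the methods
// above are plain JSON-RPC 2.0 calls over HTTP. The listen address below is an
// assumption for illustration only, and this sketch assumes `reqwest` and
// `serde_json` are available.
//
// async fn example_get_info() -> Result<serde_json::Value, reqwest::Error> {
//     let body = serde_json::json!({
//         "jsonrpc": "2.0",
//         "id": 1,
//         "method": "getInfo",
//         "params": [],
//     });
//     reqwest::Client::new()
//         .post("http://127.0.0.1:8990")
//         .json(&body)
//         .send()
//         .await?
//         .json()
//         .await
// }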

View File

@@ -0,0 +1,26 @@
[package]
name = "mycelium-cli"
version = "0.6.1"
edition = "2021"
license-file = "../LICENSE"
readme = "./README.md"
[features]
message = ["mycelium/message", "mycelium-api/message"]
[dependencies]
mycelium = { path = "../mycelium" }
mycelium-api = { path = "../mycelium-api" }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
base64 = "0.22.1"
prettytable-rs = "0.10.0"
tracing = "0.1.41"
tokio = { version = "1.46.1", default-features = false, features = [
"net",
"rt",
"fs",
] }
reqwest = { version = "0.12.22", default-features = false, features = ["json"] }
byte-unit = "5.1.6"
urlencoding = "2.1.3"

View File

@@ -0,0 +1,30 @@
use std::net::IpAddr;
use mycelium::crypto::PublicKey;
use serde::Serialize;
#[derive(Debug, Serialize)]
struct InspectOutput {
#[serde(rename = "publicKey")]
public_key: PublicKey,
address: IpAddr,
}
/// Inspect the given pubkey, or the local key if no pubkey is given
pub fn inspect(pubkey: PublicKey, json: bool) -> Result<(), Box<dyn std::error::Error>> {
let address = pubkey.address().into();
if json {
let out = InspectOutput {
public_key: pubkey,
address,
};
let out_string = serde_json::to_string_pretty(&out)?;
println!("{out_string}");
} else {
println!("Public key: {pubkey}");
println!("Address: {address}");
}
Ok(())
}

View File

@@ -0,0 +1,13 @@
mod inspect;
#[cfg(feature = "message")]
mod message;
mod peer;
mod routes;
pub use inspect::inspect;
#[cfg(feature = "message")]
pub use message::{recv_msg, send_msg};
pub use peer::{add_peers, list_peers, remove_peers};
pub use routes::{
list_fallback_routes, list_no_route_entries, list_queried_subnets, list_selected_routes,
};

View File

@@ -0,0 +1,306 @@
use std::{
io::Write,
mem,
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use base64::{
alphabet,
engine::{GeneralPurpose, GeneralPurposeConfig},
Engine,
};
use mycelium::{crypto::PublicKey, message::MessageId, subnet::Subnet};
use serde::{Serialize, Serializer};
use tracing::{debug, error};
use mycelium_api::{MessageDestination, MessageReceiveInfo, MessageSendInfo, PushMessageResponse};
enum Payload {
Readable(String),
NotReadable(Vec<u8>),
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CliMessage {
id: MessageId,
src_ip: IpAddr,
src_pk: PublicKey,
dst_ip: IpAddr,
dst_pk: PublicKey,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "serialize_payload")]
topic: Option<Payload>,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(serialize_with = "serialize_payload")]
payload: Option<Payload>,
}
const B64ENGINE: GeneralPurpose = base64::engine::general_purpose::GeneralPurpose::new(
&alphabet::STANDARD,
GeneralPurposeConfig::new(),
);
fn serialize_payload<S: Serializer>(p: &Option<Payload>, s: S) -> Result<S::Ok, S::Error> {
let base64 = match p {
None => None,
Some(Payload::Readable(data)) => Some(data.clone()),
Some(Payload::NotReadable(data)) => Some(B64ENGINE.encode(data)),
};
<Option<String>>::serialize(&base64, s)
}
/// Encode arbitrary data in standard base64.
pub fn encode_base64(input: &[u8]) -> String {
B64ENGINE.encode(input)
}
/// Send a message to a receiver.
#[allow(clippy::too_many_arguments)]
pub async fn send_msg(
destination: String,
msg: Option<String>,
wait: bool,
timeout: Option<u64>,
reply_to: Option<String>,
topic: Option<String>,
msg_path: Option<PathBuf>,
server_addr: SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
if reply_to.is_some() && wait {
error!("Can't wait on a reply for a reply, either use --reply-to or --wait");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Only one of --reply-to or --wait is allowed",
)
.into());
}
let destination = if destination.len() == 64 {
// Public key in hex format
match PublicKey::try_from(&*destination) {
Err(_) => {
error!("{destination} is not a valid hex encoded public key");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid hex encoded public key",
)
.into());
}
Ok(pk) => MessageDestination::Pk(pk),
}
} else {
match destination.parse() {
Err(e) => {
error!("{destination} is not a valid IPv6 address: {e}");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Invalid IPv6 address",
)
.into());
}
Ok(ip) => {
let global_subnet = Subnet::new(
mycelium::GLOBAL_SUBNET_ADDRESS,
mycelium::GLOBAL_SUBNET_PREFIX_LEN,
)
.unwrap();
if !global_subnet.contains_ip(ip) {
error!("{destination} is not a part of {global_subnet}");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"IPv6 address is not part of the mycelium subnet",
)
.into());
}
MessageDestination::Ip(ip)
}
}
};
// Load the message; a file, if provided, takes priority.
let msg = if let Some(path) = msg_path {
match tokio::fs::read(&path).await {
Err(e) => {
error!("Could not read file at {:?}: {e}", path);
return Err(e.into());
}
Ok(data) => data,
}
} else if let Some(msg) = msg {
msg.into_bytes()
} else {
error!("Message is a required argument if `--msg-path` is not provided");
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"Message is a required argument if `--msg-path` is not provided",
)
.into());
};
let mut url = format!("http://{server_addr}/api/v1/messages");
if let Some(reply_to) = reply_to {
url.push_str(&format!("/reply/{reply_to}"));
}
if wait {
// A year should be sufficient to wait
let reply_timeout = timeout.unwrap_or(60 * 60 * 24 * 365);
url.push_str(&format!("?reply_timeout={reply_timeout}"));
}
match reqwest::Client::new()
.post(url)
.json(&MessageSendInfo {
dst: destination,
topic: topic.map(String::into_bytes),
payload: msg,
})
.send()
.await
{
Err(e) => {
error!("Failed to send request: {e}");
return Err(e.into());
}
Ok(res) => {
if res.status() == STATUSCODE_NO_CONTENT {
return Ok(());
}
match res.json::<PushMessageResponse>().await {
Err(e) => {
error!("Failed to load response body {e}");
return Err(e.into());
}
Ok(resp) => {
match resp {
PushMessageResponse::Id(id) => {
let _ = serde_json::to_writer(std::io::stdout(), &id);
}
PushMessageResponse::Reply(mri) => {
let cm = CliMessage {
id: mri.id,
topic: mri.topic.map(|topic| {
if let Ok(s) = String::from_utf8(topic.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(topic)
}
}),
src_ip: mri.src_ip,
src_pk: mri.src_pk,
dst_ip: mri.dst_ip,
dst_pk: mri.dst_pk,
payload: Some({
if let Ok(s) = String::from_utf8(mri.payload.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(mri.payload)
}
}),
};
let _ = serde_json::to_writer(std::io::stdout(), &cm);
}
}
println!();
}
}
}
}
Ok(())
}
const STATUSCODE_NO_CONTENT: u16 = 204;
pub async fn recv_msg(
timeout: Option<u64>,
topic: Option<String>,
msg_path: Option<PathBuf>,
raw: bool,
server_addr: SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
// One year timeout should be sufficient
let timeout = timeout.unwrap_or(60 * 60 * 24 * 365);
let mut url = format!("http://{server_addr}/api/v1/messages?timeout={timeout}");
if let Some(ref topic) = topic {
if topic.len() > 255 {
error!("{topic} is longer than the maximum allowed topic length of 255");
return Err(
std::io::Error::new(std::io::ErrorKind::InvalidInput, "Topic too long").into(),
);
}
url.push_str(&format!("&topic={}", encode_base64(topic.as_bytes())));
}
let mut cm = match reqwest::get(url).await {
Err(e) => {
error!("Failed to wait for message: {e}");
return Err(e.into());
}
Ok(resp) => {
if resp.status() == STATUSCODE_NO_CONTENT {
debug!("No message ready yet");
return Ok(());
}
debug!("Received message response");
match resp.json::<MessageReceiveInfo>().await {
Err(e) => {
error!("Failed to load response json: {e}");
return Err(e.into());
}
Ok(mri) => CliMessage {
id: mri.id,
topic: mri.topic.map(|topic| {
if let Ok(s) = String::from_utf8(topic.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(topic)
}
}),
src_ip: mri.src_ip,
src_pk: mri.src_pk,
dst_ip: mri.dst_ip,
dst_pk: mri.dst_pk,
payload: Some({
if let Ok(s) = String::from_utf8(mri.payload.clone()) {
Payload::Readable(s)
} else {
Payload::NotReadable(mri.payload)
}
}),
},
}
}
};
if let Some(ref file_path) = msg_path {
if let Err(e) = tokio::fs::write(
&file_path,
match mem::take(&mut cm.payload).unwrap() {
Payload::Readable(ref s) => s as &dyn AsRef<[u8]>,
Payload::NotReadable(ref v) => v,
},
)
.await
{
error!("Failed to write response payload to file: {e}");
return Err(e.into());
}
}
if raw {
// only print payload if not already written
if msg_path.is_none() {
let _ = std::io::stdout().write_all(match cm.payload.unwrap() {
Payload::Readable(ref s) => s.as_bytes(),
Payload::NotReadable(ref v) => v,
});
println!();
}
} else {
let _ = serde_json::to_writer(std::io::stdout(), &cm);
println!();
}
Ok(())
}

View File

@@ -0,0 +1,141 @@
use mycelium::peer_manager::PeerStats;
use mycelium_api::AddPeer;
use prettytable::{row, Table};
use std::net::SocketAddr;
use tracing::{debug, error};
/// List the peers the current node is connected to
pub async fn list_peers(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
// Make API call
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve peers");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing connected peers");
match resp.json::<Vec<PeerStats>>().await {
Err(e) => {
error!("Failed to load response json: {e}");
return Err(e.into());
}
Ok(peers) => {
if json_print {
// Print peers in JSON format
let json_output = serde_json::to_string_pretty(&peers)?;
println!("{json_output}");
} else {
// Print peers in table format
let mut table = Table::new();
table.add_row(row![
"Protocol",
"Socket",
"Type",
"Connection",
"Rx total",
"Tx total",
"Discovered",
"Last connection"
]);
for peer in peers.iter() {
table.add_row(row![
peer.endpoint.proto(),
peer.endpoint.address(),
peer.pt,
peer.connection_state,
format_bytes(peer.rx_bytes),
format_bytes(peer.tx_bytes),
format_seconds(peer.discovered),
peer.last_connected
.map(format_seconds)
.unwrap_or("Never connected".to_string()),
]);
}
table.printstd();
}
}
}
}
};
Ok(())
}
fn format_bytes(bytes: u64) -> String {
let byte = byte_unit::Byte::from_u64(bytes);
let adjusted_byte = byte.get_appropriate_unit(byte_unit::UnitType::Binary);
format!(
"{:.2} {}",
adjusted_byte.get_value(),
adjusted_byte.get_unit()
)
}
/// Convert an amount of seconds into a human readable string.
fn format_seconds(total_seconds: u64) -> String {
let seconds = total_seconds % 60;
let minutes = (total_seconds / 60) % 60;
let hours = (total_seconds / 3600) % 24;
let days = total_seconds / 86400;
if days > 0 {
format!("{days}d {hours}h {minutes}m {seconds}s")
} else if hours > 0 {
format!("{hours}h {minutes}m {seconds}s")
} else if minutes > 0 {
format!("{minutes}m {seconds}s")
} else {
format!("{seconds}s")
}
}
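// Sanity-check sketch for the formatting above (illustrative, not part of the
// original diff).
#[cfg(test)]
mod format_seconds_tests {
    use super::format_seconds;

    #[test]
    fn formats_mixed_durations() {
        // 90_061 seconds is exactly 1 day, 1 hour, 1 minute and 1 second.
        assert_eq!(format_seconds(90_061), "1d 1h 1m 1s");
        assert_eq!(format_seconds(61), "1m 1s");
        assert_eq!(format_seconds(5), "5s");
    }
}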
/// Remove peer(s) by (underlay) IP
pub async fn remove_peers(
server_addr: SocketAddr,
peers: Vec<String>,
) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
for peer in peers.iter() {
// encode to pass in URL
let peer_encoded = urlencoding::encode(peer);
let request_url = format!("http://{server_addr}/api/v1/admin/peers/{peer_encoded}");
if let Err(e) = client
.delete(&request_url)
.send()
.await
.and_then(|res| res.error_for_status())
{
error!("Failed to delete peer: {e}");
return Err(e.into());
}
}
Ok(())
}
/// Add peer(s) by (underlay) IP
pub async fn add_peers(
server_addr: SocketAddr,
peers: Vec<String>,
) -> Result<(), Box<dyn std::error::Error>> {
let client = reqwest::Client::new();
for peer in peers.into_iter() {
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
if let Err(e) = client
.post(&request_url)
.json(&AddPeer { endpoint: peer })
.send()
.await
.and_then(|res| res.error_for_status())
{
error!("Failed to add peer: {e}");
return Err(e.into());
}
}
Ok(())
}

View File

@@ -0,0 +1,152 @@
use mycelium_api::{NoRouteSubnet, QueriedSubnet, Route};
use prettytable::{row, Table};
use std::net::SocketAddr;
use tracing::{debug, error};
pub async fn list_selected_routes(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/selected");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve selected routes");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing selected routes");
if json_print {
// API call returns routes in JSON format by default
let selected_routes = resp.text().await?;
println!("{selected_routes}");
} else {
// Print routes in table format
let routes: Vec<Route> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Next Hop", "Metric", "Seq No"]);
for route in routes.iter() {
table.add_row(row![
&route.subnet,
&route.next_hop,
route.metric,
route.seqno,
]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_fallback_routes(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/fallback");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve fallback routes");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing fallback routes");
if json_print {
// API call returns routes in JSON format by default
let fallback_routes = resp.text().await?;
println!("{fallback_routes}");
} else {
// Print routes in table format
let routes: Vec<Route> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Next Hop", "Metric", "Seq No"]);
for route in routes.iter() {
table.add_row(row![
&route.subnet,
&route.next_hop,
route.metric,
route.seqno,
]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_queried_subnets(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/queried");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve queried subnets");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing queried routes");
if json_print {
// API call returns routes in JSON format by default
let queried_routes = resp.text().await?;
println!("{queried_routes}");
} else {
// Print routes in table format
let queries: Vec<QueriedSubnet> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Query expiration"]);
for query in queries.iter() {
table.add_row(row![query.subnet, query.expiration,]);
}
table.printstd();
}
}
}
Ok(())
}
pub async fn list_no_route_entries(
server_addr: SocketAddr,
json_print: bool,
) -> Result<(), Box<dyn std::error::Error>> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/no_route");
match reqwest::get(&request_url).await {
Err(e) => {
error!("Failed to retrieve subnets with no route entries");
return Err(e.into());
}
Ok(resp) => {
debug!("Listing no route entries");
if json_print {
// API call returns routes in JSON format by default
let nrs = resp.text().await?;
println!("{nrs}");
} else {
// Print routes in table format
let no_routes: Vec<NoRouteSubnet> = resp.json().await?;
let mut table = Table::new();
table.add_row(row!["Subnet", "Entry expiration"]);
for nrs in no_routes.iter() {
table.add_row(row![nrs.subnet, nrs.expiration,]);
}
table.printstd();
}
}
}
Ok(())
}

View File

@@ -0,0 +1,25 @@
[package]
name = "mycelium-metrics"
version = "0.6.1"
edition = "2021"
license-file = "../LICENSE"
readme = "../README.md"
[features]
prometheus = ["dep:axum", "dep:prometheus", "dep:tokio", "dep:tracing"]
[dependencies]
axum = { version = "0.8.4", default-features = false, optional = true, features = [
"http1",
"http2",
"tokio",
] }
mycelium = { path = "../mycelium", default-features = false }
prometheus = { version = "0.14.0", default-features = false, optional = true, features = [
"process",
] }
tokio = { version = "1.46.1", default-features = false, optional = true, features = [
"net",
"rt",
] }
tracing = { version = "0.1.41", optional = true }

View File

@@ -0,0 +1,11 @@
//! This crate provides implementations of [`the Metrics trait`](mycelium::metrics::Metrics).
//! Two options are currently exposed: a no-op implementation which doesn't record anything,
//! and a Prometheus exporter which exposes all metrics in a Prometheus-compatible format.
mod noop;
pub use noop::NoMetrics;
#[cfg(feature = "prometheus")]
mod prometheus;
#[cfg(feature = "prometheus")]
pub use prometheus::PrometheusExporter;
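// Illustrative sketch, not part of the original diff: picking an
// implementation. `NoMetrics` costs nothing; with the `prometheus` feature
// enabled, a `PrometheusExporter` can be substituted wherever a `Metrics`
// value is needed.
//
// let metrics = NoMetrics;
// #[cfg(feature = "prometheus")]
// let metrics = PrometheusExporter::new();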

View File

@@ -0,0 +1,5 @@
use mycelium::metrics::Metrics;
#[derive(Clone)]
pub struct NoMetrics;
impl Metrics for NoMetrics {}

View File

@@ -0,0 +1,446 @@
use axum::{routing::get, Router};
use mycelium::metrics::Metrics;
use prometheus::{
opts, register_int_counter, register_int_counter_vec, register_int_gauge, Encoder, IntCounter,
IntCounterVec, IntGauge, TextEncoder,
};
use tracing::{error, info};
use std::net::SocketAddr;
/// A [`Metrics`] implementation which uses prometheus to expose the metrics to the outside world.
#[derive(Clone)]
pub struct PrometheusExporter {
router_processed_tlvs: IntCounterVec,
router_peer_added: IntCounter,
router_peer_removed: IntCounter,
router_peer_died: IntCounter,
router_route_selection_ran: IntCounter,
router_source_key_expired: IntCounter,
router_expired_routes: IntCounterVec,
router_selected_route_expired: IntCounter,
router_triggered_update: IntCounter,
router_route_packet: IntCounterVec,
router_seqno_action: IntCounterVec,
router_tlv_handling_time_spent: IntCounterVec,
router_update_dead_peer: IntCounter,
router_received_tlvs: IntCounter,
router_tlv_source_died: IntCounter,
router_tlv_discarded: IntCounter,
router_propagate_selected_peers_time_spent: IntCounter,
router_update_skipped_route_selection: IntCounter,
router_update_denied_by_filter: IntCounter,
router_update_not_interested: IntCounter,
peer_manager_peer_added: IntCounterVec,
peer_manager_known_peers: IntGauge,
peer_manager_connection_attempts: IntCounterVec,
}
impl PrometheusExporter {
/// Create a new [`PrometheusExporter`].
pub fn new() -> Self {
Self {
router_processed_tlvs: register_int_counter_vec!(
opts!(
"mycelium_router_processed_tlvs",
"Amount of processed TLV's from peers, by type of TLV"
), &["tlv_type"]
).expect("Can register int counter vec in default registry"),
router_peer_added: register_int_counter!(
"mycelium_router_peer_added",
"Amount of times a peer was added to the router"
).expect("Can register int counter in default registry"),
router_peer_removed: register_int_counter!(
"mycelium_router_peer_removed",
"Amount of times a peer was removed from the router"
).expect("Can register int counter in default registry"),
router_peer_died: register_int_counter!(
"mycelium_router_peer_died",
"Amount of times the router noticed a peer was dead, or the peer noticed itself and informed the router",
).expect("Can register int counter in default registry"),
router_route_selection_ran: register_int_counter!(
"mycelium_router_route_selections",
"Amount of times a route selection procedure was ran as result of routes expiring or peers being disconnected. Does not include route selection after an update",
).expect("Can register int counte rin default registry"),
router_source_key_expired: register_int_counter!(
"mycelium_router_source_key_expired",
"Amount of source keys expired"
)
.expect("Can register int counter in default registry"),
router_expired_routes: register_int_counter_vec!(
opts!(
"mycelium_router_expired_routes",
"Route expiration events and the action taken on the route",
),
&["action"]
)
.expect("Can register int counter vec in default registry"),
router_selected_route_expired: register_int_counter!(
"mycelium_router_selected_route_expired",
"Amount of times a selected route in the routing table expired"
)
.expect("Can register int counter in default registry"),
router_triggered_update: register_int_counter!(
"mycelium_router_triggered_updates",
"Amount of triggered updates sent"
)
.expect("Can register int counter in default registry"),
router_route_packet: register_int_counter_vec!(
opts!(
"mycelium_router_packets_routed",
"What happened to a routed data packet"
),
&["verdict"],
)
.expect("Can register int counter vec in default registry"),
router_seqno_action: register_int_counter_vec!(
opts!(
"mycelium_router_seqno_handling",
"What happened to a received seqno request",
),
&["action"],
)
.expect("Can register int counter vec in default registry"),
router_tlv_handling_time_spent: register_int_counter_vec!(
opts!(
"mycelium_router_tlv_handling_time",
"Amount of time spent handling incoming TLV packets, in nanoseconds",
),
&["tlv_type"],
)
.expect("Can register an int counter vec in default registry"),
router_update_dead_peer: register_int_counter!(
"mycelium_router_update_dead_peer",
"Amount of updates we tried to send to a peer, where we found the peer to be dead before actually sending"
)
.expect("Can register an int counter in default registry"),
router_received_tlvs: register_int_counter!(
"mycelium_router_received_tlvs",
"Amount of tlv's received by peers",
)
.expect("Can register an int counter in the default registry"),
router_tlv_source_died: register_int_counter!(
"mycelium_router_tlv_source_died",
"Dropped TLV's which have been received, but where the peer has died before they could be processed",
)
.expect("Can register an int counter in default registry"),
router_tlv_discarded: register_int_counter!(
"mycelium_router_tlv_discarded",
"Dropped TLV's which have been received, but where not processed because the router couldn't keep up",
)
.expect("Can register an int counter in default registry"),
router_propagate_selected_peers_time_spent: register_int_counter!(
"mycelium_router_propagate_selected_route_time",
"Time spent in the propagate_selected_route task, which periodically announces selected routes to peers. Measurement is in nanoseconds",
)
.expect("Can register an int counter in default registry"),
router_update_skipped_route_selection: register_int_counter!(
"mycelium_router_update_skipped_route_selection",
"Updates which were processed but did not run the route selection step, because the updated route could not be selected anyway",
)
.expect("Can register an int counter in default registry"),
router_update_denied_by_filter: register_int_counter!(
"mycelium_router_update_denied",
"Updates which were received and immediately denied by a configured filter",
)
.expect("Can register an int counter in default registry"),
router_update_not_interested: register_int_counter!(
"mycelium_router_update_not_interested",
"Updates which were allowed by the configured filters, but not of interest as they were either not feasible, or retractions, for an unknown subnet",
)
.expect("Can register an int counter in default registry"),
peer_manager_peer_added: register_int_counter_vec!(
opts!(
"mycelium_peer_manager_peers_added",
"Peers added to the peer manager at runtime, by peer type"
),
&["peer_type"],
)
.expect("Can register int counter vec in default registry"),
peer_manager_known_peers: register_int_gauge!(
"mycelium_peer_manager_known_peers",
"Amount of known peers in the peer manager"
)
.expect("Can register int gauge in default registry"),
peer_manager_connection_attempts: register_int_counter_vec!(
opts!(
"mycelium_peer_manager_connection_attempts",
"Count how many connections the peer manager started to remotes, and finished"
),
&["connection_state"]
)
.expect("Can register int counter vec in the default registry"),
}
}
/// Spawns an HTTP server on the provided [`SocketAddr`] to export the gathered metrics. Metrics
/// are served under the /metrics endpoint.
pub fn spawn(self, listen_addr: SocketAddr) {
info!("Enable system metrics on http://{listen_addr}/metrics");
let app = Router::new().route("/metrics", get(serve_metrics));
tokio::spawn(async move {
let listener = match tokio::net::TcpListener::bind(listen_addr).await {
Ok(listener) => listener,
Err(e) => {
error!("Failed to bind listener for Http metrics server: {e}");
error!("metrics disabled");
return;
}
};
let server = axum::serve(listener, app.into_make_service());
if let Err(e) = server.await {
error!("Http API server error: {e}");
}
});
}
}
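// Illustrative usage sketch, not part of the original diff: construct the
// exporter, start serving metrics, and reuse the same value as the node's
// `Metrics` implementation. The listen address is an assumption, and `spawn`
// must be called from within a tokio runtime.
#[allow(dead_code)]
fn example_start_metrics() -> PrometheusExporter {
    let exporter = PrometheusExporter::new();
    exporter
        .clone()
        .spawn("127.0.0.1:9100".parse().expect("valid listen address"));
    exporter
}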
/// Expose prometheus formatted metrics
async fn serve_metrics() -> String {
let mut buffer = Vec::new();
let encoder = TextEncoder::new();
// Gather the metrics.
let metric_families = prometheus::gather();
// Encode them to send.
encoder
.encode(&metric_families, &mut buffer)
.expect("Can encode metrics");
String::from_utf8(buffer).expect("Metrics are encoded in valid prometheus format")
}
impl Metrics for PrometheusExporter {
#[inline]
fn router_process_hello(&self) {
self.router_processed_tlvs
.with_label_values(&["hello"])
.inc()
}
#[inline]
fn router_process_ihu(&self) {
self.router_processed_tlvs.with_label_values(&["ihu"]).inc()
}
#[inline]
fn router_process_seqno_request(&self) {
self.router_processed_tlvs
.with_label_values(&["seqno_request"])
.inc()
}
#[inline]
fn router_process_route_request(&self, wildcard: bool) {
let label = if wildcard {
"wildcard_route_request"
} else {
"route_request"
};
self.router_processed_tlvs.with_label_values(&[label]).inc()
}
#[inline]
fn router_process_update(&self) {
self.router_processed_tlvs
.with_label_values(&["update"])
.inc()
}
#[inline]
fn router_peer_added(&self) {
self.router_peer_added.inc()
}
#[inline]
fn router_peer_removed(&self) {
self.router_peer_removed.inc()
}
#[inline]
fn router_peer_died(&self) {
self.router_peer_died.inc()
}
#[inline]
fn router_route_selection_ran(&self) {
self.router_route_selection_ran.inc()
}
#[inline]
fn router_source_key_expired(&self) {
self.router_source_key_expired.inc()
}
#[inline]
fn router_route_key_expired(&self, removed: bool) {
let label = if removed { "removed" } else { "retracted" };
self.router_expired_routes.with_label_values(&[label]).inc()
}
#[inline]
fn router_selected_route_expired(&self) {
self.router_selected_route_expired.inc()
}
#[inline]
fn router_triggered_update(&self) {
self.router_triggered_update.inc()
}
#[inline]
fn router_route_packet_local(&self) {
self.router_route_packet.with_label_values(&["local"]).inc()
}
#[inline]
fn router_route_packet_forward(&self) {
self.router_route_packet
.with_label_values(&["forward"])
.inc()
}
#[inline]
fn router_route_packet_ttl_expired(&self) {
self.router_route_packet
.with_label_values(&["ttl_expired"])
.inc()
}
#[inline]
fn router_route_packet_no_route(&self) {
self.router_route_packet
.with_label_values(&["no_route"])
.inc()
}
#[inline]
fn router_seqno_request_reply_local(&self) {
self.router_seqno_action
.with_label_values(&["reply_local"])
.inc()
}
#[inline]
fn router_seqno_request_bump_seqno(&self) {
self.router_seqno_action
.with_label_values(&["bump_seqno"])
.inc()
}
#[inline]
fn router_seqno_request_dropped_ttl(&self) {
self.router_seqno_action
.with_label_values(&["ttl_expired"])
.inc()
}
#[inline]
fn router_seqno_request_forward_feasible(&self) {
self.router_seqno_action
.with_label_values(&["forward_feasible"])
.inc()
}
#[inline]
fn router_seqno_request_forward_unfeasible(&self) {
self.router_seqno_action
.with_label_values(&["forward_unfeasible"])
.inc()
}
#[inline]
fn router_seqno_request_unhandled(&self) {
self.router_seqno_action
.with_label_values(&["unhandled"])
.inc()
}
#[inline]
fn router_time_spent_handling_tlv(&self, duration: std::time::Duration, tlv_type: &str) {
self.router_tlv_handling_time_spent
.with_label_values(&[tlv_type])
.inc_by(duration.as_nanos() as u64)
}
#[inline]
fn router_update_dead_peer(&self) {
self.router_update_dead_peer.inc()
}
#[inline]
fn router_received_tlv(&self) {
self.router_received_tlvs.inc()
}
#[inline]
fn router_tlv_source_died(&self) {
self.router_tlv_source_died.inc()
}
#[inline]
fn router_tlv_discarded(&self) {
self.router_tlv_discarded.inc()
}
#[inline]
fn router_time_spent_periodic_propagating_selected_routes(
&self,
duration: std::time::Duration,
) {
self.router_propagate_selected_peers_time_spent
.inc_by(duration.as_nanos() as u64)
}
#[inline]
fn router_update_skipped_route_selection(&self) {
self.router_update_skipped_route_selection.inc()
}
#[inline]
fn router_update_denied_by_filter(&self) {
self.router_update_denied_by_filter.inc()
}
#[inline]
fn router_update_not_interested(&self) {
self.router_update_not_interested.inc()
}
#[inline]
fn peer_manager_peer_added(&self, pt: mycelium::peer_manager::PeerType) {
let label = match pt {
mycelium::peer_manager::PeerType::Static => "static",
mycelium::peer_manager::PeerType::Inbound => "inbound",
mycelium::peer_manager::PeerType::LinkLocalDiscovery => "link_local",
};
self.peer_manager_peer_added
.with_label_values(&[label])
.inc()
}
#[inline]
fn peer_manager_known_peers(&self, amount: usize) {
self.peer_manager_known_peers.set(amount as i64)
}
#[inline]
fn peer_manager_connection_attempted(&self) {
self.peer_manager_connection_attempts
.with_label_values(&["started"])
.inc()
}
#[inline]
fn peer_manager_connection_finished(&self) {
self.peer_manager_connection_attempts
.with_label_values(&["finished"])
.inc()
}
}
impl Default for PrometheusExporter {
fn default() -> Self {
Self::new()
}
}

View File

@@ -0,0 +1,9 @@
# Generated by Cargo
# will have compiled files and executables
/target/
/dist/
/static/
/.dioxus/
# These are backup files generated by rustfmt
**/*.rs.bk

components/mycelium/mycelium-ui/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -0,0 +1,35 @@
[package]
name = "mycelium-ui"
version = "0.6.1"
edition = "2021"
license-file = "../LICENSE"
readme = "../README.md"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
dioxus = { version = "0.6.2", features = ["desktop", "router"] }
mycelium = { path = "../mycelium" }
mycelium-api = { path = "../mycelium-api" }
# Debug
tracing = "0.1.40"
dioxus-logger = "0.6.2"
reqwest = { version = "0.12.5", features = ["json"] }
serde_json = "1.0.120"
dioxus-sortable = "0.1.2"
manganis = "0.6.2"
dioxus-free-icons = { version = "0.9.0", features = [
"font-awesome-solid",
"font-awesome-brands",
"font-awesome-regular",
] }
human_bytes = { version = "0.4.3", features = ["fast"] }
tokio = "1.44.1"
dioxus-charts = "0.3.1"
futures-util = "0.3.31"
urlencoding = "2.1.3"
[features]
bundle = []

View File

@@ -0,0 +1,48 @@
[application]
# App (Project) Name
name = "mycelium-ui"
# Dioxus App Default Platform
# desktop, web
default_platform = "desktop"
# `build` & `serve` dist path
out_dir = "dist"
# assets file folder
asset_dir = "assets"
[web.app]
# HTML title tag content
title = "mycelium-ui"
[web.watcher]
# when watcher trigger, regenerate the `index.html`
reload_html = true
# which files or dirs will be watcher monitoring
watch_path = ["src", "assets"]
# add fallback 404 page
index_on_404 = true
# include `assets` in web platform
[web.resource]
# CSS style file
style = [
"https://fonts.googleapis.com/css2?family=Lato:wght@300;400;700&display=swap",
]
# Javascript code file
script = []
[web.resource.dev]
# Javascript code file
# serve: [dev-server] only
script = []

View File

@@ -0,0 +1,54 @@
# Mycelium Network Dashboard
The Mycelium Network Dashboard is a GUI application built with Dioxus, a modern library for building
cross-platform applications using Rust. More information about Dioxus can be found [here](https://dioxuslabs.com/)
## Getting Started
To get started with the Mycelium Network Dashboard, you'll need to have the Dioxus CLI tool installed.
You can install it using the following command:
`cargo install dioxus-cli`
Before running the Mycelium Network Dashboard application, make sure that the `myceliumd` daemon is running on your system.
The myceliumd daemon is the background process that manages the Mycelium network connection
and provides the data that the dashboard application displays. For more information on setting up and
running `myceliumd`, please read [this](../README.md).
Once you have the Dioxus CLI installed, you can build and run the application in development mode using
the following command (in the `mycelium-ui` directory):
`dx serve`
This will start a development server and launch the application in a WebView.
## Bundling the application
To bundle the application, you can use:
`dx bundle --release --features bundle`
This will create a bundled version of the application in the `dist/bundle/` directory. The bundled
application can be distributed and run on various platforms, including Windows, MacOS and Linux. Dioxus
also offers support for mobile, but note that this has not been tested.
## Documentation
The Mycelium Network Dashboard application provides the following features:
- **Home**: Displays information about the node and allows changing the address of the API server the
application should connect to.
- **Peers**: Shows an overview of all the connected peers. Adding and removing peers can be done here.
- **Routes**: Provides information about the routing table and network routes.
## Contributing
If you would like to contribute to the Mycelium Network Dashboard project, please follow the standard GitHub workflow:
1. Fork the repository
2. Create a new branch for your changes
3. Make your changes and commit them
4. Push your changes to your forked repository
5. Submit a pull request to the main repository

View File

@@ -0,0 +1,514 @@
@import url('https://fonts.googleapis.com/css2?family=Lato:wght@300;400;700&display=swap');
:root {
--primary-color: #3498db;
--secondary-color: #2c3e50;
--background-color: #ecf0f1;
--text-color: #34495e;
--border-color: #bdc3c7;
}
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body {
font-family: 'Lato', sans-serif;
background-color: var(--background-color);
color: var(--text-color);
}
.app-container {
display: flex;
flex-direction: column;
min-height: 100vh;
}
header {
background-color: var(--primary-color);
color: white;
padding: 1rem;
display: flex;
justify-content: space-between;
align-items: center;
position: fixed;
width: 100%;
z-index: 1000;
height: 60px;
}
header h1 {
font-size: 1rem;
font-weight: 700;
}
.node-info {
font-size: 0.9rem;
display: flex;
align-items: center;
}
.node-info span {
margin-right: 1rem;
}
.node-info .separator {
margin: 0 1rem;
}
.content-container {
display: flex;
padding-top: 60px;
min-height: calc(100vh - 60px);
}
.sidebar {
background-color: var(--secondary-color);
color: white;
width: 250px;
height: 100%;
position: fixed;
top: 60px;
left: 0;
transition: transform 0.3s ease-in-out;
z-index: 100;
}
.sidebar.collapsed {
transform: translateX(-250px);
}
.sidebar ul {
list-style-type: none;
padding: 1rem;
}
.sidebar li {
margin-bottom: 1rem;
}
.sidebar a {
color: white;
text-decoration: none;
font-size: 1.1rem;
transition: color 0.3s ease;
}
.sidebar a:hover {
color: var(--primary-color);
}
.main-content {
flex: 1;
padding: 2rem;
margin-left: 250px;
transition: margin-left 0.3s ease-in-out;
}
.main-content.expanded {
margin-left: 0;
}
.toggle-sidebar.collapsed {
left: 10px;
transform: translate(-10px) rotate(180deg);
transition: left 0.3s ease-in-out, transform 0s;
}
.toggle-sidebar {
background-color: var(--secondary-color);
color: white;
border: none;
width: 40px;
height: 40px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
position: fixed;
top: 70px;
left: 250px;
transition: left 0.3s ease-in-out, transform 0.3s ease-in-out;
z-index: 200;
}
.table-container {
overflow-x: auto;
}
table {
width: 100%;
border-collapse: collapse;
margin-top: 1rem;
background-color: white;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
th, td {
padding: 1rem;
text-align: left;
border-bottom: 1px solid var(--border-color);
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
th {
background-color: var(--primary-color);
color: white;
font-weight: 700;
cursor: pointer;
transition: background-color 0.3s ease;
}
th:hover {
background-color: #2980b9;
}
.subnet-column { width: 25%; }
.next-hop-column { width: 35%; }
.metric-column { width: 20%; }
.seqno-column { width: 20%; }
.endpoint-column { width: 30%; }
.type-column { width: 15%; }
.connection-state-column { width: 20%; }
.tx-bytes-column { width: 17.5%; }
.rx-bytes-column { width: 17.5%; }
.pagination {
display: flex;
justify-content: center;
align-items: center;
margin-top: 1rem;
}
.pagination button {
background-color: var(--primary-color);
color: white;
border: none;
padding: 0.5rem 1rem;
margin: 0 0.5rem;
cursor: pointer;
transition: background-color 0.3s ease;
}
.pagination button:hover:not(:disabled) {
background-color: #2980b9;
}
.pagination button:disabled {
background-color: var(--border-color);
cursor: not-allowed;
}
.pagination span {
margin: 0 0.5rem;
}
.peers-table, .selected-routes, .fallback-routes {
margin-top: 2rem;
}
h2 {
color: var(--secondary-color);
margin-bottom: 1rem;
}
.node-info h3 {
margin-bottom: 0.5rem;
font-size: 1.1rem;
font-weight: 400;
}
/* Home component */
.home-container {
max-width: 800px;
margin: 0 auto;
padding: 2rem;
}
.home-container h2 {
color: var(--secondary-color);
margin-bottom: 1.5rem;
}
.home-container p {
margin-bottom: 1rem;
}
.bold {
font-weight: 700;
}
/* API server */
.server-input {
display: flex;
margin-bottom: 1rem;
}
.server-input input {
flex-grow: 1;
padding: 0.5rem;
font-size: 1rem;
border: 1px solid var(--border-color);
border-radius: 4px 0 0 4px;
}
.server-input button {
padding: 0.5rem 1rem;
font-size: 1rem;
background-color: var(--primary-color);
color: white;
border: none;
border-radius: 0 4px 4px 0;
cursor: pointer;
transition: background-color 0.3s ease;
}
.server-input button:hover {
background-color: #2980b9;
}
.error {
color: #e74c3c;
margin-bottom: 1rem;
}
.warning {
color: #f39c12;
margin-bottom: 1rem;
}
/* Searching and adding */
.search-and-add-container {
display: flex;
justify-content: space-between;
align-items: flex-start;
margin-bottom: 1rem;
flex-wrap: wrap;
}
/* Searching */
.search-container {
flex: 0 0 60%;
display: flex;
margin-right: 1rem;
margin-bottom: 0.5rem;
}
.search-container input {
flex-grow: 1;
padding: 0.5rem;
font-size: 1rem;
border: 1px solid var(--border-color);
border-radius: 4px 0 0 4px;
min-width: 200px;
height: 40px;
}
.search-container select {
padding: 0.5rem;
font-size: 1rem;
border: 1px solid var(--border-color);
border-left: none;
border-radius: 0 4px 4px 0;
background-color: white;
min-width: 120px;
height: 40px;
}
/* Add peer button */
.add-peer-container {
flex: 0 0 35%;
display: flex;
flex-direction: column;
align-items: flex-start;
margin-bottom: 0.5rem;
}
.add-peer-input-button {
display: flex;
width: 100%;
}
.add-peer-container button {
padding: 0.5rem 1rem;
font-size: 1rem;
background-color: var(--primary-color);
color: white;
border: none;
border-radius: 4px;
cursor: pointer;
transition: background-color 0.3s ease;
white-space: nowrap;
height: 40px;
}
.add-peer-container button:hover {
background-color: #2980b9;
}
.add-peer-error {
color: #e74c3c !important;
font-size: 0.9rem;
margin-top: 0.5rem;
width: 100%;
}
.expanded-add-peer-container {
flex-grow: 1;
display: flex;
margin-right: 0.5rem;
}
.expanded-add-peer-container input {
flex-grow: 1;
padding: 0.5rem;
font-size: 1rem;
border: 1px solid var(--border-color);
border-radius: 4px;
min-width: 150px;
height: 40px;
}
/* Refresh button */
.refresh-button {
display: flex;
align-items: center;
justify-content: center;
background-color: var(--primary-color);
color: white;
border: none;
padding: 0.5rem 1rem;
margin-bottom: 1rem;
cursor: pointer;
transition: background-color 0.3s ease;
border-radius: 4px;
font-size: 1rem;
}
.refresh-button:hover {
background-color: #2980b9;
}
.refresh-button svg {
margin-right: 0.5rem;
}
/* Expandable row styles */
.expanded-row {
background-color: #f8f9fa;
}
.graph-container {
width: calc(50% - 1rem);
margin-bottom: 2rem;
}
.graph-title {
font-size: 1.2rem;
font-weight: 700;
margin-bottom: 1rem;
color: var(--secondary-color);
text-align: center;
}
.expanded-content {
padding: 2rem;
display: flex;
flex-wrap: wrap;
justify-content: space-between;
align-items: flex-start;
}
.expanded-content p {
margin: 0;
font-size: 0.5rem;
}
/* Style for both Tx and Rx charts */
.expanded-content svg {
width: 100%;
height: auto;
margin-bottom: 1rem;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
border-radius: 8px;
overflow: hidden;
}
/* Style for chart lines */
.expanded-content path {
stroke-width: 2;
}
/* Style for Tx bytes chart */
.graph-container:nth-child(1) path {
stroke: #3498db;
}
/* Style for Rx bytes chart */
.graph-container:nth-child(2) path {
stroke: #2ecc71;
}
.button-container {
width: 100%;
display: flex;
justify-content: space-between;
margin-top: 1rem;
}
.close-button {
background-color: var(--primary-color);
color: white;
border: none;
padding: 0.75rem 1.5rem;
font-size: 1rem;
cursor: pointer;
transition: background-color 0.3s ease;
border-radius: 4px;
margin-top: 1rem;
}
.remove-button {
background-color: var(--primary-color);
color: white;
border: none;
padding: 0.75rem 1.5rem;
font-size: 1rem;
cursor: pointer;
transition: background-color 0.3s ease;
border-radius: 4px;
margin-top: 1rem;
}
.remove-button:hover {
background-color: #c0392b;
}
/* Make the table rows clickable */
tbody tr {
cursor: pointer;
}
tbody tr:hover {
background-color: #f1f3f5;
}
/* Responsive design for smaller screens */
@media (max-width: 768px) {
.expanded-content {
flex-direction: column;
align-items: center;
}
.graph-container {
width: 100%;
max-width: 100%;
}
}

View File

@@ -0,0 +1,98 @@
use mycelium::endpoint::Endpoint;
use mycelium_api::AddPeer;
use std::net::SocketAddr;
use urlencoding::encode;
pub async fn get_peers(
server_addr: SocketAddr,
) -> Result<Vec<mycelium::peer_manager::PeerStats>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium::peer_manager::PeerStats>>().await {
Err(e) => Err(e),
Ok(peers) => Ok(peers),
},
}
}
pub async fn get_selected_routes(
server_addr: SocketAddr,
) -> Result<Vec<mycelium_api::Route>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/selected");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium_api::Route>>().await {
Err(e) => Err(e),
Ok(selected_routes) => Ok(selected_routes),
},
}
}
pub async fn get_fallback_routes(
server_addr: SocketAddr,
) -> Result<Vec<mycelium_api::Route>, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin/routes/fallback");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<Vec<mycelium_api::Route>>().await {
Err(e) => Err(e),
Ok(fallback_routes) => Ok(fallback_routes),
},
}
}
pub async fn get_node_info(server_addr: SocketAddr) -> Result<mycelium_api::Info, reqwest::Error> {
let request_url = format!("http://{server_addr}/api/v1/admin");
match reqwest::get(&request_url).await {
Err(e) => Err(e),
Ok(resp) => match resp.json::<mycelium_api::Info>().await {
Err(e) => Err(e),
Ok(node_info) => Ok(node_info),
},
}
}
pub async fn remove_peer(
server_addr: SocketAddr,
peer_endpoint: Endpoint,
) -> Result<(), reqwest::Error> {
let full_endpoint = format!(
"{}://{}",
peer_endpoint.proto().to_string().to_lowercase(),
peer_endpoint.address()
);
let encoded_full_endpoint = encode(&full_endpoint);
let request_url = format!(
"http://{}/api/v1/admin/peers/{}",
server_addr, encoded_full_endpoint
);
let client = reqwest::Client::new();
client
.delete(request_url)
.send()
.await?
.error_for_status()?;
Ok(())
}
pub async fn add_peer(
server_addr: SocketAddr,
peer_endpoint: String,
) -> Result<(), reqwest::Error> {
println!("adding peer: {peer_endpoint}");
let client = reqwest::Client::new();
let request_url = format!("http://{server_addr}/api/v1/admin/peers");
client
.post(request_url)
.json(&AddPeer {
endpoint: peer_endpoint,
})
.send()
.await?
.error_for_status()?;
Ok(())
}
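// Editor's sketch, not part of the original API: the helpers above are plain
// async functions, so they compose with `?` inside any tokio context. The
// hard-coded address is an assumption matching the dashboard's default.
#[allow(dead_code)]
pub async fn print_peer_count() -> Result<(), reqwest::Error> {
let server_addr: SocketAddr = "127.0.0.1:8989"
.parse()
.expect("Hard-coded address is valid; qed");
let peers = get_peers(server_addr).await?;
println!("connected peers: {}", peers.len());
Ok(())
}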

View File

@@ -0,0 +1,4 @@
pub mod home;
pub mod layout;
pub mod peers;
pub mod routes;

View File

@@ -0,0 +1,68 @@
use crate::api;
use crate::{ServerAddress, ServerConnected};
use dioxus::prelude::*;
use std::net::SocketAddr;
use std::str::FromStr;
#[component]
pub fn Home() -> Element {
let mut server_addr = use_context::<Signal<ServerAddress>>();
let mut new_address = use_signal(|| server_addr.read().0.to_string());
let mut node_info = use_resource(fetch_node_info);
let try_connect = move |_| {
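// Only switch servers when the entered text parses as a valid socket address.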
if let Ok(addr) = SocketAddr::from_str(&new_address.read()) {
server_addr.write().0 = addr;
node_info.restart();
}
};
rsx! {
div { class: "home-container",
h2 { "Node information" }
div { class: "server-input",
input {
placeholder: "Server address (e.g. 127.0.0.1:8989)",
value: "{new_address}",
oninput: move |evt| new_address.set(evt.value().clone()),
}
button { onclick: try_connect, "Connect" }
}
{match node_info.read().as_ref() {
Some(Ok(info)) => rsx! {
p {
"Node subnet: ",
span { class: "bold", "{info.node_subnet}" }
}
p {
"Node public key: ",
span { class: "bold", "{info.node_pubkey}" }
}
},
Some(Err(e)) => rsx! {
p { class: "error", "Error: {e}" }
},
None => rsx! {
p { "Enter a server address and click 'Connect' to fetch node information." }
}
}}
}
}
}
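// Fetch the node info from the configured server and record whether the
// attempt succeeded in the shared `ServerConnected` signal.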
async fn fetch_node_info() -> Result<mycelium_api::Info, reqwest::Error> {
let server_addr = use_context::<Signal<ServerAddress>>();
let mut server_connected = use_context::<Signal<ServerConnected>>();
let address = server_addr.read().0;
match api::get_node_info(address).await {
Ok(info) => {
server_connected.write().0 = true;
Ok(info)
}
Err(e) => {
server_connected.write().0 = false;
Err(e)
}
}
}

View File

@@ -0,0 +1,65 @@
use crate::{api, Route, ServerAddress};
use dioxus::prelude::*;
use dioxus_free_icons::{icons::fa_solid_icons::FaChevronLeft, Icon};
#[component]
pub fn Layout() -> Element {
let sidebar_collapsed = use_signal(|| false);
rsx! {
div { class: "app-container",
Header {}
div { class: "content-container",
Sidebar { collapsed: sidebar_collapsed }
main { class: if *sidebar_collapsed.read() { "main-content expanded" } else { "main-content" },
Outlet::<Route> {}
}
}
}
}
}
#[component]
pub fn Header() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_node_info = use_resource(move || api::get_node_info(server_addr.read().0));
rsx! {
header {
h1 { "Mycelium Network Dashboard" }
div { class: "node-info",
{ match &*fetched_node_info.read_unchecked() {
Some(Ok(info)) => rsx! {
span { "Subnet: {info.node_subnet}" }
span { class: "separator", "|" }
span { "Public Key: {info.node_pubkey}" }
},
Some(Err(_)) => rsx! { span { "Error loading node info" } },
None => rsx! { span { "Loading node info..." } },
}}
}
}
}
}
#[component]
pub fn Sidebar(collapsed: Signal<bool>) -> Element {
rsx! {
nav { class: if *collapsed.read() { "sidebar collapsed" } else { "sidebar" },
ul {
li { Link { to: Route::Home {}, "Home" } }
li { Link { to: Route::Peers {}, "Peers" } }
li { Link { to: Route::Routes {}, "Routes" } }
}
}
button { class: if *collapsed.read() { "toggle-sidebar collapsed" } else { "toggle-sidebar" },
onclick: {
let c = *collapsed.read();
move |_| collapsed.set(!c)
},
Icon {
icon: FaChevronLeft,
}
}
}
}

View File

@@ -0,0 +1,521 @@
use crate::get_sort_indicator;
use crate::{api, SearchState, ServerAddress, SortDirection, StopFetchingPeerSignal};
use dioxus::prelude::*;
use dioxus_charts::LineChart;
use human_bytes::human_bytes;
use mycelium::{
endpoint::Endpoint,
peer_manager::{PeerStats, PeerType},
};
use std::{
cmp::Ordering,
collections::{HashMap, HashSet, VecDeque},
str::FromStr,
};
use tracing::{error, info};
const REFRESH_RATE_MS: u64 = 500;
const MAX_DATA_POINTS: usize = 8; // displays last 4 seconds
#[component]
pub fn Peers() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
let error = use_signal(|| None::<String>);
let peer_data = use_signal(HashMap::<Endpoint, PeerStats>::new);
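// Background task: poll the peers endpoint every REFRESH_RATE_MS, and stop
// once StopFetchingPeerSignal is raised (user navigated away from this page).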
let _ = use_resource(move || async move {
to_owned![server_addr, error, peer_data];
loop {
let stop_fetching_signal = use_context::<Signal<StopFetchingPeerSignal>>().read().0;
if !stop_fetching_signal {
match api::get_peers(server_addr).await {
Ok(fetched_peers) => {
peer_data.with_mut(|data| {
// Collect the endpoints of the fetched peers
let fetched_endpoints: HashSet<Endpoint> =
fetched_peers.iter().map(|peer| peer.endpoint).collect();
// Remove peers that are no longer in the fetched data
data.retain(|endpoint, _| fetched_endpoints.contains(endpoint));
// Insert or update the fetched peers
for peer in fetched_peers {
data.insert(peer.endpoint, peer);
}
});
error.set(None);
}
Err(e) => {
eprintln!("Error fetching peers: {}", e);
error.set(Some(format!(
"An error has occurred while fetching peers: {}",
e
)))
}
}
} else {
break;
}
tokio::time::sleep(tokio::time::Duration::from_millis(REFRESH_RATE_MS)).await;
}
});
rsx! {
if let Some(err) = error.read().as_ref() {
div { class: "error-message", "{err}" }
} else {
PeersTable { peer_data: peer_data }
}
}
}
#[component]
fn PeersTable(peer_data: Signal<HashMap<Endpoint, PeerStats>>) -> Element {
let mut current_page = use_signal(|| 0);
let items_per_page = 20;
let mut sort_column = use_signal(|| "Protocol".to_string());
let mut sort_direction = use_signal(|| SortDirection::Ascending);
let peers_len = peer_data.read().len();
// Pagination
let mut change_page = move |delta: i32| {
let cur_page = *current_page.read() as i32;
current_page.set(
(cur_page + delta)
.max(0)
.min(peers_len.saturating_sub(1) as i32 / items_per_page),
);
};
// Sorting
let mut sort_peers_signal = move |column: String| {
if column == *sort_column.read() {
let new_sort_direction = match *sort_direction.read() {
SortDirection::Ascending => SortDirection::Descending,
SortDirection::Descending => SortDirection::Ascending,
};
sort_direction.set(new_sort_direction);
} else {
sort_column.set(column);
sort_direction.set(SortDirection::Descending);
}
// When sorting, we should jump back to the first page
current_page.set(0);
};
let sorted_peers = use_memo(move || {
let mut peers = peer_data.read().values().cloned().collect::<Vec<_>>();
sort_peers(&mut peers, &sort_column.read(), &sort_direction.read());
peers
});
// Searching
let mut search_state = use_signal(|| SearchState {
query: String::new(),
column: "Protocol".to_string(),
});
let filtered_peers = use_memo(move || {
let query = search_state.read().query.to_lowercase();
let column = &search_state.read().column;
let sorted_peers = sorted_peers.read();
sorted_peers
.iter()
.filter(|peer| match column.as_str() {
"Protocol" => peer
.endpoint
.proto()
.to_string()
.to_lowercase()
.contains(&query),
"Address" => peer
.endpoint
.address()
.ip()
.to_string()
.to_lowercase()
.contains(&query),
"Port" => peer
.endpoint
.address()
.port()
.to_string()
.to_lowercase()
.contains(&query),
"Type" => peer.pt.to_string().to_lowercase().contains(&query),
"Connection State" => peer
.connection_state
.to_string()
.to_lowercase()
.contains(&query),
"Tx bytes" => peer.tx_bytes.to_string().to_lowercase().contains(&query),
"Rx bytes" => peer.rx_bytes.to_string().to_lowercase().contains(&query),
_ => false,
})
.cloned()
.collect::<Vec<PeerStats>>()
});
let peers_len = filtered_peers.read().len();
let start = current_page * items_per_page;
let end = (start + items_per_page).min(peers_len as i32);
let current_peers = filtered_peers.read()[start as usize..end as usize].to_vec();
// Expanding peer to show rx/tx bytes graphs
let mut expanded_rows = use_signal(|| ExpandedRows(HashSet::new()));
let mut toggle_row_expansion = move |peer_endpoint: String| {
expanded_rows.with_mut(|rows| {
if rows.0.contains(&peer_endpoint) {
rows.0.remove(&peer_endpoint);
} else {
rows.0.insert(peer_endpoint);
}
});
};
let toggle_add_peer_input = use_signal(|| true); //TODO: fix UX for adding peer
let mut add_peer_error = use_signal(|| None::<String>);
let add_peer = move |peer_endpoint: String| {
spawn(async move {
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
// Check correct endpoint format and add peer
match Endpoint::from_str(&peer_endpoint) {
Ok(_) => {
if let Err(e) = api::add_peer(server_addr, peer_endpoint.clone()).await {
error!("Error adding peer: {e}");
add_peer_error.set(Some(format!("Error adding peer: {}", e)));
} else {
info!("Succesfully added peer: {peer_endpoint}");
}
}
Err(e) => {
error!("Incorrect peer endpoint: {e}");
add_peer_error.set(Some(format!("Incorrect peer endpoint: {}", e)));
}
}
});
};
let mut new_peer_endpoint = use_signal(|| "".to_string());
rsx! {
div { class: "peers-table",
h2 { "Peers" }
div { class: "search-and-add-container",
div { class: "search-container",
input {
placeholder: "Search...",
value: "{search_state.read().query}",
oninput: move |evt| search_state.write().query.clone_from(&evt.value()),
}
select {
value: "{search_state.read().column}",
onchange: move |evt| search_state.write().column.clone_from(&evt.value()),
option { value: "Protocol", "Protocol" }
option { value: "Address", "Address" }
option { value: "Port", "Port" }
option { value: "Type", "Type" }
option { value: "Connection State", "Connection State" }
option { value: "Tx bytes", "Tx bytes" }
option { value: "Rx bytes", "Rx bytes" }
}
}
div { class: "add-peer-container",
div { class: "add-peer-input-button",
if *toggle_add_peer_input.read() {
div { class: "expanded-add-peer-container",
input {
placeholder: "tcp://ipaddr:port",
oninput: move |evt| new_peer_endpoint.set(evt.value())
}
}
}
button {
onclick: move |_| add_peer(new_peer_endpoint.read().to_string()),
"Add peer"
}
}
if let Some(error) = add_peer_error.read().as_ref() {
div { class: "add-peer-error", "{error}" }
}
}
}
div { class: "table-container",
table {
thead {
tr {
th { class: "protocol-column",
onclick: move |_| sort_peers_signal("Protocol".to_string()),
"Protocol {get_sort_indicator(sort_column, sort_direction, \"Protocol\".to_string())}"
}
th { class: "address-column",
onclick: move |_| sort_peers_signal("Address".to_string()),
"Address {get_sort_indicator(sort_column, sort_direction, \"Address\".to_string())}"
}
th { class: "port-column",
onclick: move |_| sort_peers_signal("Port".to_string()),
"Port {get_sort_indicator(sort_column, sort_direction, \"Port\".to_string())}"
}
th { class: "type-column",
onclick: move |_| sort_peers_signal("Type".to_string()),
"Type {get_sort_indicator(sort_column, sort_direction, \"Type\".to_string())}"
}
th { class: "connection-state-column",
onclick: move |_| sort_peers_signal("Connection State".to_string()),
"Connection State {get_sort_indicator(sort_column, sort_direction, \"Connection State\".to_string())}"
}
th { class: "tx-bytes-column",
onclick: move |_| sort_peers_signal("Tx bytes".to_string()),
"Tx bytes {get_sort_indicator(sort_column, sort_direction, \"Tx bytes\".to_string())}"
}
th { class: "rx-bytes-column",
onclick: move |_| sort_peers_signal("Rx bytes".to_string()),
"Rx bytes {get_sort_indicator(sort_column, sort_direction, \"Rx bytes\".to_string())}"
}
}
}
tbody {
for peer in current_peers.into_iter() {
tr {
onclick: move |_| toggle_row_expansion(peer.endpoint.to_string()),
td { class: "protocol-column", "{peer.endpoint.proto()}" }
td { class: "address-column", "{peer.endpoint.address().ip()}" }
td { class: "port-column", "{peer.endpoint.address().port()}" }
td { class: "type-column", "{peer.pt}" }
td { class: "connection-state-column", "{peer.connection_state}" }
td { class: "tx-bytes-column", "{human_bytes(peer.tx_bytes as f64)}" }
td { class: "rx-bytes-column", "{human_bytes(peer.rx_bytes as f64)}" }
}
{
let peer_expanded = expanded_rows.read().0.contains(&peer.endpoint.to_string());
if peer_expanded {
rsx! {
ExpandedPeerRow {
peer_endpoint: peer.endpoint,
peer_data: peer_data,
on_close: move |_| toggle_row_expansion(peer.endpoint.to_string()),
}
}
} else {
rsx! {}
}
}
}
}
}
}
div { class: "pagination",
button {
disabled: *current_page.read() == 0,
onclick: move |_| change_page(-1),
"Previous"
}
span { "Page {current_page + 1}" }
button {
disabled: (current_page + 1) * items_per_page >= peers_len as i32,
onclick: move |_| change_page(1),
"Next"
}
}
}
}
}
#[derive(Clone)]
struct BandwidthData {
tx_bytes: u64,
rx_bytes: u64,
timestamp: tokio::time::Duration,
}
#[component]
fn ExpandedPeerRow(
peer_endpoint: Endpoint,
peer_data: Signal<HashMap<Endpoint, PeerStats>>,
on_close: EventHandler<()>,
) -> Element {
let bandwidth_data = use_signal(VecDeque::<BandwidthData>::new);
let start_time = use_signal(tokio::time::Instant::now);
use_future(move || {
to_owned![bandwidth_data, start_time, peer_data, peer_endpoint];
async move {
let mut last_tx = 0;
let mut last_rx = 0;
if let Some(peer_stats) = peer_data.read().get(&peer_endpoint) {
last_tx = peer_stats.tx_bytes;
last_rx = peer_stats.rx_bytes;
}
loop {
let current_time = tokio::time::Instant::now();
let elapsed_time = current_time.duration_since(*start_time.read());
if let Some(peer_stats) = peer_data.read().get(&peer_endpoint) {
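// Convert the absolute byte counters into rates (bytes per second) over the
// elapsed refresh interval.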
let tx_rate =
(peer_stats.tx_bytes - last_tx) as f64 / (REFRESH_RATE_MS as f64 / 1000.0);
let rx_rate =
(peer_stats.rx_bytes - last_rx) as f64 / (REFRESH_RATE_MS as f64 / 1000.0);
bandwidth_data.with_mut(|data| {
let new_data = BandwidthData {
tx_bytes: tx_rate as u64,
rx_bytes: rx_rate as u64,
timestamp: elapsed_time,
};
data.push_back(new_data);
if data.len() > MAX_DATA_POINTS {
data.pop_front();
}
});
last_tx = peer_stats.tx_bytes;
last_rx = peer_stats.rx_bytes;
}
tokio::time::sleep(tokio::time::Duration::from_millis(REFRESH_RATE_MS)).await;
}
}
});
let tx_data = use_memo(move || {
bandwidth_data
.read()
.iter()
.map(|d| d.tx_bytes as f32)
.collect::<Vec<f32>>()
});
let rx_data = use_memo(move || {
bandwidth_data
.read()
.iter()
.map(|d| d.rx_bytes as f32)
.collect::<Vec<f32>>()
});
let labels = bandwidth_data
.read()
.iter()
.map(|d| format!("{:.1}", d.timestamp.as_secs_f64()))
.collect::<Vec<String>>();
let remove_peer = move |_| {
spawn(async move {
println!("Removing peer: {}", peer_endpoint);
let server_addr = use_context::<Signal<ServerAddress>>().read().0;
match api::remove_peer(server_addr, peer_endpoint).await {
Ok(_) => on_close.call(()),
Err(e) => eprintln!("Error removing peer: {e}"),
}
});
};
rsx! {
tr { class: "expanded-row",
td { colspan: "7",
div { class: "expanded-content",
div { class: "graph-container",
// Tx chart
div { class: "graph-title", "Tx Bytes/s" }
LineChart {
show_grid: false,
show_dots: false,
padding_top: 80,
padding_left: 100,
padding_right: 80,
padding_bottom: 80,
label_interpolation: (|v| human_bytes(v as f64).to_string()) as fn(f32)-> String,
series: vec![tx_data.read().to_vec()],
labels: labels.clone(),
series_labels: vec!["Tx Bytes/s".into()],
}
}
div { class: "graph-container",
// Rx chart
div { class: "graph-title", "Rx Bytes/s" }
LineChart {
show_grid: false,
show_dots: false,
padding_top: 80,
padding_left: 100,
padding_right: 80,
padding_bottom: 80,
label_interpolation: (|v| human_bytes(v as f64).to_string()) as fn(f32)-> String,
series: vec![rx_data.read().clone()],
labels: labels.clone(),
series_labels: vec!["Rx Bytes/s".into()],
}
}
div { class: "button-container",
button { class: "close-button",
onclick: move |_| on_close.call(()),
"Close"
}
button { class: "remove-button",
onclick: remove_peer,
"Remove peer"
}
}
}
}
}
}
}
#[derive(Clone, PartialEq)]
struct ExpandedRows(HashSet<String>);
fn sort_peers(
peers: &mut [mycelium::peer_manager::PeerStats],
column: &str,
direction: &SortDirection,
) {
peers.sort_by(|a, b| {
let cmp = match column {
"Protocol" => a.endpoint.proto().cmp(&b.endpoint.proto()),
"Address" => a.endpoint.address().ip().cmp(&b.endpoint.address().ip()),
"Port" => a
.endpoint
.address()
.port()
.cmp(&b.endpoint.address().port()),
"Type" => PeerTypeWrapper(a.pt.clone()).cmp(&PeerTypeWrapper(b.pt.clone())),
"Connection State" => a.connection_state.cmp(&b.connection_state),
"Tx bytes" => a.tx_bytes.cmp(&b.tx_bytes),
"Rx bytes" => a.rx_bytes.cmp(&b.rx_bytes),
_ => Ordering::Equal,
};
match direction {
SortDirection::Ascending => cmp,
SortDirection::Descending => cmp.reverse(),
}
});
}
pub struct PeerTypeWrapper(pub mycelium::peer_manager::PeerType);
impl Ord for PeerTypeWrapper {
fn cmp(&self, other: &Self) -> Ordering {
match (&self.0, &other.0) {
(PeerType::Static, PeerType::Static) => Ordering::Equal,
(PeerType::Static, _) => Ordering::Less,
(PeerType::LinkLocalDiscovery, PeerType::Static) => Ordering::Greater,
(PeerType::LinkLocalDiscovery, PeerType::LinkLocalDiscovery) => Ordering::Equal,
(PeerType::LinkLocalDiscovery, PeerType::Inbound) => Ordering::Less,
(PeerType::Inbound, PeerType::Inbound) => Ordering::Equal,
(PeerType::Inbound, _) => Ordering::Greater,
}
}
}
impl PartialOrd for PeerTypeWrapper {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl PartialEq for PeerTypeWrapper {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl Eq for PeerTypeWrapper {}
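// Editor's addition: a small sanity check for the ordering defined above,
// pinning Static < LinkLocalDiscovery < Inbound.
#[cfg(test)]
mod tests {
use super::PeerTypeWrapper;
use mycelium::peer_manager::PeerType;
use std::cmp::Ordering;
#[test]
fn peer_type_ordering() {
assert_eq!(
PeerTypeWrapper(PeerType::Static).cmp(&PeerTypeWrapper(PeerType::LinkLocalDiscovery)),
Ordering::Less
);
assert_eq!(
PeerTypeWrapper(PeerType::LinkLocalDiscovery).cmp(&PeerTypeWrapper(PeerType::Inbound)),
Ordering::Less
);
assert_eq!(
PeerTypeWrapper(PeerType::Inbound).cmp(&PeerTypeWrapper(PeerType::Static)),
Ordering::Greater
);
}
}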

View File

@@ -0,0 +1,193 @@
use std::cmp::Ordering;
use crate::api;
use crate::{get_sort_indicator, SearchState, ServerAddress, SortDirection};
use dioxus::prelude::*;
#[component]
pub fn Routes() -> Element {
rsx! {
SelectedRoutesTable {}
FallbackRoutesTable {}
}
}
#[component]
pub fn SelectedRoutesTable() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_selected_routes =
use_resource(move || api::get_selected_routes(server_addr.read().0));
match &*fetched_selected_routes.read_unchecked() {
Some(Ok(routes)) => {
rsx! { RoutesTable { routes: routes.clone(), table_name: "Selected"} }
}
Some(Err(e)) => rsx! { div { "An error has occurred while fetching selected routes: {e}" }},
None => rsx! { div { "Loading selected routes..." }},
}
}
#[component]
pub fn FallbackRoutesTable() -> Element {
let server_addr = use_context::<Signal<ServerAddress>>();
let fetched_fallback_routes =
use_resource(move || api::get_fallback_routes(server_addr.read().0));
match &*fetched_fallback_routes.read_unchecked() {
Some(Ok(routes)) => {
rsx! { RoutesTable { routes: routes.clone(), table_name: "Fallback"} }
}
Some(Err(e)) => rsx! { div { "An error has occurred while fetching fallback routes: {e}" }},
None => rsx! { div { "Loading fallback routes..." }},
}
}
#[component]
fn RoutesTable(routes: Vec<mycelium_api::Route>, table_name: String) -> Element {
let mut current_page = use_signal(|| 0);
let items_per_page = 10;
let mut sort_column = use_signal(|| "Subnet".to_string());
let mut sort_direction = use_signal(|| SortDirection::Descending);
let routes_len = routes.len();
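// Pagination: clamp the requested page between the first and last page.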
let mut change_page = move |delta: i32| {
let cur_page = *current_page.read() as i32;
current_page.set(
(cur_page + delta)
.max(0)
.min(routes_len.saturating_sub(1) as i32 / items_per_page as i32) as usize,
);
};
let mut sort_routes_signal = move |column: String| {
if column == *sort_column.read() {
let new_sort_direction = match *sort_direction.read() {
SortDirection::Ascending => SortDirection::Descending,
SortDirection::Descending => SortDirection::Ascending,
};
sort_direction.set(new_sort_direction);
} else {
sort_column.set(column);
sort_direction.set(SortDirection::Ascending);
}
current_page.set(0);
};
let sorted_routes = use_memo(move || {
let mut sorted = routes.clone();
sort_routes(&mut sorted, &sort_column.read(), &sort_direction.read());
sorted
});
let mut search_state = use_signal(|| SearchState {
query: String::new(),
column: "Subnet".to_string(),
});
let filtered_routes = use_memo(move || {
let query = search_state.read().query.to_lowercase();
let column = &search_state.read().column;
sorted_routes
.read()
.iter()
.filter(|route| match column.as_str() {
"Subnet" => route.subnet.to_string().to_lowercase().contains(&query),
"Next-hop" => route.next_hop.to_string().to_lowercase().contains(&query),
"Metric" => route.metric.to_string().to_lowercase().contains(&query),
"Seqno" => route.seqno.to_string().to_lowercase().contains(&query),
_ => false,
})
.cloned()
.collect::<Vec<_>>()
});
let routes_len = filtered_routes.len();
let start = current_page * items_per_page;
let end = (start + items_per_page).min(routes_len);
let current_routes = &filtered_routes.read()[start..end];
rsx! {
div { class: "{table_name.to_lowercase()}-routes",
h2 { "{table_name} Routes" }
div { class: "search-container",
input {
placeholder: "Search...",
value: "{search_state.read().query}",
oninput: move |evt| search_state.write().query.clone_from(&evt.value()),
}
select {
value: "{search_state.read().column}",
onchange: move |evt| search_state.write().column.clone_from(&evt.value()),
option { value: "Subnet", "Subnet" }
option { value: "Next-hop", "Next-hop" }
option { value: "Metric", "Metric" }
option { value: "Seqno", "Seqno" }
}
}
div { class: "table-container",
table {
thead {
tr {
th { class: "subnet-column",
onclick: move |_| sort_routes_signal("Subnet".to_string()),
"Subnet {get_sort_indicator(sort_column, sort_direction, \"Subnet\".to_string())}"
}
th { class: "next-hop-column",
onclick: move |_| sort_routes_signal("Next-hop".to_string()),
"Next-hop {get_sort_indicator(sort_column, sort_direction, \"Next-hop\".to_string())}"
}
th { class: "metric-column",
onclick: move |_| sort_routes_signal("Metric".to_string()),
"Metric {get_sort_indicator(sort_column, sort_direction, \"Metric\".to_string())}"
}
th { class: "seqno_column",
onclick: move |_| sort_routes_signal("Seqno".to_string()),
"Seqno {get_sort_indicator(sort_column, sort_direction, \"Seqno\".to_string())}"
}
}
}
tbody {
for route in current_routes {
tr {
td { class: "subnet-column", "{route.subnet}" }
td { class: "next-hop-column", "{route.next_hop}" }
td { class: "metric-column", "{route.metric}" }
td { class: "seqno-column", "{route.seqno}" }
}
}
}
}
}
div { class: "pagination",
button {
disabled: *current_page.read() == 0,
onclick: move |_| change_page(-1),
"Previous"
}
span { "Page {current_page + 1}" }
button {
disabled: (current_page + 1) * items_per_page >= routes_len,
onclick: move |_| change_page(1),
"Next"
}
}
}
}
}
fn sort_routes(routes: &mut [mycelium_api::Route], column: &str, direction: &SortDirection) {
routes.sort_by(|a, b| {
let cmp = match column {
"Subnet" => a.subnet.cmp(&b.subnet),
"Next-hop" => a.next_hop.cmp(&b.next_hop),
"Metric" => a.metric.cmp(&b.metric),
"Seqno" => a.seqno.cmp(&b.seqno),
_ => Ordering::Equal,
};
match direction {
SortDirection::Ascending => cmp,
SortDirection::Descending => cmp.reverse(),
}
});
}

View File

@@ -0,0 +1,118 @@
#![allow(non_snake_case)]
// Disable terminal popup on Windows
#![cfg_attr(feature = "bundle", windows_subsystem = "windows")]
mod api;
mod components;
use components::home::Home;
use components::peers::Peers;
use components::routes::Routes;
use dioxus::prelude::*;
use mycelium::{endpoint::Endpoint, peer_manager::PeerStats};
use std::{
collections::HashMap,
net::{IpAddr, Ipv4Addr, SocketAddr},
};
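// Bundling the stylesheet: the `asset!` macro registers the file at build time,
// and binding it to `const _` keeps it referenced so it ships with the app.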
const _: manganis::Asset = manganis::asset!("assets/styles.css");
const DEFAULT_SERVER_ADDR: SocketAddr =
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8989);
fn main() {
// Init logger
dioxus_logger::init(tracing::Level::INFO).expect("failed to init logger");
let config = dioxus::desktop::Config::new()
.with_custom_head(r#"<link rel="stylesheet" href="styles.css">"#.to_string());
LaunchBuilder::desktop().with_cfg(config).launch(App);
// dioxus::launch(App);
}
#[component]
fn App() -> Element {
use_context_provider(|| Signal::new(ServerAddress(DEFAULT_SERVER_ADDR)));
use_context_provider(|| Signal::new(ServerConnected(false)));
use_context_provider(|| {
Signal::new(PeerSignalMapping(
HashMap::<Endpoint, Signal<PeerStats>>::new(),
))
});
use_context_provider(|| Signal::new(StopFetchingPeerSignal(false)));
rsx! {
Router::<Route> {
config: || {
RouterConfig::default().on_update(|state| {
use_context::<Signal<StopFetchingPeerSignal>>().write().0 = state.current() != Route::Peers {};
(state.current() == Route::Peers {}).then_some(NavigationTarget::Internal(Route::Peers {}))
})
}
}
}
}
#[derive(Clone, Routable, Debug, PartialEq)]
#[rustfmt::skip]
pub enum Route {
#[layout(components::layout::Layout)]
#[route("/")]
Home {},
#[route("/peers")]
Peers,
#[route("/routes")]
Routes,
#[end_layout]
#[route("/:..route")]
PageNotFound { route: Vec<String> },
}
//
#[derive(Clone, PartialEq)]
struct SearchState {
query: String,
column: String,
}
// This signal is used to stop the loop that keeps fetching information about the peers when
// looking at the peers table, e.g. when the user goes back to Home or Routes page.
#[derive(Clone, PartialEq)]
struct StopFetchingPeerSignal(bool);
#[derive(Clone, PartialEq)]
struct ServerAddress(SocketAddr);
#[derive(Clone, PartialEq)]
struct ServerConnected(bool);
#[derive(Clone, PartialEq)]
struct PeerSignalMapping(HashMap<Endpoint, Signal<PeerStats>>);
pub fn get_sort_indicator(
sort_column: Signal<String>,
sort_direction: Signal<SortDirection>,
column: String,
) -> String {
if *sort_column.read() == column {
match *sort_direction.read() {
SortDirection::Ascending => "".to_string(),
SortDirection::Descending => "".to_string(),
}
} else {
"".to_string()
}
}
#[component]
fn PageNotFound(route: Vec<String>) -> Element {
rsx! {
p { "Page not found"}
}
}
#[derive(Clone)]
pub enum SortDirection {
Ascending,
Descending,
}

View File

@@ -0,0 +1,79 @@
[package]
name = "mycelium"
version = "0.6.1"
edition = "2021"
license-file = "../LICENSE"
readme = "../README.md"
[features]
message = []
private-network = ["dep:openssl", "dep:tokio-openssl"]
vendored-openssl = ["openssl/vendored"]
mactunfd = [
"tun/appstore",
] # mactunfd: have macOS provide the tun file descriptor instead of the tun name
[dependencies]
cdn-meta = { git = "https://github.com/threefoldtech/mycelium-cdn-registry", package = "cdn-meta" }
tokio = { version = "1.46.1", features = [
"io-util",
"fs",
"macros",
"net",
"sync",
"time",
"rt-multi-thread", # FIXME: remove once tokio::task::block_in_place calls are resolved
] }
tokio-util = { version = "0.7.15", features = ["codec"] }
futures = "0.3.31"
serde = { version = "1.0.219", features = ["derive"] }
rand = "0.9.1"
bytes = "1.10.1"
x25519-dalek = { version = "2.0.1", features = ["getrandom", "static_secrets"] }
aes-gcm = "0.10.3"
tracing = { version = "0.1.41", features = ["release_max_level_debug"] }
tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
tracing-logfmt = { version = "0.3.5", features = ["ansi_logs"] }
faster-hex = "0.10.0"
tokio-stream = { version = "0.1.17", features = ["sync"] }
left-right = "0.11.5"
ipnet = "2.11.0"
ip_network_table-deps-treebitmap = "0.5.0"
blake3 = "1.8.2"
etherparse = "0.18.0"
quinn = { version = "0.11.8", default-features = false, features = [
"runtime-tokio",
"rustls",
] }
rustls = { version = "0.23.29", default-features = false, features = ["ring"] }
rcgen = "0.14.2"
netdev = "0.36.0"
openssl = { version = "0.10.73", optional = true }
tokio-openssl = { version = "0.6.5", optional = true }
arc-swap = "1.7.1"
dashmap = { version = "6.1.0", features = ["inline"] }
ahash = "0.8.11"
axum = "0.8.4"
axum-extra = "0.10.1"
reqwest = "0.12.22"
redis = { version = "0.32.4", features = ["tokio-comp"] }
reed-solomon-erasure = "6.0.0"
[target.'cfg(target_os = "linux")'.dependencies]
rtnetlink = "0.17.0"
tokio-tun = "0.13.2"
nix = { version = "0.30.1", features = ["socket"] }
[target.'cfg(target_os = "macos")'.dependencies]
tun = { git = "https://github.com/LeeSmet/rust-tun", features = ["async"] }
libc = "0.2.174"
nix = { version = "0.29.0", features = ["net", "socket", "ioctl"] }
[target.'cfg(target_os = "windows")'.dependencies]
wintun = "0.5.1"
[target.'cfg(target_os = "android")'.dependencies]
tun = { git = "https://github.com/LeeSmet/rust-tun", features = ["async"] }
[target.'cfg(target_os = "ios")'.dependencies]
tun = { git = "https://github.com/LeeSmet/rust-tun", features = ["async"] }

View File

@@ -0,0 +1,321 @@
//! This module contains babel related structs.
//!
//! We don't fully implement the babel spec, and items which are implemented might deviate to fit
//! our specific use case. For reference, the implementation is based on [this
//! RFC](https://datatracker.ietf.org/doc/html/rfc8966).
use std::io;
use bytes::{Buf, BufMut};
use tokio_util::codec::{Decoder, Encoder};
use tracing::trace;
pub use self::{
hello::Hello, ihu::Ihu, route_request::RouteRequest, seqno_request::SeqNoRequest,
update::Update,
};
pub use self::tlv::Tlv;
mod hello;
mod ihu;
mod route_request;
mod seqno_request;
mod tlv;
mod update;
/// Magic byte to identify babel protocol packet.
const BABEL_MAGIC: u8 = 42;
/// The version of the protocol we are currently using.
const BABEL_VERSION: u8 = 3;
/// Size of a babel header on the wire.
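/// It covers the magic byte, the version byte and the 16-bit body length.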
const HEADER_WIRE_SIZE: usize = 4;
/// TLV type for the [`Hello`] tlv
const TLV_TYPE_HELLO: u8 = 4;
/// TLV type for the [`Ihu`] tlv
const TLV_TYPE_IHU: u8 = 5;
/// TLV type for the [`Update`] tlv
const TLV_TYPE_UPDATE: u8 = 8;
/// TLV type for the [`RouteRequest`] tlv
const TLV_TYPE_ROUTE_REQUEST: u8 = 9;
/// TLV type for the [`SeqNoRequest`] tlv
const TLV_TYPE_SEQNO_REQUEST: u8 = 10;
/// Wildcard address, the value is empty (0 bytes length).
const AE_WILDCARD: u8 = 0;
/// IPv4 address, the value is _at most_ 4 bytes long.
const AE_IPV4: u8 = 1;
/// IPv6 address, the value is _at most_ 16 bytes long.
const AE_IPV6: u8 = 2;
/// Link-local IPv6 address, the value is 8 bytes long. This implies a `fe80::/64` prefix.
const AE_IPV6_LL: u8 = 3;
/// A codec which can send and receive whole babel packets on the wire.
#[derive(Debug, Clone)]
pub struct Codec {
header: Option<Header>,
}
impl Codec {
/// Create a new `BabelCodec`.
pub fn new() -> Self {
Self { header: None }
}
/// Resets the `BabelCodec` to its default state.
pub fn reset(&mut self) {
self.header = None;
}
}
/// The header for a babel packet. This follows the definition of the header [in the
/// RFC](https://datatracker.ietf.org/doc/html/rfc8966#name-packet-format). Since the header
/// contains only hard-coded fields and the length of an encoded body, there is no need for users
/// to manually construct this. In fact, it exists only to make our lives slightly easier in
/// reading/writing the header on the wire.
#[derive(Debug, Clone)]
struct Header {
magic: u8,
version: u8,
/// This is the length of the whole body following this header. Also excludes any possible
/// trailers.
body_length: u16,
}
impl Decoder for Codec {
type Item = Tlv;
type Error = io::Error;
fn decode(&mut self, src: &mut bytes::BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// Read a header if we don't have one yet.
let header = if let Some(header) = self.header.take() {
trace!("Continue from stored header");
header
} else {
if src.remaining() < HEADER_WIRE_SIZE {
trace!("Insufficient bytes to read a babel header");
return Ok(None);
}
trace!("Read babel header");
Header {
magic: src.get_u8(),
version: src.get_u8(),
body_length: src.get_u16(),
}
};
if src.remaining() < header.body_length as usize {
trace!("Insufficient bytes to read babel body");
self.header = Some(header);
return Ok(None);
}
// Silently ignore packets which don't have the correct values set, as defined in the
// spec. Note that we consume the number of bytes identified so we leave the parser in the
// correct state for the next packet.
if header.magic != BABEL_MAGIC || header.version != BABEL_VERSION {
trace!("Dropping babel packet with wrong magic or version");
src.advance(header.body_length as usize);
self.reset();
return Ok(None);
}
// At this point we have a whole body loaded in the buffer. We currently don't support
// sub-TLVs
trace!("Read babel TLV body");
// TODO: Technically we need to loop here as we can have multiple TLVs.
// TLV header
let tlv_type = src.get_u8();
let body_len = src.get_u8();
// TLV payload
let tlv = match tlv_type {
TLV_TYPE_HELLO => Some(Hello::from_bytes(src).into()),
TLV_TYPE_IHU => Ihu::from_bytes(src, body_len).map(From::from),
TLV_TYPE_UPDATE => Update::from_bytes(src, body_len).map(From::from),
TLV_TYPE_ROUTE_REQUEST => RouteRequest::from_bytes(src, body_len).map(From::from),
TLV_TYPE_SEQNO_REQUEST => SeqNoRequest::from_bytes(src, body_len).map(From::from),
_ => {
// unrecognized body type, silently drop
trace!("Dropping unrecognized tlv");
// We already read 2 bytes
src.advance(header.body_length as usize - 2);
self.reset();
return Ok(None);
}
};
Ok(tlv)
}
}
impl Encoder<Tlv> for Codec {
type Error = io::Error;
fn encode(&mut self, item: Tlv, dst: &mut bytes::BytesMut) -> Result<(), Self::Error> {
// Write header
dst.put_u8(BABEL_MAGIC);
dst.put_u8(BABEL_VERSION);
dst.put_u16(item.wire_size() as u16 + 2); // tlv payload + tlv header
// Write TLVs. TODO: currently only 1 TLV per body
// TLV header
match item {
Tlv::Hello(_) => dst.put_u8(TLV_TYPE_HELLO),
Tlv::Ihu(_) => dst.put_u8(TLV_TYPE_IHU),
Tlv::Update(_) => dst.put_u8(TLV_TYPE_UPDATE),
Tlv::RouteRequest(_) => dst.put_u8(TLV_TYPE_ROUTE_REQUEST),
Tlv::SeqNoRequest(_) => dst.put_u8(TLV_TYPE_SEQNO_REQUEST),
}
dst.put_u8(item.wire_size());
item.write_bytes(dst);
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{net::Ipv6Addr, time::Duration};
use futures::{SinkExt, StreamExt};
use tokio_util::codec::Framed;
use crate::subnet::Subnet;
#[tokio::test]
async fn codec_hello() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let hello = super::Hello::new_unicast(15.into(), 400);
sender
.send(hello.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_hello = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(hello), recv_hello);
}
#[tokio::test]
async fn codec_ihu() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let ihu = super::Ihu::new(27.into(), 400, None);
sender
.send(ihu.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_ihu = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(ihu), recv_ihu);
}
#[tokio::test]
async fn codec_update() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let update = super::Update::new(
Duration::from_secs(400),
16.into(),
25.into(),
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
[
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
]
.into(),
);
sender
.send(update.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
println!("Sent update packet");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
println!("Received update packet");
assert_eq!(super::Tlv::from(update), recv_update);
}
#[tokio::test]
async fn codec_seqno_request() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let snr = super::SeqNoRequest::new(
16.into(),
[
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
]
.into(),
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
);
sender
.send(snr.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(snr), recv_update);
}
#[tokio::test]
async fn codec_route_request() {
let (tx, rx) = tokio::io::duplex(1024);
let mut sender = Framed::new(tx, super::Codec::new());
let mut receiver = Framed::new(rx, super::Codec::new());
let rr = super::RouteRequest::new(
Some(
Subnet::new(Ipv6Addr::new(0x400, 1, 2, 3, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
),
13,
);
sender
.send(rr.clone().into())
.await
.expect("Send on a non-networked buffer can never fail; qed");
let recv_update = receiver
.next()
.await
.expect("Buffer isn't closed so this is always `Some`; qed")
.expect("Can decode the previously encoded value");
assert_eq!(super::Tlv::from(rr), recv_update);
}
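// Editor's addition: the decoder silently drops packets with a wrong magic or
// version byte while still consuming the advertised body, leaving the stream
// aligned for the next packet.
#[test]
fn decode_drops_wrong_magic() {
use tokio_util::codec::Decoder;
let mut codec = super::Codec::new();
// Header with magic 0 instead of 42, version 3, body length 8, followed by
// an otherwise valid Hello TLV body.
let mut buf = bytes::BytesMut::from(&[0u8, 3, 0, 8, 4, 6, 0, 0, 0, 25, 1, 144][..]);
let decoded = codec
.decode(&mut buf)
.expect("Decoding a bad packet drops it instead of erroring; qed");
assert!(decoded.is_none());
// The whole packet was consumed.
assert!(buf.is_empty());
}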
}

View File

@@ -0,0 +1,162 @@
//! The babel [Hello TLV](https://datatracker.ietf.org/doc/html/rfc8966#section-4.6.5).
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::sequence_number::SeqNo;
/// Flag bit indicating a [`Hello`] is sent as unicast hello.
const HELLO_FLAG_UNICAST: u16 = 0x8000;
/// Mask to apply to [`Hello`] flags, leaving only valid flags.
const FLAG_MASK: u16 = 0b10000000_00000000;
/// Wire size of a [`Hello`] TLV without TLV header.
const HELLO_WIRE_SIZE: u8 = 6;
/// Hello TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#section-4.6.5.
#[derive(Debug, Clone, PartialEq)]
pub struct Hello {
flags: u16,
seqno: SeqNo,
interval: u16,
}
impl Hello {
/// Create a new unicast hello packet.
pub fn new_unicast(seqno: SeqNo, interval: u16) -> Self {
Self {
flags: HELLO_FLAG_UNICAST,
seqno,
interval,
}
}
/// Calculates the size on the wire of this `Hello`.
pub fn wire_size(&self) -> u8 {
HELLO_WIRE_SIZE
}
/// Construct a `Hello` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `Hello`.
pub fn from_bytes(src: &mut bytes::BytesMut) -> Self {
let flags = src.get_u16() & FLAG_MASK;
let seqno = src.get_u16().into();
let interval = src.get_u16();
trace!("Read hello tlv body");
Self {
flags,
seqno,
interval,
}
}
/// Encode this `Hello` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u16(self.flags);
dst.put_u16(self.seqno.into());
dst.put_u16(self.interval);
}
}
#[cfg(test)]
mod tests {
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let hello = super::Hello {
flags: 0,
seqno: 25.into(),
interval: 400,
};
hello.write_bytes(&mut buf);
assert_eq!(buf.len(), 6);
assert_eq!(buf[..6], [0, 0, 0, 25, 1, 144]);
let mut buf = bytes::BytesMut::new();
let hello = super::Hello {
flags: super::HELLO_FLAG_UNICAST,
seqno: 16.into(),
interval: 4000,
};
hello.write_bytes(&mut buf);
assert_eq!(buf.len(), 6);
assert_eq!(buf[..6], [128, 0, 0, 16, 15, 160]);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(&[0b10000000u8, 0b00000000, 0, 19, 2, 1][..]);
let hello = super::Hello {
flags: super::HELLO_FLAG_UNICAST,
seqno: 19.into(),
interval: 513,
};
assert_eq!(super::Hello::from_bytes(&mut buf), hello);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[0b00000000u8, 0b00000000, 1, 19, 200, 100][..]);
let hello = super::Hello {
flags: 0,
seqno: 275.into(),
interval: 51300,
};
assert_eq!(super::Hello::from_bytes(&mut buf), hello);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_flag_bits() {
let mut buf = bytes::BytesMut::from(&[0b10001001u8, 0b00000000, 0, 100, 1, 144][..]);
let hello = super::Hello {
flags: super::HELLO_FLAG_UNICAST,
seqno: 100.into(),
interval: 400,
};
assert_eq!(super::Hello::from_bytes(&mut buf), hello);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[0b00001001u8, 0b00000000, 0, 100, 1, 144][..]);
let hello = super::Hello {
flags: 0,
seqno: 100.into(),
interval: 400,
};
assert_eq!(super::Hello::from_bytes(&mut buf), hello);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let hello_src = super::Hello::new_unicast(16.into(), 400);
hello_src.write_bytes(&mut buf);
let decoded = super::Hello::from_bytes(&mut buf);
assert_eq!(hello_src, decoded);
assert_eq!(buf.remaining(), 0);
}
}

View File

@@ -0,0 +1,246 @@
//! The babel [IHU TLV](https://datatracker.ietf.org/doc/html/rfc8966#name-ihu).
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::metric::Metric;
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Base wire size of an [`Ihu`] without variable length address encoding.
const IHU_BASE_WIRE_SIZE: u8 = 6;
/// IHU TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-ihu.
#[derive(Debug, Clone, PartialEq)]
pub struct Ihu {
rx_cost: Metric,
interval: u16,
address: Option<IpAddr>,
}
impl Ihu {
/// Create a new `Ihu` to be transmitted.
pub fn new(rx_cost: Metric, interval: u16, address: Option<IpAddr>) -> Self {
// An interval of 0 is illegal according to the RFC, as this value is used by the receiver
// to calculate the hold time.
if interval == 0 {
panic!("Ihu interval MUST NOT be 0");
}
Self {
rx_cost,
interval,
address,
}
}
/// Calculates the size on the wire of this `Ihu`.
pub fn wire_size(&self) -> u8 {
IHU_BASE_WIRE_SIZE
+ match self.address {
None => 0,
Some(IpAddr::V4(_)) => 4,
// TODO: link local should be encoded differently
Some(IpAddr::V6(_)) => 16,
}
}
/// Construct a `Ihu` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `Ihu`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let ae = src.get_u8();
// read and ignore reserved byte
let _ = src.get_u8();
let rx_cost = src.get_u16().into();
let interval = src.get_u16();
let address = match ae {
AE_WILDCARD => None,
AE_IPV4 => {
let mut raw_ip = [0; 4];
raw_ip.copy_from_slice(&src[..4]);
src.advance(4);
Some(Ipv4Addr::from(raw_ip).into())
}
AE_IPV6 => {
let mut raw_ip = [0; 16];
raw_ip.copy_from_slice(&src[..16]);
src.advance(16);
Some(Ipv6Addr::from(raw_ip).into())
}
AE_IPV6_LL => {
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Some(Ipv6Addr::from(raw_ip).into())
}
_ => {
// Invalid AE type, skip remaining data and ignore
trace!("Invalid AE type in IHU TLV, drop TLV");
src.advance(len as usize - 6);
return None;
}
};
trace!("Read ihu tlv body");
Some(Self {
rx_cost,
interval,
address,
})
}
/// Encode this `Ihu` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(match self.address {
None => AE_WILDCARD,
Some(IpAddr::V4(_)) => AE_IPV4,
Some(IpAddr::V6(_)) => AE_IPV6,
});
// reserved byte, must be all 0
dst.put_u8(0);
dst.put_u16(self.rx_cost.into());
dst.put_u16(self.interval);
match self.address {
None => {}
Some(IpAddr::V4(ip)) => dst.put_slice(&ip.octets()),
Some(IpAddr::V6(ip)) => dst.put_slice(&ip.octets()),
}
}
}
#[cfg(test)]
mod tests {
use std::net::{Ipv4Addr, Ipv6Addr};
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let ihu = super::Ihu {
rx_cost: 25.into(),
interval: 400,
address: Some(Ipv4Addr::new(1, 1, 1, 1).into()),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 10);
assert_eq!(buf[..10], [1, 0, 0, 25, 1, 144, 1, 1, 1, 1]);
let mut buf = bytes::BytesMut::new();
let ihu = super::Ihu {
rx_cost: 100.into(),
interval: 4000,
address: Some(Ipv6Addr::new(2, 0, 1234, 2345, 3456, 4567, 5678, 1).into()),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 22);
assert_eq!(
buf[..22],
[2, 0, 0, 100, 15, 160, 0, 2, 0, 0, 4, 210, 9, 41, 13, 128, 17, 215, 22, 46, 0, 1]
);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(&[0, 0, 0, 1, 1, 44][..]);
let ihu = super::Ihu {
rx_cost: 1.into(),
interval: 300,
address: None,
};
let buf_len = buf.len();
assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[1, 0, 0, 2, 0, 44, 3, 4, 5, 6][..]);
let ihu = super::Ihu {
rx_cost: 2.into(),
interval: 44,
address: Some(Ipv4Addr::new(3, 4, 5, 6).into()),
};
let buf_len = buf.len();
assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(
&[
2, 0, 0, 2, 0, 44, 4, 0, 0, 0, 0, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
][..],
);
let ihu = super::Ihu {
rx_cost: 2.into(),
interval: 44,
address: Some(Ipv6Addr::new(0x400, 0, 5, 6, 0x708, 0x90a, 0xb0c, 0xd0e).into()),
};
let buf_len = buf.len();
assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[3, 0, 1, 2, 0, 42, 7, 8, 9, 10, 11, 12, 13, 14][..]);
let ihu = super::Ihu {
rx_cost: 258.into(),
interval: 42,
address: Some(Ipv6Addr::new(0xfe80, 0, 0, 0, 0x708, 0x90a, 0xb0c, 0xd0e).into()),
};
let buf_len = buf.len();
assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), Some(ihu));
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4 as it is the first one which would be used in a protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
4, 0, 0, 2, 0, 44, 2, 0, 0, 0, 0, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14,
][..],
);
let buf_len = buf.len();
assert_eq!(super::Ihu::from_bytes(&mut buf, buf_len as u8), None);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let hello_src = super::Ihu::new(
16.into(),
400,
Some(Ipv6Addr::new(156, 5646, 4164, 1236, 872, 960, 10, 844).into()),
);
hello_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::Ihu::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(hello_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}

View File

@@ -0,0 +1,301 @@
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::subnet::Subnet;
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Base wire size of a [`RouteRequest`] without variable length address encoding.
const ROUTE_REQUEST_BASE_WIRE_SIZE: u8 = 3;
/// Route request TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-route-request
#[derive(Debug, Clone, PartialEq)]
pub struct RouteRequest {
/// The prefix being requested
prefix: Option<Subnet>,
/// The requests' generation
generation: u8,
}
impl RouteRequest {
/// Creates a new `RouteRequest` for the given [`prefix`]. If no [`prefix`] is given, a full
/// route table dump is requested.
///
/// [`prefix`]: Subnet
pub fn new(prefix: Option<Subnet>, generation: u8) -> Self {
Self { prefix, generation }
}
/// Return the [`prefix`](Subnet) associated with this `RouteRequest`.
pub fn prefix(&self) -> Option<Subnet> {
self.prefix
}
/// Return the generation of the `RouteRequest`, which is the number of times it has been
/// forwarded already.
pub fn generation(&self) -> u8 {
self.generation
}
/// Increment the generation of the `RouteRequest`.
pub fn inc_generation(&mut self) {
self.generation += 1
}
/// Calculates the size on the wire of this `RouteRequest`.
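/// Only the significant bytes of the prefix are encoded, e.g. a /64 prefix takes 8 bytes.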
pub fn wire_size(&self) -> u8 {
ROUTE_REQUEST_BASE_WIRE_SIZE
+ (if let Some(prefix) = self.prefix {
prefix.prefix_len().div_ceil(8)
} else {
0
})
}
/// Construct a `RouteRequest` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `RouteRequest`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let generation = src.get_u8();
let ae = src.get_u8();
let plen = src.get_u8();
let prefix_size = plen.div_ceil(8) as usize;
let prefix_ip = match ae {
AE_WILDCARD => None,
AE_IPV4 => {
if plen > 32 {
return None;
}
let mut raw_ip = [0; 4];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Some(Ipv4Addr::from(raw_ip).into())
}
AE_IPV6 => {
if plen > 128 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Some(Ipv6Addr::from(raw_ip).into())
}
AE_IPV6_LL => {
if plen != 64 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Some(Ipv6Addr::from(raw_ip).into())
}
_ => {
// Invalid AE type, skip remaining data and ignore
trace!("Invalid AE type in route_request packet, drop packet");
src.advance(len as usize - 3);
return None;
}
};
let prefix = prefix_ip.and_then(|prefix| Subnet::new(prefix, plen).ok());
trace!("Read route_request tlv body");
Some(RouteRequest { prefix, generation })
}
/// Encode this `RouteRequest` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(self.generation);
if let Some(prefix) = self.prefix {
dst.put_u8(match prefix.address() {
IpAddr::V4(_) => AE_IPV4,
IpAddr::V6(_) => AE_IPV6,
});
dst.put_u8(prefix.prefix_len());
let prefix_len = prefix.prefix_len().div_ceil(8) as usize;
match prefix.address() {
IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
}
} else {
dst.put_u8(AE_WILDCARD);
// Prefix len MUST be 0 for wildcard requests
dst.put_u8(0);
}
}
}
#[cfg(test)]
mod tests {
use std::net::{Ipv4Addr, Ipv6Addr};
use bytes::Buf;
use crate::subnet::Subnet;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let rr = super::RouteRequest {
prefix: Some(
Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
),
generation: 2,
};
rr.write_bytes(&mut buf);
assert_eq!(buf.len(), 11);
assert_eq!(buf[..11], [2, 2, 64, 2, 0, 0, 25, 0, 26, 0, 27]);
let mut buf = bytes::BytesMut::new();
let rr = super::RouteRequest {
prefix: Some(
Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 32)
.expect("32 is a valid IPv4 prefix size; qed"),
),
generation: 3,
};
rr.write_bytes(&mut buf);
assert_eq!(buf.len(), 7);
assert_eq!(buf[..7], [3, 1, 32, 10, 101, 4, 1]);
let mut buf = bytes::BytesMut::new();
let rr = super::RouteRequest {
prefix: None,
generation: 0,
};
rr.write_bytes(&mut buf);
assert_eq!(buf.len(), 3);
assert_eq!(buf[..3], [0, 0, 0]);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(&[12, 0, 0][..]);
let rr = super::RouteRequest {
prefix: None,
generation: 12,
};
let buf_len = buf.len();
assert_eq!(
super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
Some(rr)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[24, 1, 24, 10, 15, 19][..]);
let rr = super::RouteRequest {
prefix: Some(
Subnet::new(Ipv4Addr::new(10, 15, 19, 0).into(), 24)
.expect("24 is a valid IPv4 prefix size; qed"),
),
generation: 24,
};
let buf_len = buf.len();
assert_eq!(
super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
Some(rr)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[7, 2, 64, 0, 10, 0, 20, 0, 30, 0, 40][..]);
let rr = super::RouteRequest {
prefix: Some(
Subnet::new(Ipv6Addr::new(10, 20, 30, 40, 0, 0, 0, 0).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
),
generation: 7,
};
let buf_len = buf.len();
assert_eq!(
super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
Some(rr)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(&[4, 3, 64, 0, 10, 0, 20, 0, 30, 0, 40][..]);
let rr = super::RouteRequest {
prefix: Some(
Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
),
generation: 4,
};
let buf_len = buf.len();
assert_eq!(
super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
Some(rr)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4 as it is the first one which would be used in a protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
0, 4, 64, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
][..],
);
let buf_len = buf.len();
assert_eq!(
super::RouteRequest::from_bytes(&mut buf, buf_len as u8),
None
);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let seqno_src = super::RouteRequest::new(
Some(
Subnet::new(
Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed"),
),
27,
);
seqno_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::RouteRequest::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(seqno_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}

View File

@@ -0,0 +1,356 @@
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
num::NonZeroU8,
};
use bytes::{Buf, BufMut};
use tracing::{debug, trace};
use crate::{router_id::RouterId, sequence_number::SeqNo, subnet::Subnet};
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// The default HOP COUNT value used in new SeqNo requests, as per https://datatracker.ietf.org/doc/html/rfc8966#section-3.8.2.1
// SAFETY: value is not zero.
const DEFAULT_HOP_COUNT: NonZeroU8 = NonZeroU8::new(64).unwrap();
/// Base wire size of a [`SeqNoRequest`] without variable length address encoding.
const SEQNO_REQUEST_BASE_WIRE_SIZE: u8 = 6 + RouterId::BYTE_SIZE as u8;
/// Seqno request TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-seqno-request
#[derive(Debug, Clone, PartialEq)]
pub struct SeqNoRequest {
/// The sequence number that is being requested.
seqno: SeqNo,
/// The maximum number of times this TLV may be forwarded, plus 1.
hop_count: NonZeroU8,
/// The router id that is being requested.
router_id: RouterId,
/// The prefix being requested
prefix: Subnet,
}
impl SeqNoRequest {
/// Create a new `SeqNoRequest` for the given [prefix](Subnet) advertised by the [`RouterId`],
/// with the required new [`SeqNo`].
pub fn new(seqno: SeqNo, router_id: RouterId, prefix: Subnet) -> SeqNoRequest {
Self {
seqno,
hop_count: DEFAULT_HOP_COUNT,
router_id,
prefix,
}
}
/// Return the [`prefix`](Subnet) associated with this `SeqNoRequest`.
pub fn prefix(&self) -> Subnet {
self.prefix
}
/// Return the [`RouterId`] associated with this `SeqNoRequest`.
pub fn router_id(&self) -> RouterId {
self.router_id
}
/// Return the requested [`SeqNo`] associated with this `SeqNoRequest`.
pub fn seqno(&self) -> SeqNo {
self.seqno
}
/// Get the hop count for this `SeqNoRequest`.
pub fn hop_count(&self) -> u8 {
self.hop_count.into()
}
/// Decrement the hop count for this `SeqNoRequest`.
///
/// # Panics
///
/// This function will panic if the hop count before calling this function is 1, as that will
/// result in a hop count of 0, which is illegal for a `SeqNoRequest`. It is up to the caller
/// to ensure this condition holds.
pub fn decrement_hop_count(&mut self) {
// SAFETY: The panic from this expect is documented in the function signature.
self.hop_count = NonZeroU8::new(self.hop_count.get() - 1)
.expect("Decrementing a hop count of 1 is not allowed");
}
/// Calculates the size on the wire of this `SeqNoRequest`.
pub fn wire_size(&self) -> u8 {
SEQNO_REQUEST_BASE_WIRE_SIZE + self.prefix.prefix_len().div_ceil(8)
// TODO: Wildcard should be encoded differently
}
/// Construct a `SeqNoRequest` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `SeqNoRequest`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let ae = src.get_u8();
let plen = src.get_u8();
let seqno = src.get_u16().into();
let hop_count = src.get_u8();
// Read "reserved" value, we assume this is 0
let _ = src.get_u8();
let mut router_id_bytes = [0u8; RouterId::BYTE_SIZE];
router_id_bytes.copy_from_slice(&src[..RouterId::BYTE_SIZE]);
src.advance(RouterId::BYTE_SIZE);
let router_id = RouterId::from(router_id_bytes);
let prefix_size = plen.div_ceil(8) as usize;
let prefix = match ae {
AE_WILDCARD => {
if plen != 0 {
return None;
}
// TODO: this is a temporary placeholder until we figure out how to handle this
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into()
}
AE_IPV4 => {
if plen > 32 {
return None;
}
let mut raw_ip = [0; 4];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv4Addr::from(raw_ip).into()
}
AE_IPV6 => {
if plen > 128 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv6Addr::from(raw_ip).into()
}
AE_IPV6_LL => {
if plen != 64 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Ipv6Addr::from(raw_ip).into()
}
_ => {
// Invalid AE type, skip remaining data and ignore
trace!("Invalid AE type in seqno_request packet, drop packet");
src.advance(len as usize - 46);
return None;
}
};
let prefix = Subnet::new(prefix, plen).ok()?;
trace!("Read seqno_request tlv body");
// Make sure hop_count is valid
let hop_count = if let Some(hc) = NonZeroU8::new(hop_count) {
hc
} else {
debug!("Dropping seqno_request as hop_count field is set to 0");
return None;
};
Some(SeqNoRequest {
seqno,
hop_count,
router_id,
prefix,
})
}
/// Encode this `SeqNoRequest` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(match self.prefix.address() {
IpAddr::V4(_) => AE_IPV4,
IpAddr::V6(_) => AE_IPV6,
});
dst.put_u8(self.prefix.prefix_len());
dst.put_u16(self.seqno.into());
dst.put_u8(self.hop_count.into());
// Write "reserved" value.
dst.put_u8(0);
dst.put_slice(&self.router_id.as_bytes()[..]);
let prefix_len = self.prefix.prefix_len().div_ceil(8) as usize;
match self.prefix.address() {
IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
}
}
}
#[cfg(test)]
mod tests {
use std::{
net::{Ipv4Addr, Ipv6Addr},
num::NonZeroU8,
};
use crate::{router_id::RouterId, subnet::Subnet};
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let snr = super::SeqNoRequest {
seqno: 17.into(),
hop_count: NonZeroU8::new(64).unwrap(),
prefix: Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([1u8; RouterId::BYTE_SIZE]),
};
snr.write_bytes(&mut buf);
assert_eq!(buf.len(), 54);
assert_eq!(
buf[..54],
[
2, 64, 0, 17, 64, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 25, 0, 26, 0, 27,
]
);
let mut buf = bytes::BytesMut::new();
let snr = super::SeqNoRequest {
seqno: 170.into(),
hop_count: NonZeroU8::new(111).unwrap(),
prefix: Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 32)
.expect("32 is a valid IPv4 prefix size; qed"),
router_id: RouterId::from([2u8; RouterId::BYTE_SIZE]),
};
snr.write_bytes(&mut buf);
assert_eq!(buf.len(), 50);
assert_eq!(
buf[..50],
[
1, 32, 0, 170, 111, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 10, 101, 4, 1,
]
);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(
&[
0, 0, 0, 0, 1, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
][..],
);
let snr = super::SeqNoRequest {
hop_count: NonZeroU8::new(1).unwrap(),
seqno: 0.into(),
prefix: Subnet::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into(), 0)
.expect("0 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([3u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
Some(snr)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(
&[
3, 64, 0, 42, 232, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 10, 0, 20, 0, 30, 0,
40,
][..],
);
let snr = super::SeqNoRequest {
seqno: 42.into(),
hop_count: NonZeroU8::new(232).unwrap(),
prefix: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
Some(snr)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4, as it is the first value which would be used by a protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
4, 64, 0, 0, 44, 0, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21,
][..],
);
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
None
);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_hop_count() {
// A hop count of 0 is invalid, so decoding should reject the packet
let mut buf = bytes::BytesMut::from(
&[
3, 64, 92, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 10, 0, 20, 0, 30, 0,
40,
][..],
);
let buf_len = buf.len();
assert_eq!(
super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8),
None
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let seqno_src = super::SeqNoRequest::new(
64.into(),
RouterId::from([6; RouterId::BYTE_SIZE]),
Subnet::new(
Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed"),
);
seqno_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::SeqNoRequest::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(seqno_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}

View File

@@ -0,0 +1,72 @@
pub use super::{hello::Hello, ihu::Ihu, update::Update};
use super::{route_request::RouteRequest, SeqNoRequest};
/// A single `Tlv` in a babel packet body.
#[derive(Debug, Clone, PartialEq)]
pub enum Tlv {
/// Hello Tlv type.
Hello(Hello),
/// Ihu Tlv type.
Ihu(Ihu),
/// Update Tlv type.
Update(Update),
/// RouteRequest Tlv type.
RouteRequest(RouteRequest),
/// SeqNoRequest Tlv type.
SeqNoRequest(SeqNoRequest),
}
impl Tlv {
/// Calculate the size on the wire for this `Tlv`. This DOES NOT include the TLV header size
/// (2 bytes).
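///
/// The 2-byte header referred to here is the standard babel TLV preamble of
/// `[ type (1 byte) | body length (1 byte) ]` (RFC 8966, section 4.3), presumably
/// written by the enclosing packet serializer rather than by this method.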
pub fn wire_size(&self) -> u8 {
match self {
Self::Hello(hello) => hello.wire_size(),
Self::Ihu(ihu) => ihu.wire_size(),
Self::Update(update) => update.wire_size(),
Self::RouteRequest(route_request) => route_request.wire_size(),
Self::SeqNoRequest(seqno_request) => seqno_request.wire_size(),
}
}
/// Encode this `Tlv` as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
match self {
Self::Hello(hello) => hello.write_bytes(dst),
Self::Ihu(ihu) => ihu.write_bytes(dst),
Self::Update(update) => update.write_bytes(dst),
Self::RouteRequest(route_request) => route_request.write_bytes(dst),
Self::SeqNoRequest(seqno_request) => seqno_request.write_bytes(dst),
}
}
}
impl From<SeqNoRequest> for Tlv {
fn from(v: SeqNoRequest) -> Self {
Self::SeqNoRequest(v)
}
}
impl From<RouteRequest> for Tlv {
fn from(v: RouteRequest) -> Self {
Self::RouteRequest(v)
}
}
impl From<Update> for Tlv {
fn from(v: Update) -> Self {
Self::Update(v)
}
}
impl From<Ihu> for Tlv {
fn from(v: Ihu) -> Self {
Self::Ihu(v)
}
}
impl From<Hello> for Tlv {
fn from(v: Hello) -> Self {
Self::Hello(v)
}
}

View File

@@ -0,0 +1,385 @@
//! The babel [Update TLV](https://datatracker.ietf.org/doc/html/rfc8966#name-update).
use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
time::Duration,
};
use bytes::{Buf, BufMut};
use tracing::trace;
use crate::{metric::Metric, router_id::RouterId, sequence_number::SeqNo, subnet::Subnet};
use super::{AE_IPV4, AE_IPV6, AE_IPV6_LL, AE_WILDCARD};
/// Flag bit indicating an [`Update`] TLV establishes a new default prefix.
#[allow(dead_code)]
const UPDATE_FLAG_PREFIX: u8 = 0x80;
/// Flag bit indicating an [`Update`] TLV establishes a new default router-id.
#[allow(dead_code)]
const UPDATE_FLAG_ROUTER_ID: u8 = 0x40;
/// Mask to apply to [`Update`] flags, leaving only valid flags.
const FLAG_MASK: u8 = 0b1100_0000;
/// Base wire size of an [`Update`] without variable length address encoding.
const UPDATE_BASE_WIRE_SIZE: u8 = 10 + RouterId::BYTE_SIZE as u8;
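// Wire layout sketch of the body parsed in `from_bytes`/`write_bytes` below:
//
// [ AE (1) | flags (1) | plen (1) | omitted (1) | interval (2) | seqno (2) | metric (2) | prefix (plen / 8, rounded up) | router id (40) ]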
/// Update TLV body as defined in https://datatracker.ietf.org/doc/html/rfc8966#name-update.
#[derive(Debug, Clone, PartialEq)]
pub struct Update {
/// Flags set in the TLV.
flags: u8,
/// Upper bound in centiseconds after which a new `Update` is sent. Must not be 0.
interval: u16,
/// Sender's sequence number.
seqno: SeqNo,
/// Sender's metric for this route.
metric: Metric,
/// The [`Subnet`] contained in this update. An update packet itself can contain any allowed
/// subnet.
subnet: Subnet,
/// Router id of the sender. Importantly this is not part of the update itself, though we do
/// transmit it for now as such.
router_id: RouterId,
}
impl Update {
/// Create a new `Update`.
pub fn new(
interval: Duration,
seqno: SeqNo,
metric: Metric,
subnet: Subnet,
router_id: RouterId,
) -> Self {
let interval_centiseconds = (interval.as_millis() / 10) as u16;
Self {
// No flags used for now
flags: 0,
interval: interval_centiseconds,
seqno,
metric,
subnet,
router_id,
}
}
/// Returns the [`SeqNo`] of the sender of this `Update`.
pub fn seqno(&self) -> SeqNo {
self.seqno
}
/// Return the [`Metric`] of the sender for this route in the `Update`.
pub fn metric(&self) -> Metric {
self.metric
}
/// Return the [`Subnet`] in this `Update.`
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// Return the [`router-id`](PublicKey) of the router that advertised this [`Prefix`](IpAddr).
pub fn router_id(&self) -> RouterId {
self.router_id
}
/// Calculates the size on the wire of this `Update`.
pub fn wire_size(&self) -> u8 {
let address_bytes = self.subnet.prefix_len().div_ceil(8);
UPDATE_BASE_WIRE_SIZE + address_bytes
}
/// Get the time until a new `Update` for the [`Subnet`] is received at the latest.
pub fn interval(&self) -> Duration {
// Interval is expressed as centiseconds on the wire.
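// e.g. an on-wire value of 400 centiseconds becomes Duration::from_secs(4).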
Duration::from_millis(self.interval as u64 * 10)
}
/// Construct an `Update` from wire bytes.
///
/// # Panics
///
/// This function will panic if there are insufficient bytes present in the provided buffer to
/// decode a complete `Update`.
pub fn from_bytes(src: &mut bytes::BytesMut, len: u8) -> Option<Self> {
let ae = src.get_u8();
let flags = src.get_u8() & FLAG_MASK;
let plen = src.get_u8();
// Read "omitted" value, we assume this is 0
let _ = src.get_u8();
let interval = src.get_u16();
let seqno = src.get_u16().into();
let metric = src.get_u16().into();
let prefix_size = plen.div_ceil(8) as usize;
let prefix = match ae {
AE_WILDCARD => {
if prefix_size != 0 {
return None;
}
// TODO: this is a temporary placeholder until we figure out how to handle this
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into()
}
AE_IPV4 => {
if plen > 32 {
return None;
}
let mut raw_ip = [0; 4];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv4Addr::from(raw_ip).into()
}
AE_IPV6 => {
if plen > 128 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[..prefix_size].copy_from_slice(&src[..prefix_size]);
src.advance(prefix_size);
Ipv6Addr::from(raw_ip).into()
}
AE_IPV6_LL => {
if plen != 64 {
return None;
}
let mut raw_ip = [0; 16];
raw_ip[0] = 0xfe;
raw_ip[1] = 0x80;
raw_ip[8..].copy_from_slice(&src[..8]);
src.advance(8);
Ipv6Addr::from(raw_ip).into()
}
_ => {
// Invalid AE type, skip remaining data and ignore
trace!("Invalid AE type in update packet, drop packet");
src.advance(len as usize - 10);
return None;
}
};
let subnet = Subnet::new(prefix, plen).ok()?;
let mut router_id_bytes = [0u8; RouterId::BYTE_SIZE];
router_id_bytes.copy_from_slice(&src[..RouterId::BYTE_SIZE]);
src.advance(RouterId::BYTE_SIZE);
let router_id = RouterId::from(router_id_bytes);
trace!("Read update tlv body");
Some(Update {
flags,
interval,
seqno,
metric,
subnet,
router_id,
})
}
/// Encode this `Update` tlv as part of a packet.
pub fn write_bytes(&self, dst: &mut bytes::BytesMut) {
dst.put_u8(match self.subnet.address() {
IpAddr::V4(_) => AE_IPV4,
IpAddr::V6(_) => AE_IPV6,
});
dst.put_u8(self.flags);
dst.put_u8(self.subnet.prefix_len());
// Write "omitted" value, currently not used in our encoding scheme.
dst.put_u8(0);
dst.put_u16(self.interval);
dst.put_u16(self.seqno.into());
dst.put_u16(self.metric.into());
let prefix_len = self.subnet.prefix_len().div_ceil(8) as usize;
match self.subnet.address() {
IpAddr::V4(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
IpAddr::V6(ip) => dst.put_slice(&ip.octets()[..prefix_len]),
}
dst.put_slice(&self.router_id.as_bytes()[..])
}
}
#[cfg(test)]
mod tests {
use std::{
net::{Ipv4Addr, Ipv6Addr},
time::Duration,
};
use crate::{router_id::RouterId, subnet::Subnet};
use bytes::Buf;
#[test]
fn encoding() {
let mut buf = bytes::BytesMut::new();
let ihu = super::Update {
flags: 0b1100_0000,
interval: 400,
seqno: 17.into(),
metric: 25.into(),
subnet: Subnet::new(Ipv6Addr::new(512, 25, 26, 27, 28, 0, 0, 29).into(), 64)
.expect("64 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([1u8; RouterId::BYTE_SIZE]),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 58);
assert_eq!(
buf[..58],
[
2, 192, 64, 0, 1, 144, 0, 17, 0, 25, 2, 0, 0, 25, 0, 26, 0, 27, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1
]
);
let mut buf = bytes::BytesMut::new();
let ihu = super::Update {
flags: 0b0000_0000,
interval: 600,
seqno: 170.into(),
metric: 256.into(),
subnet: Subnet::new(Ipv4Addr::new(10, 101, 4, 1).into(), 23)
.expect("23 is a valid IPv4 prefix size; qed"),
router_id: RouterId::from([2u8; RouterId::BYTE_SIZE]),
};
ihu.write_bytes(&mut buf);
assert_eq!(buf.len(), 53);
assert_eq!(
buf[..53],
[
1, 0, 23, 0, 2, 88, 0, 170, 1, 0, 10, 101, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
]
);
}
#[test]
fn decoding() {
let mut buf = bytes::BytesMut::from(
&[
0, 64, 0, 0, 0, 100, 0, 70, 2, 0, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
][..],
);
let ihu = super::Update {
flags: 0b0100_0000,
interval: 100,
seqno: 70.into(),
metric: 512.into(),
subnet: Subnet::new(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into(), 0)
.expect("0 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([3u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
let mut buf = bytes::BytesMut::from(
&[
3, 0, 64, 0, 3, 232, 0, 42, 3, 1, 0, 10, 0, 20, 0, 30, 0, 40, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4,
][..],
);
let ihu = super::Update {
flags: 0b0000_0000,
interval: 1000,
seqno: 42.into(),
metric: 769.into(),
subnet: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_ae_encoding() {
// AE 4, as it is the first value which would be used by a protocol extension, causing this
// test to fail if we forget to update something
let mut buf = bytes::BytesMut::from(
&[
4, 0, 64, 0, 0, 44, 2, 0, 0, 10, 10, 5, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
][..],
);
let buf_len = buf.len();
assert_eq!(super::Update::from_bytes(&mut buf, buf_len as u8), None);
// Decode function should still consume the required amount of bytes to leave parser in a
// good state (assuming the length in the tlv preamble is good).
assert_eq!(buf.remaining(), 0);
}
#[test]
fn decode_ignores_invalid_flag_bits() {
// Set all flag bits, only allowed bits should be set on the decoded value
let mut buf = bytes::BytesMut::from(
&[
3, 255, 64, 0, 3, 232, 0, 42, 3, 1, 0, 10, 0, 20, 0, 30, 0, 40, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4,
][..],
);
let ihu = super::Update {
flags: super::UPDATE_FLAG_PREFIX | super::UPDATE_FLAG_ROUTER_ID,
interval: 1000,
seqno: 42.into(),
metric: 769.into(),
subnet: Subnet::new(Ipv6Addr::new(0xfe80, 0, 0, 0, 10, 20, 30, 40).into(), 64)
.expect("92 is a valid IPv6 prefix size; qed"),
router_id: RouterId::from([4u8; RouterId::BYTE_SIZE]),
};
let buf_len = buf.len();
assert_eq!(
super::Update::from_bytes(&mut buf, buf_len as u8),
Some(ihu)
);
assert_eq!(buf.remaining(), 0);
}
#[test]
fn roundtrip() {
let mut buf = bytes::BytesMut::new();
let hello_src = super::Update::new(
Duration::from_secs(64),
10.into(),
25.into(),
Subnet::new(
Ipv6Addr::new(0x21f, 0x4025, 0xabcd, 0xdead, 0, 0, 0, 0).into(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed"),
RouterId::from([6; RouterId::BYTE_SIZE]),
);
hello_src.write_bytes(&mut buf);
let buf_len = buf.len();
let decoded = super::Update::from_bytes(&mut buf, buf_len as u8);
assert_eq!(Some(hello_src), decoded);
assert_eq!(buf.remaining(), 0);
}
}

View File

@@ -0,0 +1,338 @@
use std::path::PathBuf;
use aes_gcm::{aead::Aead, KeyInit};
use axum::{
extract::{Query, State},
http::{HeaderMap, StatusCode},
routing::get,
Router,
};
use axum_extra::extract::Host;
use futures::{stream::FuturesUnordered, StreamExt};
use reqwest::header::CONTENT_TYPE;
use tokio::net::TcpListener;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
/// Cdn functionality. URLs of a specific format lead to downloading of metadata from the registry,
/// and serving of chunks.
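///
/// A request is expected to look roughly like
/// `http://<32 hex characters of metadata hash>.<registry host>/?key=<hex decryption key>`
/// (format inferred from the host and query parsing in the `cdn` handler below). The `key`
/// query parameter is optional, and only needed if the metadata is encrypted.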
pub struct Cdn {
cache: PathBuf,
cancel_token: CancellationToken,
}
/// Cache for reconstructed blocks
#[derive(Clone)]
struct Cache {
base: PathBuf,
}
impl Cdn {
pub fn new(cache: PathBuf) -> Self {
let cancel_token = CancellationToken::new();
Self {
cache,
cancel_token,
}
}
/// Start the Cdn server. The server runs in a background task until this `Cdn` is dropped.
pub fn start(&self, listener: TcpListener) -> Result<(), Box<dyn std::error::Error>> {
let state = Cache {
base: self.cache.clone(),
};
if !self.cache.exists() {
info!(dir = %self.cache.display(), "Creating cache dir");
std::fs::create_dir(&self.cache)?;
}
if !self.cache.is_dir() {
return Err("Cache dir is not a directory".into());
}
let router = Router::new().route("/", get(cdn)).with_state(state);
let cancel_token = self.cancel_token.clone();
tokio::spawn(async {
axum::serve(listener, router)
.with_graceful_shutdown(cancel_token.cancelled_owned())
.await
.map_err(|err| {
warn!(%err, "Cdn server error");
})
});
Ok(())
}
}
#[derive(Debug, serde::Deserialize)]
struct DecryptionKeyQuery {
key: Option<String>,
}
#[tracing::instrument(level = tracing::Level::DEBUG, skip(cache))]
async fn cdn(
Host(host): Host,
Query(query): Query<DecryptionKeyQuery>,
State(cache): State<Cache>,
) -> Result<(HeaderMap, Vec<u8>), StatusCode> {
debug!("Received request at {host}");
let mut parts = host.split('.');
let prefix = parts
.next()
.expect("Splitting a String always yields at least 1 result; Qed.");
if prefix.len() != 32 {
return Err(StatusCode::BAD_REQUEST);
}
let mut hash = [0; 16];
faster_hex::hex_decode(prefix.as_bytes(), &mut hash).map_err(|_| StatusCode::BAD_REQUEST)?;
let registry_url = parts.collect::<Vec<_>>().join(".");
let decryption_key = if let Some(query_key) = query.key {
let mut key = [0; 16];
faster_hex::hex_decode(query_key.as_bytes(), &mut key)
.map_err(|_| StatusCode::BAD_REQUEST)?;
Some(key)
} else {
None
};
let meta = load_meta(registry_url.clone(), hash, decryption_key).await?;
debug!("Metadata loaded");
let mut headers = HeaderMap::new();
match meta {
cdn_meta::Metadata::File(file) => {
if let Some(mime) = file.mime {
debug!(%mime, "Setting mime type");
headers.append(
CONTENT_TYPE,
mime.parse().map_err(|_| {
warn!("Not serving file with unprocessable mime type");
StatusCode::UNPROCESSABLE_ENTITY
})?,
);
}
// File recombination
let mut content = vec![];
for block in file.blocks {
content.extend_from_slice(cache.fetch_block(&block).await?.as_slice());
}
Ok((headers, content))
}
cdn_meta::Metadata::Directory(dir) => {
let mut out = r#"
<!DOCTYPE html>
<html i18n-values="dir:textdirection;lang:language">
<head>
<meta charset="utf-8">
</head>
<body>
<ul>"#
.to_string();
headers.append(
CONTENT_TYPE,
"text/html"
.parse()
.expect("Can parse \"text/html\" to content-type"),
);
for (file_hash, encryption_key) in dir.files {
let meta = load_meta(registry_url.clone(), file_hash, encryption_key).await?;
let name = match meta {
cdn_meta::Metadata::File(file) => file.name,
cdn_meta::Metadata::Directory(dir) => dir.name,
};
out.push_str(&format!(
"<li><a href=\"http://{}.{registry_url}/?key={}\">{name}</a></li>\n",
faster_hex::hex_string(&file_hash),
&encryption_key
.map(|ek| faster_hex::hex_string(&ek))
.unwrap_or_else(String::new),
));
}
out.push_str("</ul></body></html>");
Ok((headers, out.into()))
}
}
}
/// Load a metadata blob from a metadata repository.
async fn load_meta(
registry_url: String,
hash: cdn_meta::Hash,
encryption_key: Option<cdn_meta::Hash>,
) -> Result<cdn_meta::Metadata, StatusCode> {
let mut r_url = reqwest::Url::parse(&format!("http://{registry_url}")).map_err(|err| {
error!(%err, "Could not parse registry URL");
StatusCode::INTERNAL_SERVER_ERROR
})?;
let hex_hash = faster_hex::hex_string(&hash);
r_url.set_path(&format!("/api/v1/metadata/{hex_hash}"));
r_url.set_scheme("http").map_err(|_| {
error!("Could not set HTTP scheme");
StatusCode::INTERNAL_SERVER_ERROR
})?;
debug!(url = %r_url, "Fetching chunk");
let metadata_reply = reqwest::get(r_url).await.map_err(|err| {
error!(%err, "Could not load metadata from registry");
StatusCode::INTERNAL_SERVER_ERROR
})?;
// TODO: Should we just check if status code is success here?
if metadata_reply.status() != StatusCode::OK {
debug!(
status = %metadata_reply.status(),
"Registry replied with non-OK status code"
);
return Err(metadata_reply.status());
}
let encrypted_metadata = metadata_reply.bytes().await.map_err(|err| {
error!(%err, "Could not load metadata response from registry");
StatusCode::INTERNAL_SERVER_ERROR
})?;
let metadata = if let Some(encryption_key) = encryption_key {
if encrypted_metadata.len() < 12 {
debug!("Attempting to decrypt metadata with inufficient size");
return Err(StatusCode::UNPROCESSABLE_ENTITY);
}
let decryptor = aes_gcm::Aes128Gcm::new(&encryption_key.into());
let plaintext = decryptor
.decrypt(
encrypted_metadata[encrypted_metadata.len() - 12..].into(),
&encrypted_metadata[..encrypted_metadata.len() - 12],
)
.map_err(|_| {
warn!("Decryption of block failed");
// Either the decryption key is wrong or the blob is corrupt; we assume the
// registry is not at fault, so the decryption key is wrong, which is a user error.
StatusCode::UNPROCESSABLE_ENTITY
})?;
plaintext
} else {
encrypted_metadata.into()
};
// If the metadata is not decodable, this is not really our fault, but it is also not
// necessarily the user's fault.
let (meta, consumed) =
cdn_meta::Metadata::from_binary(&metadata).map_err(|_| StatusCode::UNPROCESSABLE_ENTITY)?;
if consumed != metadata.len() {
warn!(
metadata_length = metadata.len(),
consumed, "Trailing binary metadata which wasn't decoded"
);
}
Ok(meta)
}
impl Drop for Cdn {
fn drop(&mut self) {
self.cancel_token.cancel();
}
}
/// Download a shard from a 0-db.
async fn download_shard(
location: &cdn_meta::Location,
key: &[u8],
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
let client = redis::Client::open(format!("redis://{}", location.host))?;
let mut con = client.get_multiplexed_async_connection().await?;
redis::cmd("SELECT")
.arg(&location.namespace)
.query_async::<()>(&mut con)
.await?;
Ok(redis::cmd("GET").arg(key).query_async(&mut con).await?)
}
impl Cache {
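/// Fetch a single content block. As a sketch of the pipeline implemented below: a local
/// cache lookup, falling back to downloading shards from the 0-dbs, Reed-Solomon
/// reconstruction, padding removal, AES-GCM decryption, and finally a best-effort write
/// back to the cache.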
async fn fetch_block(&self, block: &cdn_meta::Block) -> Result<Vec<u8>, StatusCode> {
let mut cached_file_path = self.base.clone();
cached_file_path.push(faster_hex::hex_string(&block.encrypted_hash));
// If we have the file in cache, just open it, load it, and return from there.
if cached_file_path.exists() {
return tokio::fs::read(&cached_file_path).await.map_err(|err| {
error!(%err, "Could not load cached file");
StatusCode::INTERNAL_SERVER_ERROR
});
}
// File is not in cache, download and save
// TODO: Rank based on expected latency
// FIXME: Only download the required amount
let mut shard_stream = block
.shards
.iter()
.enumerate()
.map(|(i, loc)| async move { (i, download_shard(loc, &block.encrypted_hash).await) })
.collect::<FuturesUnordered<_>>();
let mut shards = vec![None; block.shards.len()];
while let Some((idx, shard)) = shard_stream.next().await {
let shard = shard.map_err(|err| {
warn!(err, "Could not load shard");
StatusCode::INTERNAL_SERVER_ERROR
})?;
shards[idx] = Some(shard);
}
// recombine
let encoder = reed_solomon_erasure::galois_8::ReedSolomon::new(
block.required_shards as usize,
block.shards.len() - block.required_shards as usize,
)
.map_err(|err| {
error!(%err, "Failed to construct erausre codec");
StatusCode::INTERNAL_SERVER_ERROR
})?;
encoder.reconstruct_data(&mut shards).map_err(|err| {
error!(%err, "Shard recombination failed");
StatusCode::INTERNAL_SERVER_ERROR
})?;
// SAFETY: Since decoding was successful, the first shards (data shards) must be
// Option::Some
let mut encrypted_data = shards
.into_iter()
.map(Option::unwrap)
.take(block.required_shards as usize)
.flatten()
.collect::<Vec<_>>();
let padding_len = encrypted_data[encrypted_data.len() - 1] as usize;
encrypted_data.resize(encrypted_data.len() - padding_len, 0);
let decryptor = aes_gcm::Aes128Gcm::new(&block.content_hash.into());
let c = decryptor
.decrypt(&block.nonce.into(), encrypted_data.as_slice())
.map_err(|err| {
warn!(%err, "Decryption of content block failed");
StatusCode::UNPROCESSABLE_ENTITY
})?;
// Save file to cache, this is not critical if it fails
if let Err(err) = tokio::fs::write(&cached_file_path, &c).await {
warn!(%err, "Could not write block to cache");
};
Ok(c)
}
}

View File

@@ -0,0 +1,158 @@
use std::{io, net::SocketAddr, pin::Pin};
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpStream,
};
mod tracked;
pub use tracked::Tracked;
#[cfg(feature = "private-network")]
mod tls;
/// Cost to add to the peer_link_cost for "local processing", when peers are connected over IPv6.
///
/// The current peer link cost is calculated from a HELLO rtt. This is great to measure link
/// latency, since packets are processed in order. However, on local idle links, this value will
/// likely be 0 since we round down (from the amount of ms it took to process), which does not
/// accurately reflect that there is in fact a cost associated with using a peer, even on
/// these local links.
const PACKET_PROCESSING_COST_IP6_TCP: u16 = 10;
/// Cost to add to the peer_link_cost for "local processing", when peers are connected over IPv4.
///
/// This is similar to [`PACKET_PROCESSING_COST_IP6_TCP`], but slightly higher so we skew towards IPv6
/// connections if peers are connected over both IPv4 and IPv6.
const PACKET_PROCESSING_COST_IP4_TCP: u16 = 15;
// TODO
const PACKET_PROCESSING_COST_IP6_QUIC: u16 = 7;
// TODO
const PACKET_PROCESSING_COST_IP4_QUIC: u16 = 12;
pub trait Connection: AsyncRead + AsyncWrite {
/// Get an identifier for this connection, which shows details about the remote
fn identifier(&self) -> Result<String, io::Error>;
/// The static cost of using this connection
fn static_link_cost(&self) -> Result<u16, io::Error>;
}
/// A wrapper around a quic send and quic receive stream, implementing the [`Connection`] trait.
pub struct Quic {
tx: quinn::SendStream,
rx: quinn::RecvStream,
remote: SocketAddr,
}
impl Quic {
/// Create a new wrapper around Quic streams.
pub fn new(tx: quinn::SendStream, rx: quinn::RecvStream, remote: SocketAddr) -> Self {
Quic { tx, rx, remote }
}
}
impl Connection for TcpStream {
fn identifier(&self) -> Result<String, io::Error> {
Ok(format!(
"TCP {} <-> {}",
self.local_addr()?,
self.peer_addr()?
))
}
fn static_link_cost(&self) -> Result<u16, io::Error> {
Ok(match self.peer_addr()? {
SocketAddr::V4(_) => PACKET_PROCESSING_COST_IP4_TCP,
SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
PACKET_PROCESSING_COST_IP4_TCP
}
SocketAddr::V6(_) => PACKET_PROCESSING_COST_IP6_TCP,
})
}
}
impl AsyncRead for Quic {
#[inline]
fn poll_read(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<io::Result<()>> {
Pin::new(&mut self.rx).poll_read(cx, buf)
}
}
impl AsyncWrite for Quic {
#[inline]
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<Result<usize, io::Error>> {
Pin::new(&mut self.tx)
.poll_write(cx, buf)
.map_err(From::from)
}
#[inline]
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), io::Error>> {
Pin::new(&mut self.tx).poll_flush(cx)
}
#[inline]
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), io::Error>> {
Pin::new(&mut self.tx).poll_shutdown(cx)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
bufs: &[io::IoSlice<'_>],
) -> std::task::Poll<Result<usize, io::Error>> {
Pin::new(&mut self.tx).poll_write_vectored(cx, bufs)
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.tx.is_write_vectored()
}
}
impl Connection for Quic {
fn identifier(&self) -> Result<String, io::Error> {
Ok(format!("QUIC -> {}", self.remote))
}
fn static_link_cost(&self) -> Result<u16, io::Error> {
Ok(match self.remote {
SocketAddr::V4(_) => PACKET_PROCESSING_COST_IP4_QUIC,
SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
PACKET_PROCESSING_COST_IP4_QUIC
}
SocketAddr::V6(_) => PACKET_PROCESSING_COST_IP6_QUIC,
})
}
}
#[cfg(test)]
use tokio::io::DuplexStream;
#[cfg(test)]
impl Connection for DuplexStream {
fn identifier(&self) -> Result<String, io::Error> {
Ok("Memory pipe".to_string())
}
fn static_link_cost(&self) -> Result<u16, io::Error> {
Ok(1)
}
}

View File

@@ -0,0 +1,23 @@
use std::{io, net::SocketAddr};
use tokio::net::TcpStream;
impl super::Connection for tokio_openssl::SslStream<TcpStream> {
fn identifier(&self) -> Result<String, io::Error> {
Ok(format!(
"TLS {} <-> {}",
self.get_ref().local_addr()?,
self.get_ref().peer_addr()?
))
}
fn static_link_cost(&self) -> Result<u16, io::Error> {
Ok(match self.get_ref().peer_addr()? {
SocketAddr::V4(_) => super::PACKET_PROCESSING_COST_IP4_TCP,
SocketAddr::V6(ip) if ip.ip().to_ipv4_mapped().is_some() => {
super::PACKET_PROCESSING_COST_IP4_TCP
}
SocketAddr::V6(_) => super::PACKET_PROCESSING_COST_IP6_TCP,
})
}
}

View File

@@ -0,0 +1,120 @@
use std::{
pin::Pin,
sync::{
atomic::{AtomicU64, Ordering},
Arc,
},
task::Poll,
};
use tokio::io::{AsyncRead, AsyncWrite};
use super::Connection;
/// Wrapper which keeps track of how much bytes have been read and written from a connection.
pub struct Tracked<C> {
/// Bytes read counter
read: Arc<AtomicU64>,
/// Bytes written counter
write: Arc<AtomicU64>,
/// Underlying connection we are measuring
con: C,
}
impl<C> Tracked<C>
where
C: Connection + Unpin,
{
/// Create a new instance of a tracked connection. Counters are passed in so they can be
/// reused across connections.
pub fn new(read: Arc<AtomicU64>, write: Arc<AtomicU64>, con: C) -> Self {
Self { read, write, con }
}
}
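// A hedged usage sketch (the variable names here are illustrative, not part of this API):
// the counters are created by the caller and can be shared with e.g. a metrics task.
//
// let read = Arc::new(AtomicU64::new(0));
// let write = Arc::new(AtomicU64::new(0));
// let tracked = Tracked::new(read.clone(), write.clone(), tcp_stream);
// // `read` and `write` can now be observed while the connection is in use.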
impl<C> Connection for Tracked<C>
where
C: Connection + Unpin,
{
#[inline]
fn identifier(&self) -> Result<String, std::io::Error> {
self.con.identifier()
}
#[inline]
fn static_link_cost(&self) -> Result<u16, std::io::Error> {
self.con.static_link_cost()
}
}
impl<C> AsyncRead for Tracked<C>
where
C: AsyncRead + Unpin,
{
#[inline]
fn poll_read(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<std::io::Result<()>> {
let start_len = buf.filled().len();
let res = Pin::new(&mut self.con).poll_read(cx, buf);
if let Poll::Ready(Ok(())) = res {
self.read
.fetch_add((buf.filled().len() - start_len) as u64, Ordering::Relaxed);
}
res
}
}
impl<C> AsyncWrite for Tracked<C>
where
C: AsyncWrite + Unpin,
{
#[inline]
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
let res = Pin::new(&mut self.con).poll_write(cx, buf);
if let Poll::Ready(Ok(written)) = res {
self.write.fetch_add(written as u64, Ordering::Relaxed);
}
res
}
#[inline]
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
Pin::new(&mut self.con).poll_flush(cx)
}
#[inline]
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
Pin::new(&mut self.con).poll_shutdown(cx)
}
#[inline]
fn poll_write_vectored(
mut self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<Result<usize, std::io::Error>> {
let res = Pin::new(&mut self.con).poll_write_vectored(cx, bufs);
if let Poll::Ready(Ok(written)) = res {
self.write.fetch_add(written as u64, Ordering::Relaxed);
}
res
}
#[inline]
fn is_write_vectored(&self) -> bool {
self.con.is_write_vectored()
}
}

View File

@@ -0,0 +1,450 @@
//! Abstraction over diffie hellman, symmetric encryption, and hashing.
use core::fmt;
use std::{
error::Error,
fmt::Display,
net::Ipv6Addr,
ops::{Deref, DerefMut},
};
use aes_gcm::{aead::OsRng, AeadCore, AeadInPlace, Aes256Gcm, Key, KeyInit};
use serde::{de::Visitor, Deserialize, Serialize};
/// Default MTU for a packet. Ideally this would not be needed, and [`PacketBuffer`] would take
/// a const generic argument which is then expanded with the needed extra space for the buffer;
/// however, as it stands, const generics can only be used standalone and not in a constant
/// expression. This _is_ possible on nightly rust, with a feature gate (generic_const_exprs).
const PACKET_SIZE: usize = 1400;
/// Size of an AES_GCM tag in bytes.
const AES_TAG_SIZE: usize = 16;
/// Size of an AES_GCM nonce in bytes.
const AES_NONCE_SIZE: usize = 12;
/// Size of user defined data header. This header will be part of the encrypted data.
const DATA_HEADER_SIZE: usize = 4;
/// Size of a `PacketBuffer`.
const PACKET_BUFFER_SIZE: usize = PACKET_SIZE + AES_TAG_SIZE + AES_NONCE_SIZE + DATA_HEADER_SIZE;
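// Layout sketch of a `PacketBuffer`'s backing storage, as used by `buffer()` and
// `SharedSecret::encrypt` below:
//
// [ header (4) | usable packet data (1400) | AES-GCM tag (16) | AES-GCM nonce (12) ]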
/// A public key used as part of Diffie Hellman key exchange. It is derived from a [`SecretKey`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PublicKey(x25519_dalek::PublicKey);
/// A secret used as part of Diffie Hellman key exchange.
///
/// This type intentionally does not implement or derive [`Debug`] to avoid accidentally leaking
/// secrets in logs.
#[derive(Clone)]
pub struct SecretKey(x25519_dalek::StaticSecret);
/// A statically computed secret from a [`SecretKey`] and a [`PublicKey`].
///
/// This type intentionally does not implement or derive [`Debug`] to avoid accidentally leaking
/// secrets in logs.
#[derive(Clone)]
pub struct SharedSecret([u8; 32]);
/// A buffer for packets. This holds enough space to encrypt a packet in place without
/// reallocating.
///
/// Internally, the buffer is created with an additional header. Because this header is part of the
/// encrypted content, it is not included in the global version set by the main packet header. As
/// such, an internal version is included.
pub struct PacketBuffer {
buf: Vec<u8>,
/// Amount of bytes written in the buffer
size: usize,
}
/// A reference to the header in a [`PacketBuffer`].
pub struct PacketBufferHeader<'a> {
data: &'a [u8; DATA_HEADER_SIZE],
}
/// A mutable reference to the header in a [`PacketBuffer`].
pub struct PacketBufferHeaderMut<'a> {
data: &'a mut [u8; DATA_HEADER_SIZE],
}
/// Opaque type indicating decryption failed.
#[derive(Debug, Clone, Copy)]
pub struct DecryptionError;
impl Display for DecryptionError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Decryption failed, invalid or insufficient encrypted content for this key")
}
}
impl Error for DecryptionError {}
impl SecretKey {
/// Generate a new `StaticSecret` using [`OsRng`] as an entropy source.
pub fn new() -> Self {
SecretKey(x25519_dalek::StaticSecret::random_from_rng(OsRng))
}
/// View this `SecretKey` as a byte array.
#[inline]
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
/// Computes the [`SharedSecret`] from this `SecretKey` and a [`PublicKey`].
pub fn shared_secret(&self, other: &PublicKey) -> SharedSecret {
SharedSecret(self.0.diffie_hellman(&other.0).to_bytes())
}
}
impl Default for SecretKey {
fn default() -> Self {
Self::new()
}
}
impl PublicKey {
/// Generates an [`Ipv6Addr`] from a `PublicKey`.
///
/// The generated address is guaranteed to be part of the `400::/7` range.
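///
/// A usage sketch (hypothetical values):
///
/// ```ignore
/// let secret = SecretKey::new();
/// let public = PublicKey::from(&secret);
/// let ip = public.address(); // deterministic for a given key, always inside 400::/7
/// ```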
pub fn address(&self) -> Ipv6Addr {
let mut hasher = blake3::Hasher::new();
hasher.update(self.as_bytes());
let mut buf = [0; 16];
hasher.finalize_xof().fill(&mut buf);
// Mangle the first byte to be of the expected form. Because of the network range
// requirement, we MUST set the third bit, and MAY set the last bit. Instead of discarding
// the first 7 bits of the hash, use the first byte to determine if the last bit is set.
// If there is an odd number of bits set in the first byte, set the last bit of the result.
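// For example, a first hash byte of 0b0000_0111 has three set bits (odd), so the
// resulting first byte becomes 0x04 | 1 = 0x05; an even count yields 0x04.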
let lsb = buf[0].count_ones() as u8 % 2;
buf[0] = 0x04 | lsb;
Ipv6Addr::from(buf)
}
/// Convert this `PublicKey` to a byte array.
pub fn to_bytes(self) -> [u8; 32] {
self.0.to_bytes()
}
/// View this `PublicKey` as a byte array.
pub fn as_bytes(&self) -> &[u8; 32] {
self.0.as_bytes()
}
}
impl SharedSecret {
/// Encrypt a [`PacketBuffer`] using the `SharedSecret` as key.
///
/// Internally, a new random nonce will be generated using the OS's crypto rng generator. This
/// nonce is appended to the encrypted data.
pub fn encrypt(&self, mut data: PacketBuffer) -> Vec<u8> {
let key: Key<Aes256Gcm> = self.0.into();
let nonce = Aes256Gcm::generate_nonce(OsRng);
let cipher = Aes256Gcm::new(&key);
let tag = cipher
.encrypt_in_place_detached(&nonce, &[], &mut data.buf[..data.size])
.expect("Encryption can't fail; qed.");
data.buf[data.size..data.size + AES_TAG_SIZE].clone_from_slice(tag.as_slice());
data.buf[data.size + AES_TAG_SIZE..data.size + AES_TAG_SIZE + AES_NONCE_SIZE]
.clone_from_slice(&nonce);
data.buf.truncate(data.size + AES_NONCE_SIZE + AES_TAG_SIZE);
data.buf
}
/// Decrypt a message previously encrypted with an equivalent `SharedSecret`. In other words, a
/// message that was previously created by the [`SharedSecret::encrypt`] method.
///
/// Internally, this method assumes that a 12-byte nonce is present at the end of the data.
/// If the passed in data to decrypt does not contain a valid nonce, decryption fails and an
/// opaque error is returned. As an extension to this, if the data is not of sufficient length
/// to contain a valid nonce, an error is returned immediately.
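///
/// As a sketch, the expected input layout mirrors the output of [`SharedSecret::encrypt`]:
/// `[ ciphertext | 16-byte AES-GCM tag | 12-byte nonce ]`.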
pub fn decrypt(&self, mut data: Vec<u8>) -> Result<PacketBuffer, DecryptionError> {
// Make sure we have sufficient data (i.e. a nonce).
if data.len() < AES_NONCE_SIZE + AES_TAG_SIZE + DATA_HEADER_SIZE {
return Err(DecryptionError);
}
let data_len = data.len();
let key: Key<Aes256Gcm> = self.0.into();
{
let (data, nonce) = data.split_at_mut(data_len - AES_NONCE_SIZE);
let (data, tag) = data.split_at_mut(data.len() - AES_TAG_SIZE);
let cipher = Aes256Gcm::new(&key);
cipher
.decrypt_in_place_detached((&*nonce).into(), &[], data, (&*tag).into())
.map_err(|_| DecryptionError)?;
}
Ok(PacketBuffer {
// We did not remove the scratch space used for TAG and NONCE.
size: data.len() - AES_TAG_SIZE - AES_NONCE_SIZE,
buf: data,
})
}
}
impl PacketBuffer {
/// Create a new blank `PacketBuffer`.
pub fn new() -> Self {
Self {
buf: vec![0; PACKET_BUFFER_SIZE],
size: 0,
}
}
/// Get a reference to the packet header.
pub fn header(&self) -> PacketBufferHeader<'_> {
PacketBufferHeader {
data: self.buf[..DATA_HEADER_SIZE]
.try_into()
.expect("Header size constant is correct; qed"),
}
}
/// Get a mutable reference to the packet header.
pub fn header_mut(&mut self) -> PacketBufferHeaderMut<'_> {
PacketBufferHeaderMut {
data: <&mut [u8] as TryInto<&mut [u8; DATA_HEADER_SIZE]>>::try_into(
&mut self.buf[..DATA_HEADER_SIZE],
)
.expect("Header size constant is correct; qed"),
}
}
/// Get a reference to the entire usable inner buffer.
pub fn buffer(&self) -> &[u8] {
let buf_end = self.buf.len() - AES_NONCE_SIZE - AES_TAG_SIZE;
&self.buf[DATA_HEADER_SIZE..buf_end]
}
/// Get a mutable reference to the entire usable internal buffer.
pub fn buffer_mut(&mut self) -> &mut [u8] {
let buf_end = self.buf.len() - AES_NONCE_SIZE - AES_TAG_SIZE;
&mut self.buf[DATA_HEADER_SIZE..buf_end]
}
/// Sets the amount of bytes in use by the buffer.
pub fn set_size(&mut self, size: usize) {
self.size = size + DATA_HEADER_SIZE;
}
}
impl Default for PacketBuffer {
fn default() -> Self {
Self::new()
}
}
impl From<[u8; 32]> for SecretKey {
/// Load a secret key from a byte array.
fn from(bytes: [u8; 32]) -> SecretKey {
SecretKey(x25519_dalek::StaticSecret::from(bytes))
}
}
impl fmt::Display for PublicKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(&faster_hex::hex_string(self.as_bytes()))
}
}
impl Serialize for PublicKey {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&faster_hex::hex_string(self.as_bytes()))
}
}
struct PublicKeyVisitor;
impl Visitor<'_> for PublicKeyVisitor {
type Value = PublicKey;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("A hex encoded public key (64 characters)")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if v.len() != 64 {
Err(E::custom("Public key is 64 characters long"))
} else {
let mut backing = [0; 32];
faster_hex::hex_decode(v.as_bytes(), &mut backing)
.map_err(|_| E::custom("PublicKey is not valid hex"))?;
Ok(PublicKey(backing.into()))
}
}
}
impl<'de> Deserialize<'de> for PublicKey {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
deserializer.deserialize_str(PublicKeyVisitor)
}
}
impl From<[u8; 32]> for PublicKey {
/// Given a byte array, construct a `PublicKey`.
fn from(bytes: [u8; 32]) -> PublicKey {
PublicKey(x25519_dalek::PublicKey::from(bytes))
}
}
impl TryFrom<&str> for PublicKey {
type Error = faster_hex::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
let mut output = [0u8; 32];
faster_hex::hex_decode(value.as_bytes(), &mut output)?;
Ok(PublicKey::from(output))
}
}
impl From<&SecretKey> for PublicKey {
fn from(value: &SecretKey) -> Self {
PublicKey(x25519_dalek::PublicKey::from(&value.0))
}
}
impl Deref for SharedSecret {
type Target = [u8; 32];
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl Deref for PacketBuffer {
type Target = [u8];
fn deref(&self) -> &Self::Target {
&self.buf[DATA_HEADER_SIZE..self.size]
}
}
impl Deref for PacketBufferHeader<'_> {
type Target = [u8; DATA_HEADER_SIZE];
fn deref(&self) -> &Self::Target {
self.data
}
}
impl Deref for PacketBufferHeaderMut<'_> {
type Target = [u8; DATA_HEADER_SIZE];
fn deref(&self) -> &Self::Target {
self.data
}
}
impl DerefMut for PacketBufferHeaderMut<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data
}
}
impl fmt::Debug for PacketBuffer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("PacketBuffer")
.field("data", &"...")
.field("len", &self.size)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::{PacketBuffer, SecretKey, AES_NONCE_SIZE, AES_TAG_SIZE, DATA_HEADER_SIZE};
#[test]
/// Test if encryption works in general. We just create some random value and encrypt it.
/// Specifically, this will help to catch runtime panics in case AES_TAG_SIZE or AES_NONCE_SIZE
/// don't have a proper value aligned with the underlying AES_GCM implementation.
fn encryption_succeeds() {
let k1 = SecretKey::new();
let k2 = SecretKey::new();
let ss = k1.shared_secret(&(&k2).into());
let mut pb = PacketBuffer::new();
let data = b"vnno30nv f654q364 vfsv 44"; // Random keyboard smash.
pb.buffer_mut()[..data.len()].copy_from_slice(data);
pb.set_size(data.len());
// We only care that this does not panic.
let res = ss.encrypt(pb);
// At the same time, check expected size.
assert_eq!(
res.len(),
data.len() + DATA_HEADER_SIZE + AES_TAG_SIZE + AES_NONCE_SIZE
);
}
#[test]
/// Encrypt a value and then decrypt it. This makes sure the decrypt flow and encrypt flow
/// match, and both follow the expected format. Also, we don't reuse the shared secret for
/// decryption, but instead generate the secret again the other way round, to simulate a remote
/// node.
fn encrypt_decrypt_roundtrip() {
let k1 = SecretKey::new();
let k2 = SecretKey::new();
let ss1 = k1.shared_secret(&(&k2).into());
let ss2 = k2.shared_secret(&(&k1).into());
// This assertion is not strictly necessary as it will be checked below implicitly.
assert_eq!(ss1.as_slice(), ss2.as_slice());
let data = b"dsafjiqjo23 u2953u8 3oid fjo321j";
let mut pb = PacketBuffer::new();
pb.buffer_mut()[..data.len()].copy_from_slice(data);
pb.set_size(data.len());
let res = ss1.encrypt(pb);
let original = ss2.decrypt(res).expect("Decryption works");
assert_eq!(&*original, &data[..]);
}
#[test]
/// Test if PacketBufferHeaderMut actually modifies the PacketBuffer storage.
fn modify_header() {
let mut pb = PacketBuffer::new();
let mut header = pb.header_mut();
header[0] = 1;
header[1] = 2;
header[2] = 3;
header[3] = 4;
assert_eq!(pb.buf[..DATA_HEADER_SIZE], [1, 2, 3, 4]);
}
#[test]
/// Verify [`PacketBuffer::buffer`] and [`PacketBuffer::buffer_mut`] actually have the
/// appropriate size.
fn buffer_mapping() {
let mut pb = PacketBuffer::new();
assert_eq!(pb.buffer().len(), super::PACKET_SIZE);
assert_eq!(pb.buffer_mut().len(), super::PACKET_SIZE);
}
}

View File

@@ -0,0 +1,487 @@
use std::net::{IpAddr, Ipv6Addr};
use etherparse::{
icmpv6::{DestUnreachableCode, TimeExceededCode},
Icmpv6Type, PacketBuilder,
};
use futures::{Sink, SinkExt, Stream, StreamExt};
use tokio::sync::mpsc::UnboundedReceiver;
use tracing::{debug, error, trace, warn};
use crate::{crypto::PacketBuffer, metrics::Metrics, packet::DataPacket, router::Router};
/// Current version of the user data header.
const USER_DATA_VERSION: u8 = 1;
/// Type value indicating L3 data in the user data header.
const USER_DATA_L3_TYPE: u8 = 0;
/// Type value indicating a user message in the data header.
const USER_DATA_MESSAGE_TYPE: u8 = 1;
/// Type value indicating an ICMP packet not returned as regular IPv6 traffic. This is needed when
/// intermediate nodes send back icmp data, as the original data is encrypted.
const USER_DATA_OOB_ICMP: u8 = 2;
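// Sketch of the 4-byte user data header set below (the size matches DATA_HEADER_SIZE in the
// crypto module): [ version (1) | type (1) | 2 bytes currently not written by this module ].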
/// Minimum size in bytes of an IPv6 header.
const IPV6_MIN_HEADER_SIZE: usize = 40;
/// Size of an ICMPv6 header.
const ICMP6_HEADER_SIZE: usize = 8;
/// Minimum MTU for IPV6 according to https://www.rfc-editor.org/rfc/rfc8200#section-5.
/// For ICMP, the packet must not be greater than this value. This is specified in
/// https://datatracker.ietf.org/doc/html/rfc4443#section-2.4, section (c).
const MIN_IPV6_MTU: usize = 1280;
/// Mask applied to the first byte of an IP header to extract the version.
const IP_VERSION_MASK: u8 = 0b1111_0000;
/// Version byte of an IP header indicating IPv6. Since the version is only 4 bits, the lower bits
/// must be masked first.
const IPV6_VERSION_BYTE: u8 = 0b0110_0000;
/// Default hop limit for message packets. For now this is set to 64 hops.
///
/// For regular l3 packets, we copy the hop limit from the packet itself. We can't do that here, so
/// 64 is used as sane default.
const MESSAGE_HOP_LIMIT: u8 = 64;
/// The DataPlane manages forwarding/receiving of local data packets to the [`Router`], and the
/// encryption/decryption of them.
///
/// DataPlane itself can be cloned, but this is not cheap on the router and should be avoided.
pub struct DataPlane<M> {
router: Router<M>,
}
impl<M> DataPlane<M>
where
M: Metrics + Clone + Send + 'static,
{
/// Create a new `DataPlane` using the given [`Router`] for packet handling.
///
/// `l3_packet_stream` is a stream of l3 packets from the host, usually read from a TUN interface.
/// `l3_packet_sink` is a sink for l3 packets received from a remote, usually sent to a TUN interface.
pub fn new<S, T, U>(
router: Router<M>,
l3_packet_stream: S,
l3_packet_sink: T,
message_packet_sink: U,
host_packet_source: UnboundedReceiver<DataPacket>,
) -> Self
where
S: Stream<Item = Result<PacketBuffer, std::io::Error>> + Send + Unpin + 'static,
T: Sink<PacketBuffer> + Clone + Send + Unpin + 'static,
T::Error: std::fmt::Display,
U: Sink<(PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
U::Error: std::fmt::Display,
{
let dp = Self { router };
tokio::spawn(
dp.clone()
.inject_l3_packet_loop(l3_packet_stream, l3_packet_sink.clone()),
);
tokio::spawn(dp.clone().extract_packet_loop(
l3_packet_sink,
message_packet_sink,
host_packet_source,
));
dp
}
/// Get a reference to the [`Router`] used.
pub fn router(&self) -> &Router<M> {
&self.router
}
async fn inject_l3_packet_loop<S, T>(self, mut l3_packet_stream: S, mut l3_packet_sink: T)
where
// TODO: no result
// TODO: should IP extraction be handled higher up?
S: Stream<Item = Result<PacketBuffer, std::io::Error>> + Send + Unpin + 'static,
T: Sink<PacketBuffer> + Clone + Send + Unpin + 'static,
T::Error: std::fmt::Display,
{
let node_subnet = self.router.node_tun_subnet();
while let Some(packet) = l3_packet_stream.next().await {
let mut packet = match packet {
Err(e) => {
error!("Failed to read packet from TUN interface {e}");
continue;
}
Ok(packet) => packet,
};
trace!("Received packet from tun");
// Parse an IPv6 header. We don't care about the full header in reality. What we want
// to know is:
// - This is an IPv6 header
// - Hop limit
// - Source address
// - Destination address
// This translates to the following requirements:
// - at least 40 bytes of data, as that is the minimum size of an IPv6 header
// - first 4 bits (version) are the constant 6 (0b0110)
// - src is byte 9-24 (8-23 0 indexed).
// - dst is byte 25-40 (24-39 0 indexed).
if packet.len() < IPV6_MIN_HEADER_SIZE {
trace!("Packet can't contain an IPv6 header");
continue;
}
if packet[0] & IP_VERSION_MASK != IPV6_VERSION_BYTE {
trace!("Packet is not IPv6");
continue;
}
let hop_limit = u8::from_be_bytes([packet[7]]);
let src_ip = Ipv6Addr::from(
<&[u8] as TryInto<[u8; 16]>>::try_into(&packet[8..24])
.expect("Static range bounds on slice are correct length"),
);
let dst_ip = Ipv6Addr::from(
<&[u8] as TryInto<[u8; 16]>>::try_into(&packet[24..40])
.expect("Static range bounds on slice are correct length"),
);
// If this is a packet for our own Subnet, it means there is no local configuration for
// the destination ip or /64 subnet, and the IP is unreachable
if node_subnet.contains_ip(dst_ip.into()) {
trace!(
"Replying to local packet for unexisting address: {}",
dst_ip
);
let mut icmp_packet = PacketBuffer::new();
let host = self.router.node_public_key().address().octets();
let icmp = PacketBuilder::ipv6(host, src_ip.octets(), 64).icmpv6(
Icmpv6Type::DestinationUnreachable(DestUnreachableCode::Address),
);
icmp_packet.set_size(icmp.size(packet.len().min(1280 - 48)));
let mut writer = &mut icmp_packet.buffer_mut()[..];
if let Err(e) = icmp.write(&mut writer, &packet[..packet.len().min(1280 - 48)]) {
error!("Failed to construct ICMP packet: {e}");
continue;
}
if let Err(e) = l3_packet_sink.send(icmp_packet).await {
error!("Failed to send ICMP packet to host: {e}");
}
continue;
}
trace!("Received packet from TUN with dest addr: {:?}", dst_ip);
// Check if the source address is part of 400::/7
let first_src_byte = src_ip.segments()[0] >> 8;
if !(0x04..0x06).contains(&first_src_byte) {
let mut icmp_packet = PacketBuffer::new();
let host = self.router.node_public_key().address().octets();
let icmp = PacketBuilder::ipv6(host, src_ip.octets(), 64).icmpv6(
Icmpv6Type::DestinationUnreachable(
DestUnreachableCode::SourceAddressFailedPolicy,
),
);
icmp_packet.set_size(icmp.size(packet.len().min(1280 - 48)));
let mut writer = &mut icmp_packet.buffer_mut()[..];
if let Err(e) = icmp.write(&mut writer, &packet[..packet.len().min(1280 - 48)]) {
error!("Failed to construct ICMP packet: {e}");
continue;
}
if let Err(e) = l3_packet_sink.send(icmp_packet).await {
error!("Failed to send ICMP packet to host: {e}");
}
continue;
}
// No need to verify destination address, if it is not part of the global subnet there
// should not be a route for it, and therefore the route step will generate the
// appropriate ICMP.
let mut header = packet.header_mut();
header[0] = USER_DATA_VERSION;
header[1] = USER_DATA_L3_TYPE;
if let Some(icmp) = self.encrypt_and_route_packet(src_ip, dst_ip, hop_limit, packet) {
if let Err(e) = l3_packet_sink.send(icmp).await {
error!("Could not forward icmp packet back to TUN interface {e}");
}
}
}
warn!("Data inject loop from host to router ended");
}
/// Inject a new packet where the content is a `message` fragment.
pub fn inject_message_packet(
&self,
src_ip: Ipv6Addr,
dst_ip: Ipv6Addr,
mut packet: PacketBuffer,
) {
let mut header = packet.header_mut();
header[0] = USER_DATA_VERSION;
header[1] = USER_DATA_MESSAGE_TYPE;
self.encrypt_and_route_packet(src_ip, dst_ip, MESSAGE_HOP_LIMIT, packet);
}
/// Encrypt the content of a packet based on the destination key, and then inject the packet
/// into the [`Router`] for processing.
///
/// If no key exists for the destination, the content can't be encrypted, the packet is not
/// injected into the router, and a packet is returned containing an ICMP packet. Note that a
/// return value of [`Option::None`] does not mean the packet was successfully forwarded.
fn encrypt_and_route_packet(
&self,
src_ip: Ipv6Addr,
dst_ip: Ipv6Addr,
hop_limit: u8,
packet: PacketBuffer,
) -> Option<PacketBuffer> {
// If the packet only has a TTL of 1, we won't be able to route it to the destination
// regardless, so just reply with an unencrypted TTL exceeded ICMP.
if hop_limit < 2 {
debug!(
packet.ttl = hop_limit,
packet.src = %src_ip,
packet.dst = %dst_ip,
"Attempting to route packet with insufficient TTL",
);
let mut pb = PacketBuffer::new();
// From self to self
let icmp = PacketBuilder::ipv6(src_ip.octets(), src_ip.octets(), hop_limit)
.icmpv6(Icmpv6Type::TimeExceeded(TimeExceededCode::HopLimitExceeded));
// Scale to max size if needed
let orig_buf_end = packet
.buffer()
.len()
.min(MIN_IPV6_MTU - IPV6_MIN_HEADER_SIZE - ICMP6_HEADER_SIZE);
pb.set_size(icmp.size(orig_buf_end));
let mut b = pb.buffer_mut();
if let Err(e) = icmp.write(&mut b, &packet.buffer()[..orig_buf_end]) {
error!("Failed to construct time exceeded ICMP packet {e}");
return None;
}
return Some(pb);
}
// Get shared secret from node and dest address
let shared_secret = match self.router.get_shared_secret_if_selected(dst_ip.into()) {
Some(ss) => ss,
// If we don't have a route to the destination subnet, reply with ICMP no route to
// host. Do this here as well to avoid encrypting the ICMP to ourselves.
None => {
debug!(
packet.src = %src_ip,
packet.dst = %dst_ip,
"No entry found for destination address, dropping packet",
);
let mut pb = PacketBuffer::new();
// From self to self
let icmp = PacketBuilder::ipv6(src_ip.octets(), src_ip.octets(), hop_limit).icmpv6(
Icmpv6Type::DestinationUnreachable(DestUnreachableCode::NoRoute),
);
// Scale to max size if needed
let orig_buf_end = packet
.buffer()
.len()
.min(MIN_IPV6_MTU - IPV6_MIN_HEADER_SIZE - ICMP6_HEADER_SIZE);
pb.set_size(icmp.size(orig_buf_end));
let mut b = pb.buffer_mut();
if let Err(e) = icmp.write(&mut b, &packet.buffer()[..orig_buf_end]) {
error!("Failed to construct no route to host ICMP packet {e}");
return None;
}
return Some(pb);
}
};
self.router.route_packet(DataPacket {
dst_ip,
src_ip,
hop_limit,
raw_data: shared_secret.encrypt(packet),
});
None
}
async fn extract_packet_loop<T, U>(
self,
mut l3_packet_sink: T,
mut message_packet_sink: U,
mut host_packet_source: UnboundedReceiver<DataPacket>,
) where
T: Sink<PacketBuffer> + Send + Unpin + 'static,
T::Error: std::fmt::Display,
U: Sink<(PacketBuffer, IpAddr, IpAddr)> + Send + Unpin + 'static,
U::Error: std::fmt::Display,
{
while let Some(data_packet) = host_packet_source.recv().await {
// decrypt & send to TUN interface
let shared_secret = if let Some(ss) = self
.router
.get_shared_secret_from_dest(data_packet.src_ip.into())
{
ss
} else {
trace!("Received packet from unknown sender");
continue;
};
let mut decrypted_packet = match shared_secret.decrypt(data_packet.raw_data) {
Ok(data) => data,
Err(_) => {
debug!("Dropping data packet with invalid encrypted content");
continue;
}
};
// Check header
let header = decrypted_packet.header();
if header[0] != USER_DATA_VERSION {
trace!("Dropping decrypted packet with unknown header version");
continue;
}
// Route based on packet type.
match header[1] {
USER_DATA_L3_TYPE => {
let real_packet = decrypted_packet.buffer_mut();
if real_packet.len() < IPV6_MIN_HEADER_SIZE {
debug!(
"Decrypted packet is too short, can't possibly be a valid IPv6 packet"
);
continue;
}
// Adjust the hop limit in the decrypted packet to the new value.
real_packet[7] = data_packet.hop_limit;
if let Err(e) = l3_packet_sink.send(decrypted_packet).await {
error!("Failed to send packet on local TUN interface: {e}",);
continue;
}
}
USER_DATA_MESSAGE_TYPE => {
if let Err(e) = message_packet_sink
.send((
decrypted_packet,
IpAddr::V6(data_packet.src_ip),
IpAddr::V6(data_packet.dst_ip),
))
.await
{
error!("Failed to send packet to message handler: {e}",);
continue;
}
}
USER_DATA_OOB_ICMP => {
let real_packet = &*decrypted_packet;
if real_packet.len() < IPV6_MIN_HEADER_SIZE + ICMP6_HEADER_SIZE + 16 {
debug!(
"Decrypted packet is too short, can't possibly be a valid IPv6 ICMP packet"
);
continue;
}
if real_packet.len() > MIN_IPV6_MTU + 16 {
debug!("Discarding ICMP packet which is too large");
continue;
}
let dec_ip = Ipv6Addr::from(
<&[u8] as TryInto<[u8; 16]>>::try_into(&real_packet[..16]).unwrap(),
);
trace!("ICMP for original target {dec_ip}");
let key =
if let Some(key) = self.router.get_shared_secret_from_dest(dec_ip.into()) {
key
} else {
debug!("Can't decrypt OOB ICMP packet from unknown host");
continue;
};
let (_, body) = match etherparse::IpHeaders::from_slice(&real_packet[16..]) {
Ok(r) => r,
Err(e) => {
// This is a node which does not adhere to the protocol of sending back
// ICMP like this, or it is intentionally sending malicious packets.
debug!(
"Dropping malformed OOB ICMP packet from {} for {e}",
data_packet.src_ip
);
continue;
}
};
let (header, body) = match etherparse::Icmpv6Header::from_slice(body.payload) {
Ok(r) => r,
Err(e) => {
// This is a node which does not adhere to the protocol of sending back
// ICMP like this, or it is intentionally sending malicious packets.
debug!(
"Dropping OOB ICMP packet from {} with malformed ICMP header ({e})",
data_packet.src_ip
);
continue;
}
};
// Decrypt the embedded original packet data carried in the ICMP body.
let orig_pb = match key.decrypt(body[..body.len()].to_vec()) {
Ok(pb) => pb,
Err(e) => {
warn!("Failed to decrypt ICMP data body {e}");
continue;
}
};
let packet = etherparse::PacketBuilder::ipv6(
data_packet.src_ip.octets(),
data_packet.dst_ip.octets(),
data_packet.hop_limit,
)
.icmpv6(header.icmp_type);
let serialized_icmp = packet.size(orig_pb.len());
let mut rp = PacketBuffer::new();
rp.set_size(serialized_icmp);
if let Err(e) =
packet.write(&mut (&mut rp.buffer_mut()[..serialized_icmp]), &orig_pb)
{
error!("Could not reconstruct icmp packet {e}");
continue;
}
if let Err(e) = l3_packet_sink.send(rp).await {
error!("Failed to send packet on local TUN interface: {e}",);
continue;
}
}
_ => {
trace!("Dropping decrypted packet with unknown protocol type");
continue;
}
}
}
warn!("Extract loop from router to host ended");
}
}
impl<M> Clone for DataPlane<M>
where
M: Clone,
{
fn clone(&self) -> Self {
Self {
router: self.router.clone(),
}
}
}

View File

@@ -0,0 +1,116 @@
use std::{
fmt,
net::{AddrParseError, SocketAddr},
str::FromStr,
};
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq)]
/// Error generated while processing improperly formatted endpoints.
pub enum EndpointParseError {
/// An address was specified without leading protocol information.
MissingProtocol,
/// An endpoint was specified using a protocol we (currently) do not understand.
UnknownProtocol,
/// Error while parsing the specific address.
Address(AddrParseError),
}
/// Protocol used by an endpoint.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum Protocol {
/// Standard plain text Tcp.
Tcp,
/// Tls 1.3 with PSK over Tcp.
Tls,
/// Quic protocol (over UDP).
Quic,
}
/// An endpoint defines an address and a protocol to use when communicating with it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Endpoint {
proto: Protocol,
socket_addr: SocketAddr,
}
impl Endpoint {
/// Create a new `Endpoint` with given [`Protocol`] and address.
pub fn new(proto: Protocol, socket_addr: SocketAddr) -> Self {
Self { proto, socket_addr }
}
/// Get the [`Protocol`] used by this `Endpoint`.
pub fn proto(&self) -> Protocol {
self.proto
}
/// Get the [`SocketAddr`] used by this `Endpoint`.
pub fn address(&self) -> SocketAddr {
self.socket_addr
}
}
impl FromStr for Endpoint {
type Err = EndpointParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once("://") {
None => Err(EndpointParseError::MissingProtocol),
Some((proto, socket)) => {
let proto = match proto.to_lowercase().as_str() {
"tcp" => Protocol::Tcp,
"quic" => Protocol::Quic,
"tls" => Protocol::Tls,
_ => return Err(EndpointParseError::UnknownProtocol),
};
let socket_addr = SocketAddr::from_str(socket)?;
Ok(Endpoint { proto, socket_addr })
}
}
}
}
impl fmt::Display for Endpoint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_fmt(format_args!("{} {}", self.proto, self.socket_addr))
}
}
impl fmt::Display for Protocol {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Self::Tcp => "Tcp",
Self::Tls => "Tls",
Self::Quic => "Quic",
})
}
}
impl fmt::Display for EndpointParseError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::MissingProtocol => f.write_str("missing leading protocol identifier"),
Self::UnknownProtocol => f.write_str("protocol for endpoint is not supported"),
Self::Address(e) => f.write_fmt(format_args!("failed to parse address: {e}")),
}
}
}
impl std::error::Error for EndpointParseError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
match self {
Self::Address(e) => Some(e),
_ => None,
}
}
}
impl From<AddrParseError> for EndpointParseError {
fn from(value: AddrParseError) -> Self {
Self::Address(value)
}
}
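// A few parsing sketches based solely on the `FromStr` impl above; the
// addresses and port are illustrative values, not defaults mandated by this
// module.
#[cfg(test)]
mod tests {
    use super::{Endpoint, EndpointParseError, Protocol};
    use std::str::FromStr;

    #[test]
    fn parses_known_protocols() {
        let ep = Endpoint::from_str("tcp://127.0.0.1:9651").expect("valid endpoint");
        assert_eq!(ep.proto(), Protocol::Tcp);
        assert_eq!(ep.address().port(), 9651);
        // IPv6 addresses need brackets, as usual for socket addresses.
        assert!(Endpoint::from_str("quic://[::1]:9651").is_ok());
    }

    #[test]
    fn rejects_malformed_endpoints() {
        assert_eq!(
            Endpoint::from_str("127.0.0.1:9651"),
            Err(EndpointParseError::MissingProtocol)
        );
        assert_eq!(
            Endpoint::from_str("udp://127.0.0.1:9651"),
            Err(EndpointParseError::UnknownProtocol)
        );
    }
}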

View File

@@ -0,0 +1,53 @@
use crate::{babel, subnet::Subnet};
/// This trait is used to filter incoming updates from peers. Only updates which pass all
/// configured filters on the local [`Router`](crate::router::Router) will actually be forwarded
/// to the [`Router`](crate::router::Router) for processing.
pub trait RouteUpdateFilter {
/// Judge an incoming update.
fn allow(&self, update: &babel::Update) -> bool;
}
/// Limit the size of subnets announced in updates to at most `N` bits. Note that "at most `N`
/// bits" refers to the subnet size, so the advertised prefix length must be **at least** `N`.
pub struct MaxSubnetSize<const N: u8>;
impl<const N: u8> RouteUpdateFilter for MaxSubnetSize<N> {
fn allow(&self, update: &babel::Update) -> bool {
update.subnet().prefix_len() >= N
}
}
/// Limit the subnet announced to be included in the given subnet.
pub struct AllowedSubnet {
subnet: Subnet,
}
impl AllowedSubnet {
/// Create a new `AllowedSubnet` filter, which only allows updates whose `Subnet` is contained
/// in the given `Subnet`.
pub fn new(subnet: Subnet) -> Self {
Self { subnet }
}
}
impl RouteUpdateFilter for AllowedSubnet {
fn allow(&self, update: &babel::Update) -> bool {
self.subnet.contains_subnet(&update.subnet())
}
}
/// Limit the announced subnets to those which contain the derived IP from the `RouterId`.
///
/// Since retractions can be sent by any node to indicate they don't have a route for the subnet,
/// these are also allowed.
pub struct RouterIdOwnsSubnet;
impl RouteUpdateFilter for RouterIdOwnsSubnet {
fn allow(&self, update: &babel::Update) -> bool {
update.metric().is_infinite()
|| update
.subnet()
.contains_ip(update.router_id().to_pubkey().address().into())
}
}
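/// A small sketch, not part of the crate API: this is how a set of filters
/// could be combined, forwarding an update only if every configured filter
/// allows it. The helper name is illustrative.
#[allow(dead_code)]
fn passes_all(filters: &[Box<dyn RouteUpdateFilter>], update: &babel::Update) -> bool {
    filters.iter().all(|f| f.allow(update))
}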

View File

@@ -0,0 +1,38 @@
//! Dedicated logic for
//! [intervals](https://datatracker.ietf.org/doc/html/rfc8966#name-solving-starvation-sequenci).
use std::time::Duration;
/// An interval in the babel protocol.
///
/// Intervals represent a duration, and are expressed in centiseconds (0.01 second / 10
/// milliseconds). `Interval` implements [`From`] [`u16`] to create a new interval from a raw
/// value, and [`From`] [`Duration`] to create a new `Interval` from an existing [`Duration`].
/// There are also implementations to convert back to the aforementioned types. Note that in the
/// case of [`Duration`], millisecond precision is lost.
#[derive(Debug, Clone)]
pub struct Interval(u16);
impl From<Duration> for Interval {
fn from(value: Duration) -> Self {
Interval((value.as_millis() / 10) as u16)
}
}
impl From<Interval> for Duration {
fn from(value: Interval) -> Self {
Duration::from_millis(value.0 as u64 * 10)
}
}
impl From<u16> for Interval {
fn from(value: u16) -> Self {
Interval(value)
}
}
impl From<Interval> for u16 {
fn from(value: Interval) -> Self {
value.0
}
}
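// A sketch of the documented precision loss: converting a `Duration` through
// an `Interval` truncates to centisecond granularity.
#[cfg(test)]
mod tests {
    use super::Interval;
    use std::time::Duration;

    #[test]
    fn duration_roundtrip_truncates_to_centiseconds() {
        let interval = Interval::from(Duration::from_millis(25));
        // 25 ms is 2.5 centiseconds, truncated to 2.
        assert_eq!(u16::from(interval.clone()), 2);
        assert_eq!(Duration::from(interval), Duration::from_millis(20));
    }
}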

View File

@@ -0,0 +1,462 @@
use std::net::{IpAddr, Ipv6Addr};
use std::path::PathBuf;
#[cfg(feature = "message")]
use std::{future::Future, time::Duration};
use crate::cdn::Cdn;
use crate::tun::TunConfig;
use bytes::BytesMut;
use data::DataPlane;
use endpoint::Endpoint;
#[cfg(feature = "message")]
use message::TopicConfig;
#[cfg(feature = "message")]
use message::{
MessageId, MessageInfo, MessagePushResponse, MessageStack, PushMessageError, ReceivedMessage,
};
use metrics::Metrics;
use peer_manager::{PeerExists, PeerNotFound, PeerStats, PrivateNetworkKey};
use routing_table::{NoRouteSubnet, QueriedSubnet, RouteEntry};
use subnet::Subnet;
use tokio::net::TcpListener;
use tracing::{error, info, warn};
mod babel;
pub mod cdn;
mod connection;
pub mod crypto;
pub mod data;
pub mod endpoint;
pub mod filters;
mod interval;
#[cfg(feature = "message")]
pub mod message;
mod metric;
pub mod metrics;
pub mod packet;
mod peer;
pub mod peer_manager;
pub mod router;
mod router_id;
mod routing_table;
mod rr_cache;
mod seqno_cache;
mod sequence_number;
mod source_table;
pub mod subnet;
pub mod task;
mod tun;
/// The prefix of the global subnet used.
pub const GLOBAL_SUBNET_ADDRESS: IpAddr = IpAddr::V6(Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 0));
/// The prefix length of the global subnet used.
pub const GLOBAL_SUBNET_PREFIX_LEN: u8 = 7;
/// Config for a mycelium [`Node`].
pub struct Config<M> {
/// The secret key of the node.
pub node_key: crypto::SecretKey,
/// Statically configured peers.
pub peers: Vec<Endpoint>,
/// Tun interface should be disabled.
pub no_tun: bool,
/// Listen port for TCP connections.
pub tcp_listen_port: u16,
/// Listen port for Quic connections.
pub quic_listen_port: Option<u16>,
/// Udp port for peer discovery.
pub peer_discovery_port: Option<u16>,
/// Name for the TUN device.
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
pub tun_name: String,
/// Configuration for a private network, if run in that mode. To enable private networking,
/// this must be a name + a PSK.
pub private_network_config: Option<(String, PrivateNetworkKey)>,
/// Implementation of the `Metrics` trait, used to expose information about the system
/// internals.
pub metrics: M,
/// Mark that is set on all packets we send on the underlying network.
pub firewall_mark: Option<u32>,
// tun_fd is an option specific to Android, iOS, and macOS App Store builds.
// We can't create a TUN device from Rust code on those platforms, so the TUN
// device is created in Kotlin (Android) or Swift (iOS, macOS) and its file
// descriptor is passed to mycelium.
#[cfg(any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "mactunfd"),
))]
pub tun_fd: Option<i32>,
/// The amount of worker tasks spawned to process updates. Up to this amount of updates can be
/// processed in parallel. Because processing an update is a CPU bound task, it is pointless to
/// set this to a value which is higher than the amount of logical CPU cores available to the
/// system.
pub update_workers: usize,
pub cdn_cache: Option<PathBuf>,
/// Configuration for message topics, if this is not set the default config will be used.
#[cfg(feature = "message")]
pub topic_config: Option<TopicConfig>,
}
/// The Node is the main structure in mycelium. It governs the entire data flow.
pub struct Node<M> {
router: router::Router<M>,
peer_manager: peer_manager::PeerManager<M>,
_cdn: Option<Cdn>,
#[cfg(feature = "message")]
message_stack: message::MessageStack<M>,
}
/// General info about a node.
pub struct NodeInfo {
/// The overlay subnet in use by the node.
pub node_subnet: Subnet,
/// The public key of the node
pub node_pubkey: crypto::PublicKey,
}
impl<M> Node<M>
where
M: Metrics + Clone + Send + Sync + 'static,
{
/// Set up a new `Node` with the provided [`Config`].
pub async fn new(config: Config<M>) -> Result<Self, Box<dyn std::error::Error>> {
// If a private network is configured, validate network name
if let Some((net_name, _)) = &config.private_network_config {
if net_name.len() < 2 || net_name.len() > 64 {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidInput,
"network name must be between 2 and 64 characters",
)
.into());
}
}
let node_pub_key = crypto::PublicKey::from(&config.node_key);
let node_addr = node_pub_key.address();
let (tun_tx, tun_rx) = tokio::sync::mpsc::unbounded_channel();
let node_subnet = Subnet::new(
// Truncate last 64 bits of address.
// TODO: find a better way to do this.
Subnet::new(node_addr.into(), 64)
.expect("64 is a valid IPv6 prefix size; qed")
.network(),
64,
)
.expect("64 is a valid IPv6 prefix size; qed");
// Creating a new Router instance
let router = match router::Router::new(
config.update_workers,
tun_tx,
node_subnet,
vec![node_subnet],
(config.node_key, node_pub_key),
vec![
Box::new(filters::AllowedSubnet::new(
Subnet::new(GLOBAL_SUBNET_ADDRESS, GLOBAL_SUBNET_PREFIX_LEN)
.expect("Global subnet is properly defined; qed"),
)),
Box::new(filters::MaxSubnetSize::<64>),
Box::new(filters::RouterIdOwnsSubnet),
],
config.metrics.clone(),
) {
Ok(router) => {
info!(
"Router created. Pubkey: {:x}",
BytesMut::from(&router.node_public_key().as_bytes()[..])
);
router
}
Err(e) => {
error!("Error creating router: {e}");
panic!("Error creating router: {e}");
}
};
// Creating a new PeerManager instance
let pm = peer_manager::PeerManager::new(
router.clone(),
config.peers,
config.tcp_listen_port,
config.quic_listen_port,
config.peer_discovery_port.unwrap_or_default(),
config.peer_discovery_port.is_none(),
config.private_network_config,
config.metrics,
config.firewall_mark,
)?;
info!("Started peer manager");
#[cfg(feature = "message")]
let (tx, rx) = tokio::sync::mpsc::channel(100);
#[cfg(feature = "message")]
let msg_receiver = tokio_stream::wrappers::ReceiverStream::new(rx);
#[cfg(feature = "message")]
let msg_sender = tokio_util::sync::PollSender::new(tx);
#[cfg(not(feature = "message"))]
let msg_sender = futures::sink::drain();
let _data_plane = if config.no_tun {
warn!("Starting data plane without TUN interface, L3 functionality disabled");
DataPlane::new(
router.clone(),
// No tun so create a dummy stream for L3 packets which never yields
tokio_stream::pending(),
// Similarly, create a sink which just discards every packet we would receive
futures::sink::drain(),
msg_sender,
tun_rx,
)
} else {
#[cfg(not(any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "android",
target_os = "ios"
)))]
{
panic!("On this platform, you can only run with --no-tun");
}
#[cfg(any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "android",
target_os = "ios"
))]
{
#[cfg(any(
target_os = "linux",
all(target_os = "macos", not(feature = "mactunfd")),
target_os = "windows"
))]
let tun_config = TunConfig {
name: config.tun_name.clone(),
node_subnet: Subnet::new(node_addr.into(), 64)
.expect("64 is a valid subnet size for IPv6; qed"),
route_subnet: Subnet::new(GLOBAL_SUBNET_ADDRESS, GLOBAL_SUBNET_PREFIX_LEN)
.expect("Static configured TUN route is valid; qed"),
};
#[cfg(any(
target_os = "android",
target_os = "ios",
all(target_os = "macos", feature = "mactunfd"),
))]
let tun_config = TunConfig {
tun_fd: config.tun_fd.unwrap(),
};
let (rxhalf, txhalf) = tun::new(tun_config).await?;
info!("Node overlay IP: {node_addr}");
DataPlane::new(router.clone(), rxhalf, txhalf, msg_sender, tun_rx)
}
};
let cdn = config.cdn_cache.map(Cdn::new);
if let Some(ref cdn) = cdn {
let listener = TcpListener::bind("localhost:80").await?;
cdn.start(listener)?;
}
#[cfg(feature = "message")]
let ms = MessageStack::new(_data_plane, msg_receiver, config.topic_config);
Ok(Node {
router,
peer_manager: pm,
_cdn: cdn,
#[cfg(feature = "message")]
message_stack: ms,
})
}
/// Get information about the running `Node`
pub fn info(&self) -> NodeInfo {
NodeInfo {
node_subnet: self.router.node_tun_subnet(),
node_pubkey: self.router.node_public_key(),
}
}
/// Get information about the current peers in the `Node`
pub fn peer_info(&self) -> Vec<PeerStats> {
self.peer_manager.peers()
}
/// Add a new peer to the system identified by an [`Endpoint`].
pub fn add_peer(&self, endpoint: Endpoint) -> Result<(), PeerExists> {
self.peer_manager.add_peer(endpoint)
}
/// Remove an existing peer identified by an [`Endpoint`] from the system.
pub fn remove_peer(&self, endpoint: Endpoint) -> Result<(), PeerNotFound> {
self.peer_manager.delete_peer(&endpoint)
}
/// List all selected [`routes`](RouteEntry) in the system.
pub fn selected_routes(&self) -> Vec<RouteEntry> {
self.router.load_selected_routes()
}
/// List all fallback [`routes`](RouteEntry) in the system.
pub fn fallback_routes(&self) -> Vec<RouteEntry> {
self.router.load_fallback_routes()
}
/// List all [`queried subnets`](QueriedSubnet) in the system.
pub fn queried_subnets(&self) -> Vec<QueriedSubnet> {
self.router.load_queried_subnets()
}
/// List all [`subnets with no route`](NoRouteSubnet) in the system.
pub fn no_route_entries(&self) -> Vec<NoRouteSubnet> {
self.router.load_no_route_entries()
}
/// Get public key from the IP of `Node`
pub fn get_pubkey_from_ip(&self, ip: IpAddr) -> Option<crypto::PublicKey> {
self.router.get_pubkey(ip)
}
}
#[cfg(feature = "message")]
impl<M> Node<M>
where
M: Metrics + Clone + Send + 'static,
{
/// Wait for a message to arrive in the message stack.
///
/// If the optional `topic` is provided, only messages which have exactly the same value in
/// `topic` will be returned. The `pop` argument decides if the message is removed from the
/// internal queue or not. If `pop` is `false`, the same message will be returned on the next
/// call (with the same topic).
///
/// This method returns a future which will wait indefinitely until a message is received. It
/// is generally a good idea to put a limit on how long to wait by wrapping this in a
/// [`tokio::time::timeout`], as sketched in the helper after this impl block.
pub fn get_message(
&self,
pop: bool,
topic: Option<Vec<u8>>,
) -> impl Future<Output = ReceivedMessage> + '_ {
// First reborrow only the message stack from self, then manually construct a future. This
// avoids a lifetime issue on the router, which is not Sync. A regular `async fn` would not
// let us express this narrower borrow.
let ms = &self.message_stack;
async move { ms.message(pop, topic).await }
}
/// Push a new message to the message stack.
///
/// The system will attempt to transmit the message for `try_duration`. A message is considered
/// transmitted when the receiver has indicated it completely received the message. If
/// `subscribe_reply` is `true`, the second return value will be [`Option::Some`], with a
/// watcher which will resolve if a reply for this exact message comes in. Since this relies on
/// the receiver actually sending a reply, there is no guarantee that this will eventually
/// resolve.
pub fn push_message(
&self,
dst: IpAddr,
data: Vec<u8>,
topic: Option<Vec<u8>>,
try_duration: Duration,
subscribe_reply: bool,
) -> Result<MessagePushResponse, PushMessageError> {
self.message_stack.new_message(
dst,
data,
topic.unwrap_or_default(),
try_duration,
subscribe_reply,
)
}
/// Get the status of a message sent previously.
///
/// Returns [`Option::None`] if no message is found with the given id. Message info is only
/// retained for a limited time after a message has been received, or after the message has
/// been aborted due to a timeout.
pub fn message_status(&self, id: MessageId) -> Option<MessageInfo> {
self.message_stack.message_info(id)
}
/// Send a reply to a previously received message.
pub fn reply_message(
&self,
id: MessageId,
dst: IpAddr,
data: Vec<u8>,
try_duration: Duration,
) -> MessageId {
self.message_stack
.reply_message(id, dst, data, try_duration)
}
/// Get a list of all configured topics
pub fn topics(&self) -> Vec<Vec<u8>> {
self.message_stack.topics()
}
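/// Get the list of whitelisted source subnets for a topic, if the topic is configured.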
pub fn topic_allowed_sources(&self, topic: &Vec<u8>) -> Option<Vec<Subnet>> {
self.message_stack.topic_allowed_sources(topic)
}
/// Sets the default topic action to accept or reject. This decides how topics which don't have
/// an explicit whitelist get handled.
pub fn accept_unconfigured_topic(&self, accept: bool) {
self.message_stack.set_default_topic_action(accept)
}
/// Whether a topic without an explicit configuration is accepted or not.
pub fn unconfigure_topic_action(&self) -> bool {
self.message_stack.get_default_topic_action()
}
/// Add a topic to the whitelist without any configured allowed sources.
pub fn add_topic_whitelist(&self, topic: Vec<u8>) {
self.message_stack.add_topic_whitelist(topic)
}
/// Remove a topic from the whitelist. Future messages will follow the default action.
pub fn remove_topic_whitelist(&self, topic: Vec<u8>) {
self.message_stack.remove_topic_whitelist(topic)
}
/// Add a new whitelisted source for a topic. This creates the topic if it does not exist yet.
pub fn add_topic_whitelist_src(&self, topic: Vec<u8>, src: Subnet) {
self.message_stack.add_topic_whitelist_src(topic, src)
}
/// Remove a whitelisted source for a topic.
pub fn remove_topic_whitelist_src(&self, topic: Vec<u8>, src: Subnet) {
self.message_stack.remove_topic_whitelist_src(topic, src)
}
/// Set the forward socket for a topic. Creates the topic if it doesn't exist.
pub fn set_topic_forward_socket(&self, topic: Vec<u8>, socket_path: std::path::PathBuf) {
self.message_stack
.set_topic_forward_socket(topic, Some(socket_path))
}
/// Get the forward socket for a topic, if any.
pub fn get_topic_forward_socket(&self, topic: &Vec<u8>) -> Option<std::path::PathBuf> {
self.message_stack.get_topic_forward_socket(topic)
}
/// Removes the forward socket for the topic, if one exists.
pub fn delete_topic_forward_socket(&self, topic: Vec<u8>) {
self.message_stack.set_topic_forward_socket(topic, None)
}
}
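/// A minimal sketch, not part of the public API, of the recommended pattern of
/// bounding [`Node::get_message`] with [`tokio::time::timeout`]; the helper
/// name is illustrative only.
#[cfg(feature = "message")]
#[allow(dead_code)]
async fn receive_message_with_timeout<M>(node: &Node<M>, wait: Duration) -> Option<ReceivedMessage>
where
    M: Metrics + Clone + Send + 'static,
{
    // An `Err` from `timeout` means the deadline elapsed before a message arrived.
    tokio::time::timeout(wait, node.get_message(true, None))
        .await
        .ok()
}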

File diff suppressed because it is too large

View File

@@ -0,0 +1,254 @@
use std::fmt;
use super::MessagePacket;
/// A message representing a "chunk" message.
///
/// The body of a chunk message has the following structure:
/// - 8 bytes: chunk index
/// - 8 bytes: chunk offset
/// - 8 bytes: chunk size
/// - remainder: chunk data, with length given by the chunk size field
pub struct MessageChunk {
buffer: MessagePacket,
}
impl MessageChunk {
/// Create a new `MessageChunk` in the provided [`MessagePacket`].
pub fn new(mut buffer: MessagePacket) -> Self {
buffer.set_used_buffer_size(24);
buffer.header_mut().flags_mut().set_chunk();
Self { buffer }
}
/// Return the index of the chunk in the message, as written in the body.
pub fn chunk_idx(&self) -> u64 {
u64::from_be_bytes(
self.buffer.buffer()[..8]
.try_into()
.expect("Buffer contains a size field of valid length; qed"),
)
}
/// Set the index of the chunk in the message body.
pub fn set_chunk_idx(&mut self, chunk_idx: u64) {
self.buffer.buffer_mut()[..8].copy_from_slice(&chunk_idx.to_be_bytes())
}
/// Return the chunk offset in the message, as written in the body.
pub fn chunk_offset(&self) -> u64 {
u64::from_be_bytes(
self.buffer.buffer()[8..16]
.try_into()
.expect("Buffer contains a size field of valid length; qed"),
)
}
/// Set the offset of the chunk in the message body.
pub fn set_chunk_offset(&mut self, chunk_offset: u64) {
self.buffer.buffer_mut()[8..16].copy_from_slice(&chunk_offset.to_be_bytes())
}
/// Return the size of the chunk in the message, as written in the body.
pub fn chunk_size(&self) -> u64 {
// Shield against a corrupt value.
u64::min(
u64::from_be_bytes(
self.buffer.buffer()[16..24]
.try_into()
.expect("Buffer contains a size field of valid length; qed"),
),
self.buffer.buffer().len() as u64 - 24,
)
}
/// Set the size of the chunk in the message body.
pub fn set_chunk_size(&mut self, chunk_size: u64) {
self.buffer.buffer_mut()[16..24].copy_from_slice(&chunk_size.to_be_bytes())
}
/// Return a reference to the chunk data in the message.
pub fn data(&self) -> &[u8] {
&self.buffer.buffer()[24..24 + self.chunk_size() as usize]
}
/// Set the chunk data in this message. This will also set the size field to the proper value.
pub fn set_chunk_data(&mut self, data: &[u8]) -> Result<(), InsufficientChunkSpace> {
let buf = self.buffer.buffer_mut();
let available_space = buf.len() - 24;
if data.len() > available_space {
return Err(InsufficientChunkSpace {
available: available_space,
needed: data.len(),
});
}
// Slicing based on data.len() is fine here as we just checked to make sure we can handle
// this capacity.
buf[24..24 + data.len()].copy_from_slice(data);
self.set_chunk_size(data.len() as u64);
// Also set the extra space used by the buffer on the underlying packet.
self.buffer.set_used_buffer_size(24 + data.len());
Ok(())
}
/// Convert the `MessageChunk` into a reply. This does nothing if it is already a reply.
pub fn into_reply(mut self) -> Self {
self.buffer.header_mut().flags_mut().set_ack();
// We want to leave the length field intact but don't want to copy the data in the reply.
// This needs additional work on the underlying buffer.
// TODO
self
}
/// Consumes this `MessageChunk`, returning the underlying [`MessagePacket`].
pub fn into_inner(self) -> MessagePacket {
self.buffer
}
}
/// An error indicating not enough space is available in a message to set the chunk data.
#[derive(Debug)]
pub struct InsufficientChunkSpace {
/// Amount of space available in the chunk.
pub available: usize,
/// Amount of space needed to set the chunk data
pub needed: usize,
}
impl fmt::Display for InsufficientChunkSpace {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"Insufficient capacity available, needed {} bytes, have {} bytes",
self.needed, self.available
)
}
}
impl std::error::Error for InsufficientChunkSpace {}
#[cfg(test)]
mod tests {
use std::array;
use crate::{crypto::PacketBuffer, message::MessagePacket};
use super::MessageChunk;
#[test]
fn chunk_flag_set() {
let mc = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
let mp = mc.into_inner();
assert!(mp.header().flags().chunk());
}
#[test]
fn read_chunk_idx() {
let mut pb = PacketBuffer::new();
pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 0, 0, 100, 73]);
let ms = MessageChunk::new(MessagePacket::new(pb));
assert_eq!(ms.chunk_idx(), 25_673);
}
#[test]
fn write_chunk_idx() {
let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
ms.set_chunk_idx(723);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 0, 0, 2, 211]);
assert_eq!(ms.chunk_idx(), 723);
}
#[test]
fn read_chunk_offset() {
let mut pb = PacketBuffer::new();
pb.buffer_mut()[20..28].copy_from_slice(&[0, 0, 0, 0, 0, 20, 40, 60]);
let ms = MessageChunk::new(MessagePacket::new(pb));
assert_eq!(ms.chunk_offset(), 1_321_020);
}
#[test]
fn write_chunk_offset() {
let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
ms.set_chunk_offset(1_000_000);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[8..16], &[0, 0, 0, 0, 0, 15, 66, 64]);
assert_eq!(ms.chunk_offset(), 1_000_000);
}
#[test]
fn read_chunk_size() {
let mut pb = PacketBuffer::new();
pb.buffer_mut()[28..36].copy_from_slice(&[0, 0, 0, 0, 0, 0, 3, 232]);
let ms = MessageChunk::new(MessagePacket::new(pb));
assert_eq!(ms.chunk_size(), 1_000);
}
#[test]
fn write_chunk_size() {
let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
ms.set_chunk_size(1_300);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[16..24], &[0, 0, 0, 0, 0, 0, 5, 20]);
assert_eq!(ms.chunk_size(), 1_300);
}
#[test]
fn read_chunk_data() {
const CHUNK_DATA: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut pb = PacketBuffer::new();
// Set data len
pb.buffer_mut()[28..36].copy_from_slice(&CHUNK_DATA.len().to_be_bytes());
pb.buffer_mut()[36..36 + CHUNK_DATA.len()].copy_from_slice(CHUNK_DATA);
let ms = MessageChunk::new(MessagePacket::new(pb));
assert_eq!(ms.chunk_size(), 16);
assert_eq!(ms.data(), CHUNK_DATA);
}
#[test]
fn write_chunk_data() {
const CHUNK_DATA: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
let res = ms.set_chunk_data(CHUNK_DATA);
assert!(res.is_ok());
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
// Check and make sure size is properly set.
assert_eq!(&ms.buffer.buffer()[16..24], &[0, 0, 0, 0, 0, 0, 0, 16]);
assert_eq!(ms.chunk_size(), 16);
assert_eq!(ms.data(), CHUNK_DATA);
}
#[test]
fn write_chunk_data_oversized() {
let data: [u8; 1500] = array::from_fn(|_| 0xFF);
let mut ms = MessageChunk::new(MessagePacket::new(PacketBuffer::new()));
let res = ms.set_chunk_data(&data);
assert!(res.is_err());
}
}

View File

@@ -0,0 +1,131 @@
use super::{MessageChecksum, MessagePacket, MESSAGE_CHECKSUM_LENGTH};
/// A message representing a "done" message.
///
/// The body of a done message has the following structure:
/// - 8 bytes: chunks transmitted
/// - 32 bytes: checksum of the transmitted data
pub struct MessageDone {
buffer: MessagePacket,
}
impl MessageDone {
/// Create a new `MessageDone` in the provided [`MessagePacket`].
pub fn new(mut buffer: MessagePacket) -> Self {
buffer.set_used_buffer_size(40);
buffer.header_mut().flags_mut().set_done();
Self { buffer }
}
/// Return the amount of chunks in the message, as written in the body.
pub fn chunk_count(&self) -> u64 {
u64::from_be_bytes(
self.buffer.buffer()[..8]
.try_into()
.expect("Buffer contains a size field of valid length; qed"),
)
}
/// Set the amount of chunks field of the message body.
pub fn set_chunk_count(&mut self, chunk_count: u64) {
self.buffer.buffer_mut()[..8].copy_from_slice(&chunk_count.to_be_bytes())
}
/// Get the checksum of the message from the body.
pub fn checksum(&self) -> MessageChecksum {
MessageChecksum::from_bytes(
self.buffer.buffer()[8..8 + MESSAGE_CHECKSUM_LENGTH]
.try_into()
.expect("Buffer contains enough data for a checksum; qed"),
)
}
/// Set the checksum of the message in the body.
pub fn set_checksum(&mut self, checksum: MessageChecksum) {
self.buffer.buffer_mut()[8..8 + MESSAGE_CHECKSUM_LENGTH]
.copy_from_slice(checksum.as_bytes())
}
/// Convert the `MessageDone` into a reply. This does nothing if it is already a reply.
pub fn into_reply(mut self) -> Self {
self.buffer.header_mut().flags_mut().set_ack();
self
}
/// Consumes this `MessageDone`, returning the underlying [`MessagePacket`].
pub fn into_inner(self) -> MessagePacket {
self.buffer
}
}
#[cfg(test)]
mod tests {
use crate::{
crypto::PacketBuffer,
message::{MessageChecksum, MessagePacket},
};
use super::MessageDone;
#[test]
fn done_flag_set() {
let md = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
let mp = md.into_inner();
assert!(mp.header().flags().done());
}
#[test]
fn read_chunk_count() {
let mut pb = PacketBuffer::new();
pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 0, 0, 73, 55]);
let ms = MessageDone::new(MessagePacket::new(pb));
assert_eq!(ms.chunk_count(), 18_743);
}
#[test]
fn write_chunk_count() {
let mut ms = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
ms.set_chunk_count(10_000);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 0, 0, 39, 16]);
assert_eq!(ms.chunk_count(), 10_000);
}
#[test]
fn read_checksum() {
const CHECKSUM: MessageChecksum = MessageChecksum::from_bytes([
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F,
]);
let mut pb = PacketBuffer::new();
pb.buffer_mut()[20..52].copy_from_slice(CHECKSUM.as_bytes());
let ms = MessageDone::new(MessagePacket::new(pb));
assert_eq!(ms.checksum(), CHECKSUM);
}
#[test]
fn write_checksum() {
const CHECKSUM: MessageChecksum = MessageChecksum::from_bytes([
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B,
0x1C, 0x1D, 0x1E, 0x1F,
]);
let mut ms = MessageDone::new(MessagePacket::new(PacketBuffer::new()));
ms.set_checksum(CHECKSUM);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[8..40], CHECKSUM.as_bytes());
assert_eq!(ms.checksum(), CHECKSUM);
}
}

View File

@@ -0,0 +1,101 @@
use super::MessagePacket;
/// A message representing an init message.
///
/// The body of an init message has the following structure:
/// - 8 bytes: message length
/// - 1 byte: topic length
/// - remainder: topic bytes
pub struct MessageInit {
buffer: MessagePacket,
}
impl MessageInit {
/// Create a new `MessageInit` in the provided [`MessagePacket`].
pub fn new(mut buffer: MessagePacket) -> Self {
buffer.set_used_buffer_size(9);
buffer.header_mut().flags_mut().set_init();
Self { buffer }
}
/// Return the length of the message, as written in the body.
pub fn length(&self) -> u64 {
u64::from_be_bytes(
self.buffer.buffer()[..8]
.try_into()
.expect("Buffer contains a size field of valid length; qed"),
)
}
/// Return the topic of the message, as written in the body.
pub fn topic(&self) -> &[u8] {
let topic_len = self.buffer.buffer()[8] as usize;
&self.buffer.buffer()[9..9 + topic_len]
}
/// Set the length field of the message body.
pub fn set_length(&mut self, length: u64) {
self.buffer.buffer_mut()[..8].copy_from_slice(&length.to_be_bytes())
}
/// Set the topic in the message body.
///
/// # Panics
///
/// This function panics if the topic is longer than 255 bytes.
pub fn set_topic(&mut self, topic: &[u8]) {
assert!(
topic.len() <= u8::MAX as usize,
"Topic can be 255 bytes long at most"
);
self.buffer.set_used_buffer_size(9 + topic.len());
self.buffer.buffer_mut()[8] = topic.len() as u8;
self.buffer.buffer_mut()[9..9 + topic.len()].copy_from_slice(topic);
}
/// Convert the `MessageInit` into a reply. This does nothing if it is already a reply.
pub fn into_reply(mut self) -> Self {
self.buffer.header_mut().flags_mut().set_ack();
self
}
/// Consumes this `MessageInit`, returning the underlying [`MessagePacket`].
pub fn into_inner(self) -> MessagePacket {
self.buffer
}
}
#[cfg(test)]
mod tests {
use crate::{crypto::PacketBuffer, message::MessagePacket};
use super::MessageInit;
#[test]
fn init_flag_set() {
let mi = MessageInit::new(MessagePacket::new(PacketBuffer::new()));
let mp = mi.into_inner();
assert!(mp.header().flags().init());
}
#[test]
fn read_length() {
let mut pb = PacketBuffer::new();
pb.buffer_mut()[12..20].copy_from_slice(&[0, 0, 0, 0, 2, 3, 4, 5]);
let ms = MessageInit::new(MessagePacket::new(pb));
assert_eq!(ms.length(), 33_752_069);
}
#[test]
fn write_length() {
let mut ms = MessageInit::new(MessagePacket::new(PacketBuffer::new()));
ms.set_length(3_432_634_632);
// Since we don't work with packet buffer we don't have to account for the message packet
// header.
assert_eq!(&ms.buffer.buffer()[..8], &[0, 0, 0, 0, 204, 153, 217, 8]);
assert_eq!(ms.length(), 3_432_634_632);
}
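    // A sketch of the topic roundtrip: `set_topic` stores the length prefix at
    // byte 8 of the body and the topic bytes right after it.
    #[test]
    fn write_topic() {
        let mut ms = MessageInit::new(MessagePacket::new(PacketBuffer::new()));
        ms.set_topic(b"example.topic");
        assert_eq!(ms.buffer.buffer()[8], 13);
        assert_eq!(ms.topic(), &b"example.topic"[..]);
    }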
}

View File

@@ -0,0 +1,230 @@
use crate::subnet::Subnet;
use core::fmt;
use serde::{
de::{Deserialize, Deserializer, MapAccess, Visitor},
Deserialize as DeserializeMacro,
};
use std::collections::HashMap;
use std::path::PathBuf;
/// Configuration for a topic whitelist, including allowed subnets and optional forward socket
#[derive(Debug, Default, Clone)]
pub struct TopicWhitelistConfig {
/// Subnets that are allowed to send messages to this topic
subnets: Vec<Subnet>,
/// Optional Unix domain socket path to forward messages to
forward_socket: Option<PathBuf>,
}
impl TopicWhitelistConfig {
/// Create a new empty whitelist config
pub fn new() -> Self {
Self::default()
}
/// Get the list of whitelisted subnets
pub fn subnets(&self) -> &Vec<Subnet> {
&self.subnets
}
/// Get the forward socket path, if any
pub fn forward_socket(&self) -> Option<&PathBuf> {
self.forward_socket.as_ref()
}
/// Set the forward socket path
pub fn set_forward_socket(&mut self, path: Option<PathBuf>) {
self.forward_socket = path;
}
/// Add a subnet to the whitelist
pub fn add_subnet(&mut self, subnet: Subnet) {
self.subnets.push(subnet);
}
/// Remove a subnet from the whitelist
pub fn remove_subnet(&mut self, subnet: &Subnet) {
self.subnets.retain(|s| s != subnet);
}
}
#[derive(Debug, Default, Clone)]
pub struct TopicConfig {
/// The default action to take if no ACL is defined for a topic.
default: MessageAction,
/// Explicitly configured whitelists for topics. IPs which aren't part of the whitelist will
/// not be allowed to send messages to that topic. If a topic is not in this map, the default
/// action will be used.
whitelist: HashMap<Vec<u8>, TopicWhitelistConfig>,
}
impl TopicConfig {
/// Get the [`default action`](MessageAction) if the topic is not configured.
pub fn default(&self) -> MessageAction {
self.default
}
/// Set the default [`action`](MessageAction) to take for topics which do not have a whitelist
/// configured.
pub fn set_default(&mut self, default: MessageAction) {
self.default = default;
}
/// Get the fully configured whitelist
pub fn whitelist(&self) -> &HashMap<Vec<u8>, TopicWhitelistConfig> {
&self.whitelist
}
/// Insert a new topic in the whitelist, without any configured allowed sources.
pub fn add_topic_whitelist(&mut self, topic: Vec<u8>) {
self.whitelist.entry(topic).or_default();
}
/// Set the forward socket for a topic. Does nothing if the topic doesn't exist.
pub fn set_topic_forward_socket(&mut self, topic: Vec<u8>, socket_path: Option<PathBuf>) {
self.whitelist
.entry(topic)
.and_modify(|c| c.set_forward_socket(socket_path));
}
/// Get the forward socket for a topic, if any.
pub fn get_topic_forward_socket(&self, topic: &Vec<u8>) -> Option<&PathBuf> {
self.whitelist
.get(topic)
.and_then(|config| config.forward_socket())
}
/// Remove a topic from the whitelist. Future messages will follow the default action.
pub fn remove_topic_whitelist(&mut self, topic: &Vec<u8>) {
self.whitelist.remove(topic);
}
/// Adds a new whitelisted source for a topic. This creates the topic if it does not exist yet.
pub fn add_topic_whitelist_src(&mut self, topic: Vec<u8>, src: Subnet) {
self.whitelist.entry(topic).or_default().add_subnet(src);
}
/// Removes a whitelisted source for a topic.
///
/// If the last source is removed for a topic, the entry remains, and must be cleared by calling
/// [`Self::remove_topic_whitelist`] to fall back to the default action. Note that an empty
/// whitelist effectively blocks all messages for a topic.
///
/// This does nothing if the topic does not exist.
pub fn remove_topic_whitelist_src(&mut self, topic: &Vec<u8>, src: Subnet) {
if let Some(whitelist_config) = self.whitelist.get_mut(topic) {
whitelist_config.remove_subnet(&src);
}
}
}
#[derive(Debug, Default, Clone, Copy, DeserializeMacro)]
pub enum MessageAction {
/// Accept the message
#[default]
Accept,
/// Reject the message
Reject,
}
// Helper function to parse a subnet from a string
fn parse_subnet_str<E>(s: &str) -> Result<Subnet, E>
where
E: serde::de::Error,
{
// Try to parse as a subnet (with prefix)
if let Ok(ipnet) = s.parse::<ipnet::IpNet>() {
return Subnet::new(ipnet.addr(), ipnet.prefix_len())
.map_err(|e| serde::de::Error::custom(format!("Invalid subnet prefix length: {e}")));
}
// Try to parse as an IP address (convert to /32 or /128 subnet)
if let Ok(ip) = s.parse::<std::net::IpAddr>() {
let prefix_len = match ip {
std::net::IpAddr::V4(_) => 32,
std::net::IpAddr::V6(_) => 128,
};
return Subnet::new(ip, prefix_len)
.map_err(|e| serde::de::Error::custom(format!("Invalid subnet prefix length: {e}")));
}
Err(serde::de::Error::custom(format!(
"Invalid subnet or IP address: {s}",
)))
}
// Define a struct for deserializing the whitelist config
#[derive(DeserializeMacro)]
struct WhitelistConfigData {
#[serde(default)]
subnets: Vec<String>,
#[serde(default)]
forward_socket: Option<String>,
}
// Custom deserialization for TopicConfig, accepting both the new map-based whitelist format
// and the legacy list-of-subnets format.
impl<'de> Deserialize<'de> for TopicConfig {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct TopicConfigVisitor;
impl<'de> Visitor<'de> for TopicConfigVisitor {
type Value = TopicConfig;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a topic configuration")
}
fn visit_map<V>(self, mut map: V) -> Result<TopicConfig, V::Error>
where
V: MapAccess<'de>,
{
let mut default = MessageAction::default();
let mut whitelist = HashMap::new();
while let Some(key) = map.next_key::<String>()? {
if key == "default" {
default = map.next_value()?;
} else {
// Try to parse as a WhitelistConfigData first
if let Ok(config_data) = map.next_value::<WhitelistConfigData>() {
let mut whitelist_config = TopicWhitelistConfig::default();
// Process subnets
for subnet_str in config_data.subnets {
let subnet = parse_subnet_str(&subnet_str)?;
whitelist_config.add_subnet(subnet);
}
// Process forward_socket
if let Some(socket_path) = config_data.forward_socket {
whitelist_config
.set_forward_socket(Some(PathBuf::from(socket_path)));
}
// Convert string key to Vec<u8>
whitelist.insert(key.into_bytes(), whitelist_config);
} else {
// Fallback to old format: just a list of subnets
let subnet_strs = map.next_value::<Vec<String>>()?;
let mut whitelist_config = TopicWhitelistConfig::default();
for subnet_str in subnet_strs {
let subnet = parse_subnet_str(&subnet_str)?;
whitelist_config.add_subnet(subnet);
}
// Convert string key to Vec<u8>
whitelist.insert(key.into_bytes(), whitelist_config);
}
}
}
Ok(TopicConfig { default, whitelist })
}
}
deserializer.deserialize_map(TopicConfigVisitor)
}
}
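// A sketch of the map-based whitelist shape accepted by the custom
// deserializer above. It assumes `serde_json` is available (e.g. as a
// dev-dependency); any self-describing serde format works the same way.
#[cfg(test)]
mod tests {
    use super::{MessageAction, TopicConfig};

    #[test]
    fn deserializes_map_based_whitelist() {
        let raw = r#"{
            "default": "Reject",
            "chat": { "subnets": ["400::/7"], "forward_socket": "/run/chat.sock" }
        }"#;
        let config: TopicConfig = serde_json::from_str(raw).expect("valid config");
        assert!(matches!(config.default(), MessageAction::Reject));
        assert!(config
            .get_topic_forward_socket(&b"chat".to_vec())
            .is_some());
    }
}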

View File

@@ -0,0 +1,144 @@
//! Dedicated logic for
//! [metrics](https://datatracker.ietf.org/doc/html/rfc8966#metric-computation).
use core::fmt;
use std::ops::{Add, Sub};
/// Value of the infinite metric.
const METRIC_INFINITE: u16 = 0xFFFF;
/// A `Metric` is used to indicate the cost associated with a route. A lower Metric means a route
/// is more favorable.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)]
pub struct Metric(u16);
impl Metric {
/// Create a new `Metric` with the given value.
pub const fn new(value: u16) -> Self {
Metric(value)
}
/// Creates a new infinite `Metric`.
pub const fn infinite() -> Self {
Metric(METRIC_INFINITE)
}
/// Checks if this metric indicates a retracted route.
pub const fn is_infinite(&self) -> bool {
self.0 == METRIC_INFINITE
}
/// Checks if this metric represents a directly connected route.
pub const fn is_direct(&self) -> bool {
self.0 == 0
}
/// Computes the absolute value of the difference between this and another `Metric`.
pub fn delta(&self, rhs: &Self) -> Metric {
Metric(if self > rhs {
self.0 - rhs.0
} else {
rhs.0 - self.0
})
}
}
impl fmt::Display for Metric {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.is_infinite() {
f.pad("Infinite")
} else {
f.write_fmt(format_args!("{}", self.0))
}
}
}
impl From<u16> for Metric {
fn from(value: u16) -> Self {
Metric(value)
}
}
impl From<Metric> for u16 {
fn from(value: Metric) -> Self {
value.0
}
}
impl Add for Metric {
type Output = Self;
fn add(self, rhs: Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Add<&Metric> for &Metric {
type Output = Metric;
fn add(self, rhs: &Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Add<&Metric> for Metric {
type Output = Self;
fn add(self, rhs: &Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Add<Metric> for &Metric {
type Output = Metric;
fn add(self, rhs: Metric) -> Self::Output {
if self.is_infinite() || rhs.is_infinite() {
return Metric::infinite();
}
Metric(
self.0
.checked_add(rhs.0)
.map(|r| if r == u16::MAX { r - 1 } else { r })
.unwrap_or(u16::MAX - 1),
)
}
}
impl Sub<Metric> for Metric {
type Output = Metric;
fn sub(self, rhs: Metric) -> Self::Output {
if rhs.is_infinite() {
panic!("Can't subtract an infinite metric");
}
if self.is_infinite() {
return Metric::infinite();
}
Metric(self.0.saturating_sub(rhs.0))
}
}
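// Behavior sketches derived from the arithmetic impls above: finite additions
// saturate just below the infinite value, and subtraction saturates at zero.
#[cfg(test)]
mod tests {
    use super::Metric;

    #[test]
    fn addition_saturates_below_infinity() {
        assert_eq!(
            Metric::new(u16::MAX - 1) + Metric::new(10),
            Metric::new(u16::MAX - 1)
        );
        assert!((Metric::infinite() + Metric::new(1)).is_infinite());
    }

    #[test]
    fn subtraction_saturates_at_zero() {
        assert_eq!(Metric::new(5) - Metric::new(10), Metric::new(0));
    }

    #[test]
    fn delta_is_symmetric() {
        assert_eq!(Metric::new(10).delta(&Metric::new(3)), Metric::new(7));
        assert_eq!(Metric::new(3).delta(&Metric::new(10)), Metric::new(7));
    }
}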

View File

@@ -0,0 +1,195 @@
//! This module is used for collection of runtime metrics of a `mycelium` system. The main item of
//! interest is the [`Metrics`] trait. Users can provide their own implementation of this, or use
//! the default provided implementation to disable gathering metrics.
use crate::peer_manager::PeerType;
/// The collection of all metrics exported by a [`mycelium node`](crate::Node). It is up to the
/// user to provide an implementation which implements the methods for metrics they are interested
/// in. All methods have a default implementation, so if the user is not interested in any metrics,
/// a NOOP handler can be implemented as follows:
///
/// ```rust
/// use mycelium::metrics::Metrics;
///
/// #[derive(Clone)]
/// struct NoMetrics;
/// impl Metrics for NoMetrics {}
/// ```
pub trait Metrics {
/// The [`Router`](crate::router::Router) received a new Hello TLV from a peer.
#[inline]
fn router_process_hello(&self) {}
/// The [`Router`](crate::router::Router) received a new IHU TLV from a peer.
#[inline]
fn router_process_ihu(&self) {}
/// The [`Router`](crate::router::Router) received a new Seqno request TLV from a peer.
#[inline]
fn router_process_seqno_request(&self) {}
/// The [`Router`](crate::router::Router) received a new Route request TLV from a peer.
/// Additionally, it is recorded if this is a wildcard request (route table dump request)
/// or a request for a specific subnet.
#[inline]
fn router_process_route_request(&self, _wildcard: bool) {}
/// The [`Router`](crate::router::Router) received a new Update TLV from a peer.
#[inline]
fn router_process_update(&self) {}
/// The [`Router`](crate::router::Router) tried to send an update to a peer, but before sending
/// it we found out the peer is actually already dead.
///
/// This can happen, since a peer is a remote entity we have no control over, and it can be
/// removed at any time for any reason. However, in normal operation, the amount of times this
/// happens should be fairly small compared to the amount of updates we send/receive.
#[inline]
fn router_update_dead_peer(&self) {}
/// The amount of TLV's received from peers, to be processed by the
/// [`Router`](crate::router::Router).
#[inline]
fn router_received_tlv(&self) {}
/// The [`Router`](crate::router::Router) dropped a received TLV before processing it, as the
/// peer who sent it has already died in the meantime.
#[inline]
fn router_tlv_source_died(&self) {}
/// The [`Router`](crate::router::Router) dropped a received TLV before processing it, because
/// it couldn't keep up with the processing load.
#[inline]
fn router_tlv_discarded(&self) {}
/// A [`Peer`](crate::peer::Peer) was added to the [`Router`](crate::router::Router).
#[inline]
fn router_peer_added(&self) {}
/// A [`Peer`](crate::peer::Peer) was removed from the [`Router`](crate::router::Router).
#[inline]
fn router_peer_removed(&self) {}
/// A [`Peer`](crate::peer::Peer) informed the [`Router`](crate::router::Router) it died, or
/// the router otherwise noticed the Peer is dead.
#[inline]
fn router_peer_died(&self) {}
/// The [`Router`](crate::router::Router) ran a route selection procedure.
#[inline]
fn router_route_selection_ran(&self) {}
/// A [`SourceKey`](crate::source_table::SourceKey) expired and got cleaned up by the [`Router`](crate::router::Router).
#[inline]
fn router_source_key_expired(&self) {}
/// A [`RouteKey`](crate::routing_table::RouteKey) expired, and the router either set the
/// [`Metric`](crate::metric::Metric) of the route to infinity, or cleaned up the route entry
/// altogether.
#[inline]
fn router_route_key_expired(&self, _removed: bool) {}
/// A route which expired was actually the selected route for the
/// [`Subnet`](crate::subnet::Subnet). Note that [`Self::router_route_key_expired`] will
/// also have been called.
#[inline]
fn router_selected_route_expired(&self) {}
/// The [`Router`](crate::router::Router) sends a "triggered" update to its peers.
#[inline]
fn router_triggered_update(&self) {}
/// The [`Router`](crate::router::Router) extracted a packet for the local subnet.
#[inline]
fn router_route_packet_local(&self) {}
/// The [`Router`](crate::router::Router) forwarded a packet to a peer.
#[inline]
fn router_route_packet_forward(&self) {}
/// The [`Router`](crate::router::Router) dropped a packet it was routing because its TTL
/// reached 0.
#[inline]
fn router_route_packet_ttl_expired(&self) {}
/// The [`Router`](crate::router::Router) dropped a packet it was routing because there was no
/// route for the destination IP.
#[inline]
fn router_route_packet_no_route(&self) {}
/// The [`Router`](crate::router::Router) replied to a seqno request with a local route, which
/// is more recent (bigger seqno) than the request.
#[inline]
fn router_seqno_request_reply_local(&self) {}
/// The [`Router`](crate::router::Router) replied to a seqno request by bumping its own seqno
/// and advertising the local route.
#[inline]
fn router_seqno_request_bump_seqno(&self) {}
/// The [`Router`](crate::router::Router) dropped a seqno request because the TTL reached 0.
#[inline]
fn router_seqno_request_dropped_ttl(&self) {}
/// The [`Router`](crate::router::Router) forwarded a seqno request to a feasible route.
#[inline]
fn router_seqno_request_forward_feasible(&self) {}
/// The [`Router`](crate::router::Router) forwarded a seqno request to a (potentially)
/// unfeasible route.
#[inline]
fn router_seqno_request_forward_unfeasible(&self) {}
/// The [`Router`](crate::router::Router) dropped a seqno request because none of the other
/// handling methods applied.
#[inline]
fn router_seqno_request_unhandled(&self) {}
/// The [`time`](std::time::Duration) used by the [`Router`](crate::router::Router) to handle a
/// control packet.
#[inline]
fn router_time_spent_handling_tlv(&self, _duration: std::time::Duration, _tlv_type: &str) {}
/// The [`time`](std::time::Duration) used by the [`Router`](crate::router::Router) to
/// periodically propagate selected routes to peers.
#[inline]
fn router_time_spent_periodic_propagating_selected_routes(
&self,
_duration: std::time::Duration,
) {
}
/// An update was processed and accepted by the router, but did not run route selection.
#[inline]
fn router_update_skipped_route_selection(&self) {}
/// An update was denied by a configured filter.
#[inline]
fn router_update_denied_by_filter(&self) {}
/// An update was accepted by the router filters, but was otherwise unfeasible or a retraction,
/// for an unknown subnet.
#[inline]
fn router_update_not_interested(&self) {}
/// A new [`Peer`](crate::peer::Peer) was added to the
/// [`PeerManager`](crate::peer_manager::PeerManager) while it is running.
#[inline]
fn peer_manager_peer_added(&self, _pt: PeerType) {}
/// Sets the amount of [`Peers`](crate::peer::Peer) known by the
/// [`PeerManager`](crate::peer_manager::PeerManager).
#[inline]
fn peer_manager_known_peers(&self, _amount: usize) {}
/// The [`PeerManager`](crate::peer_manager::PeerManager) started an attempt to connect to a
/// remote endpoint.
#[inline]
fn peer_manager_connection_attempted(&self) {}
/// The [`PeerManager`](crate::peer_manager::PeerManager) finished an attempt to connect to a
/// remote endpoint. The connection could have failed.
#[inline]
fn peer_manager_connection_finished(&self) {}
}
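// A sketch of a partial implementation: track only the metrics of interest
// and inherit the NOOP default for everything else. The atomic counter layout
// is an assumption for illustration, not an API requirement.
#[cfg(test)]
mod tests {
    use super::Metrics;
    use std::sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    };

    #[derive(Clone, Default)]
    struct CountingMetrics {
        updates: Arc<AtomicU64>,
    }

    impl Metrics for CountingMetrics {
        fn router_process_update(&self) {
            self.updates.fetch_add(1, Ordering::Relaxed);
        }
    }

    #[test]
    fn counts_only_overridden_metrics() {
        let metrics = CountingMetrics::default();
        metrics.router_process_update();
        metrics.router_process_hello(); // NOOP default implementation
        assert_eq!(metrics.updates.load(Ordering::Relaxed), 1);
    }
}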

View File

@@ -0,0 +1,134 @@
use bytes::{Buf, BufMut, BytesMut};
pub use control::ControlPacket;
pub use data::DataPacket;
use tokio_util::codec::{Decoder, Encoder};
mod control;
mod data;
/// Current version of the protocol being used.
const PROTOCOL_VERSION: u8 = 1;
/// The size of a `Packet` header on the wire, in bytes.
const PACKET_HEADER_SIZE: usize = 4;
#[derive(Debug, Clone)]
pub enum Packet {
DataPacket(DataPacket),
ControlPacket(ControlPacket),
}
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub enum PacketType {
DataPacket = 0,
ControlPacket = 1,
}
pub struct Codec {
packet_type: Option<PacketType>,
data_packet_codec: data::Codec,
control_packet_codec: control::Codec,
}
impl Codec {
pub fn new() -> Self {
Codec {
packet_type: None,
data_packet_codec: data::Codec::new(),
control_packet_codec: control::Codec::new(),
}
}
}
impl Decoder for Codec {
type Item = Packet;
type Error = std::io::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// Determine the packet_type
let packet_type = if let Some(packet_type) = self.packet_type {
packet_type
} else {
// Check we can read the header
if src.remaining() <= PACKET_HEADER_SIZE {
return Ok(None);
}
let mut header = [0; PACKET_HEADER_SIZE];
header.copy_from_slice(&src[..PACKET_HEADER_SIZE]);
src.advance(PACKET_HEADER_SIZE);
// For now it's a hard error to not follow the single defined protocol version
if header[0] != PROTOCOL_VERSION {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Unknown protocol version",
));
};
let packet_type_byte = header[1];
let packet_type = match packet_type_byte {
0 => PacketType::DataPacket,
1 => PacketType::ControlPacket,
_ => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
"Invalid packet type",
));
}
};
self.packet_type = Some(packet_type);
packet_type
};
// Decode packet based on determined packet_type
match packet_type {
PacketType::DataPacket => {
match self.data_packet_codec.decode(src) {
Ok(Some(p)) => {
self.packet_type = None; // Reset state
Ok(Some(Packet::DataPacket(p)))
}
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
PacketType::ControlPacket => {
match self.control_packet_codec.decode(src) {
Ok(Some(p)) => {
self.packet_type = None; // Reset state
Ok(Some(Packet::ControlPacket(p)))
}
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
}
}
}
impl Encoder<Packet> for Codec {
type Error = std::io::Error;
fn encode(&mut self, item: Packet, dst: &mut BytesMut) -> Result<(), Self::Error> {
match item {
Packet::DataPacket(datapacket) => {
dst.put_slice(&[PROTOCOL_VERSION, 0, 0, 0]);
self.data_packet_codec.encode(datapacket, dst)
}
Packet::ControlPacket(controlpacket) => {
dst.put_slice(&[PROTOCOL_VERSION, 1, 0, 0]);
self.control_packet_codec.encode(controlpacket, dst)
}
}
}
}
impl Default for Codec {
fn default() -> Self {
Self::new()
}
}
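// Codec sketches: the version check can be exercised directly, and a data
// packet roundtrip works assuming the data codec's `encode` mirrors its
// `decode` (the field values below are arbitrary).
#[cfg(test)]
mod tests {
    use super::{Codec, DataPacket, Packet};
    use bytes::BytesMut;
    use std::net::Ipv6Addr;
    use tokio_util::codec::{Decoder, Encoder};

    #[test]
    fn rejects_unknown_protocol_version() {
        let mut codec = Codec::new();
        // 5 bytes, so the `remaining() <= PACKET_HEADER_SIZE` guard passes.
        let mut buf = BytesMut::from(&[2u8, 0, 0, 0, 0][..]);
        assert!(codec.decode(&mut buf).is_err());
    }

    #[test]
    fn data_packet_roundtrip() {
        let mut codec = Codec::new();
        let packet = DataPacket {
            raw_data: vec![1, 2, 3, 4],
            hop_limit: 64,
            src_ip: Ipv6Addr::LOCALHOST,
            dst_ip: Ipv6Addr::new(0x400, 0, 0, 0, 0, 0, 0, 1),
        };
        let mut buf = BytesMut::new();
        codec
            .encode(Packet::DataPacket(packet), &mut buf)
            .expect("encoding succeeds");
        match codec.decode(&mut buf).expect("decoding succeeds") {
            Some(Packet::DataPacket(p)) => {
                assert_eq!(p.raw_data, vec![1, 2, 3, 4]);
                assert_eq!(p.hop_limit, 64);
            }
            _ => panic!("expected a data packet"),
        }
    }
}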

View File

@@ -0,0 +1,64 @@
use std::{io, net::IpAddr, time::Duration};
use bytes::BytesMut;
use tokio_util::codec::{Decoder, Encoder};
use crate::{
babel, metric::Metric, peer::Peer, router_id::RouterId, sequence_number::SeqNo, subnet::Subnet,
};
pub type ControlPacket = babel::Tlv;
pub struct Codec {
// TODO: wrapper to make it easier to deserialize
codec: babel::Codec,
}
impl ControlPacket {
pub fn new_hello(dest_peer: &Peer, interval: Duration) -> Self {
let tlv: babel::Tlv =
babel::Hello::new_unicast(dest_peer.hello_seqno(), (interval.as_millis() / 10) as u16)
.into();
dest_peer.increment_hello_seqno();
tlv
}
pub fn new_ihu(rx_cost: Metric, interval: Duration, dest_address: Option<IpAddr>) -> Self {
babel::Ihu::new(rx_cost, (interval.as_millis() / 10) as u16, dest_address).into()
}
pub fn new_update(
interval: Duration,
seqno: SeqNo,
metric: Metric,
subnet: Subnet,
router_id: RouterId,
) -> Self {
babel::Update::new(interval, seqno, metric, subnet, router_id).into()
}
}
impl Codec {
pub fn new() -> Self {
Codec {
codec: babel::Codec::new(),
}
}
}
impl Decoder for Codec {
type Item = ControlPacket;
type Error = std::io::Error;
fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.codec.decode(buf)
}
}
impl Encoder<ControlPacket> for Codec {
type Error = io::Error;
fn encode(&mut self, message: ControlPacket, buf: &mut BytesMut) -> Result<(), Self::Error> {
self.codec.encode(message, buf)
}
}
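// Added illustration (not part of the original diff): `new_hello` and
// `new_ihu` above carry intervals as u16 centiseconds on the wire, hence the
// `interval.as_millis() / 10` conversion.
#[cfg(test)]
mod tests {
    use std::time::Duration;

    #[test]
    fn intervals_are_encoded_as_centiseconds() {
        // A 4 second hello interval is sent on the wire as 400 centiseconds.
        let interval = Duration::from_secs(4);
        assert_eq!((interval.as_millis() / 10) as u16, 400);
    }
}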

View File

@@ -0,0 +1,154 @@
use std::net::Ipv6Addr;
use bytes::{Buf, BufMut, BytesMut};
use tokio_util::codec::{Decoder, Encoder};
/// Size of the header start for a data packet (before the IP addresses).
const DATA_PACKET_HEADER_SIZE: usize = 4;
/// Mask to extract the data length from the raw header.
const DATA_PACKET_LEN_MASK: u32 = (1 << 16) - 1;
#[derive(Debug, Clone)]
pub struct DataPacket {
pub raw_data: Vec<u8>, // the encrypted data itself, with the nonce appended
/// Maximum number of hops for the packet.
pub hop_limit: u8,
pub src_ip: Ipv6Addr,
pub dst_ip: Ipv6Addr,
}
pub struct Codec {
header_vals: Option<HeaderValues>,
src_ip: Option<Ipv6Addr>,
dest_ip: Option<Ipv6Addr>,
}
/// Data from the DataPacket header.
#[derive(Clone, Copy)]
struct HeaderValues {
len: u16,
hop_limit: u8,
}
impl Codec {
pub fn new() -> Self {
Codec {
header_vals: None,
src_ip: None,
dest_ip: None,
}
}
}
impl Decoder for Codec {
type Item = DataPacket;
type Error = std::io::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
// Determine the length of the data
let HeaderValues { len, hop_limit } = if let Some(header_vals) = self.header_vals {
header_vals
} else {
// Check we have enough data to decode
if src.len() < DATA_PACKET_HEADER_SIZE {
return Ok(None);
}
let raw_header = src.get_u32();
// Hop limit is the last 8 bits.
let hop_limit = (raw_header & 0xFF) as u8;
let data_len = ((raw_header >> 8) & DATA_PACKET_LEN_MASK) as u16;
let header_vals = HeaderValues {
len: data_len,
hop_limit,
};
self.header_vals = Some(header_vals);
header_vals
};
let data_len = len as usize;
// Determine the source IP
let src_ip = if let Some(src_ip) = self.src_ip {
src_ip
} else {
if src.len() < 16 {
return Ok(None);
}
// Decode octets
let mut ip_bytes = [0u8; 16];
ip_bytes.copy_from_slice(&src[..16]);
let src_ip = Ipv6Addr::from(ip_bytes);
src.advance(16);
self.src_ip = Some(src_ip);
src_ip
};
// Determine the destination IP
let dest_ip = if let Some(dest_ip) = self.dest_ip {
dest_ip
} else {
if src.len() < 16 {
return Ok(None);
}
// Decode octets
let mut ip_bytes = [0u8; 16];
ip_bytes.copy_from_slice(&src[..16]);
let dest_ip = Ipv6Addr::from(ip_bytes);
src.advance(16);
self.dest_ip = Some(dest_ip);
dest_ip
};
// Check we have enough data to decode
if src.len() < data_len {
return Ok(None);
}
// Decode octets
let mut data = vec![0u8; data_len];
data.copy_from_slice(&src[..data_len]);
src.advance(data_len);
// Reset state
self.header_vals = None;
self.dest_ip = None;
self.src_ip = None;
Ok(Some(DataPacket {
raw_data: data,
hop_limit,
dst_ip: dest_ip,
src_ip,
}))
}
}
impl Encoder<DataPacket> for Codec {
type Error = std::io::Error;
fn encode(&mut self, item: DataPacket, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.reserve(item.raw_data.len() + DATA_PACKET_HEADER_SIZE + 16 + 16);
let mut raw_header = 0;
// Add length of the data
raw_header |= (item.raw_data.len() as u32) << 8;
// And hop limit
raw_header |= item.hop_limit as u32;
dst.put_u32(raw_header);
// Write the source IP
dst.put_slice(&item.src_ip.octets());
// Write the destination IP
dst.put_slice(&item.dst_ip.octets());
// Write the data
dst.extend_from_slice(&item.raw_data);
Ok(())
}
}
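// Added illustration (not part of the original diff): a round-trip sketch for
// the header packing above, where the payload length occupies bits 8..24 of
// the header word and the hop limit the low 8 bits.
#[cfg(test)]
mod tests {
    use super::*;
    use bytes::BytesMut;
    use std::net::Ipv6Addr;
    use tokio_util::codec::{Decoder, Encoder};

    #[test]
    fn round_trip() {
        let mut codec = Codec::new();
        let packet = DataPacket {
            raw_data: b"hello".to_vec(),
            hop_limit: 42,
            src_ip: Ipv6Addr::LOCALHOST,
            dst_ip: Ipv6Addr::LOCALHOST,
        };
        let mut buf = BytesMut::new();
        codec
            .encode(packet, &mut buf)
            .expect("can encode into a fresh buffer");
        let decoded = codec
            .decode(&mut buf)
            .expect("valid data")
            .expect("complete packet");
        assert_eq!(decoded.raw_data, b"hello".to_vec());
        assert_eq!(decoded.hop_limit, 42);
    }
}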

View File

@@ -0,0 +1,401 @@
use futures::{SinkExt, StreamExt};
use std::{
error::Error,
io,
sync::{
atomic::{AtomicBool, AtomicU64, Ordering},
Arc, RwLock, Weak,
},
};
use tokio::{
select,
sync::{mpsc, Notify},
};
use tokio_util::codec::Framed;
use tracing::{debug, error, info, trace};
use crate::{
connection::{self, Connection},
packet::{self, Packet},
};
use crate::{
packet::{ControlPacket, DataPacket},
sequence_number::SeqNo,
};
/// The maximum number of packets to immediately send if they are ready when the first one is
/// received.
const PACKET_COALESCE_WINDOW: usize = 50;
/// The default link cost assigned to new peers before their actual cost is known.
///
/// In theory, the best value would be u16::MAX - 1, however this value would take too long to be
/// flushed out of the smoothed metric. A default of 1000 (1 second) should be sufficiently large
/// to cover very bad connections, so they also converge to a smaller value. While there is no
/// issue with converging to a higher value (in other words, underestimating the latency to a
/// peer), this means that bad peers would briefly be more likely to be selected. Additionally,
/// since the latency increases, downstream peers would eventually find that the announced route
/// has become unfeasible, and send a seqno request (which should solve this efficiently). As a
/// tradeoff, it means it takes longer for new peers in the network to decrease to their actual
/// metric (in comparison with a lower starting metric), though this is in itself a useful thing
/// to have, as it means peers joining the network need some stability before being
/// selected as a hop.
const DEFAULT_LINK_COST: u16 = 1000;
/// Multiplier for the smoothed metric calculation, applied to the existing smoothed metric.
const EXISTING_METRIC_FACTOR: u32 = 9;
/// Divisor for the smoothed metric calculation, applied to the combined metric.
const TOTAL_METRIC_DIVISOR: u32 = 10;
#[derive(Debug, Clone)]
/// A peer represents a directly connected participant in the network.
pub struct Peer {
inner: Arc<PeerInner>,
}
/// A weak reference to a peer, which does not prevent it from being cleaned up. This can be used
/// to check the liveness of the [`Peer`] instance it originated from.
pub struct PeerRef {
inner: Weak<PeerInner>,
}
impl Peer {
pub fn new<C: Connection + Unpin + Send + 'static>(
router_data_tx: mpsc::Sender<DataPacket>,
router_control_tx: mpsc::UnboundedSender<(ControlPacket, Peer)>,
connection: C,
dead_peer_sink: mpsc::Sender<Peer>,
bytes_written: Arc<AtomicU64>,
bytes_read: Arc<AtomicU64>,
) -> Result<Self, io::Error> {
// Wrap connection so we can get access to the counters.
let connection = connection::Tracked::new(bytes_read, bytes_written, connection);
// Data channel for peer
let (to_peer_data, mut from_routing_data) = mpsc::unbounded_channel::<DataPacket>();
// Control channel for peer
let (to_peer_control, mut from_routing_control) =
mpsc::unbounded_channel::<ControlPacket>();
let death_notifier = Arc::new(Notify::new());
let death_watcher = death_notifier.clone();
let peer = Peer {
inner: Arc::new(PeerInner {
state: RwLock::new(PeerState::new()),
to_peer_data,
to_peer_control,
connection_identifier: connection.identifier()?,
static_link_cost: connection.static_link_cost()?,
death_notifier,
alive: AtomicBool::new(true),
}),
};
// Framed for peer
// Used to send and receive packets from a TCP stream
let framed = Framed::with_capacity(connection, packet::Codec::new(), 128 << 10);
let (mut sink, mut stream) = framed.split();
{
let peer = peer.clone();
tokio::spawn(async move {
let mut needs_flush = false;
loop {
select! {
// Received over the TCP stream
frame = stream.next() => {
match frame {
Some(Ok(packet)) => {
match packet {
Packet::DataPacket(packet) => {
// An error here means the receiver is dropped/closed,
// this is not recoverable.
if let Err(error) = router_data_tx.send(packet).await{
error!("Error sending to to_routing_data: {}", error);
break
}
}
Packet::ControlPacket(packet) => {
if let Err(error) = router_control_tx.send((packet, peer.clone())) {
// An error here means the receiver is dropped/closed,
// this is not recoverable.
error!("Error sending to to_routing_control: {}", error);
break
}
}
}
}
Some(Err(e)) => {
error!("Frame error from {}: {e}", peer.connection_identifier());
break;
},
None => {
info!("Stream to {} is closed", peer.connection_identifier());
break;
}
}
}
rv = from_routing_data.recv(), if !needs_flush => {
match rv {
None => break,
Some(packet) => {
needs_flush = true;
if let Err(e) = sink.feed(Packet::DataPacket(packet)).await {
error!("Failed to feed data packet to connection: {e}");
break
}
for _ in 1..PACKET_COALESCE_WINDOW {
// There are 2 possible error cases here: an empty channel, and no more
// senders. In both cases we don't really care at this point.
if let Ok(packet) = from_routing_data.try_recv() {
if let Err(e) = sink.feed(Packet::DataPacket(packet)).await {
error!("Failed to feed data packet to connection: {e}");
break
}
trace!("Instantly queued ready packet to transfer to peer");
} else {
// No packets ready, flush currently buffered ones
break
}
}
}
}
}
rv = from_routing_control.recv(), if !needs_flush => {
match rv {
None => break,
Some(packet) => {
needs_flush = true;
if let Err(e) = sink.feed(Packet::ControlPacket(packet)).await {
error!("Failed to feed control packet to connection: {e}");
break
}
for _ in 1..PACKET_COALESCE_WINDOW {
// There are 2 possible error cases here: an empty channel, and no more
// senders. In both cases we don't really care at this point.
if let Ok(packet) = from_routing_control.try_recv() {
if let Err(e) = sink.feed(Packet::ControlPacket(packet)).await {
error!("Failed to feed data packet to connection: {e}");
break
}
} else {
// No packets ready, flush currently buffered ones
break
}
}
}
}
}
r = sink.flush(), if needs_flush => {
if let Err(err) = r {
error!("Failed to flush peer connection: {err}");
break
}
needs_flush = false;
}
_ = death_watcher.notified() => {
// Attempt graceful shutdown
let mut framed = sink.reunite(stream).expect("SplitSink and SplitStream here can only be part of the same original Framed; Qed");
let _ = framed.close().await;
break;
}
}
}
// Notify router we are dead, also modify our internal state to declare that.
// Relaxed ordering is fine, we just care that the variable is set.
peer.inner.alive.store(false, Ordering::Relaxed);
let remote_id = peer.connection_identifier().clone();
debug!("Notifying router peer {remote_id} is dead");
if let Err(e) = dead_peer_sink.send(peer).await {
error!("Peer {remote_id} could not notify router of termination: {e}");
}
});
};
Ok(peer)
}
/// Get current sequence number for this peer.
pub fn hello_seqno(&self) -> SeqNo {
self.inner.state.read().unwrap().hello_seqno
}
/// Adds 1 to the hello sequence number of this peer.
pub fn increment_hello_seqno(&self) {
self.inner.state.write().unwrap().hello_seqno += 1;
}
pub fn time_last_received_hello(&self) -> tokio::time::Instant {
self.inner.state.read().unwrap().time_last_received_hello
}
pub fn set_time_last_received_hello(&self, time: tokio::time::Instant) {
self.inner.state.write().unwrap().time_last_received_hello = time
}
/// For sending data packets towards a peer instance on this node.
/// It is sent over the to_peer_data channel and read from the corresponding receiver.
/// The receiver sends the packet over the TCP stream towards the destined peer instance on another node.
pub fn send_data_packet(&self, data_packet: DataPacket) -> Result<(), Box<dyn Error>> {
Ok(self.inner.to_peer_data.send(data_packet)?)
}
/// For sending control packets towards a peer instance on this node.
/// It is sent over the to_peer_control channel and read from the corresponding receiver.
/// The receiver sends the packet over the TCP stream towards the destined peer instance on another node.
pub fn send_control_packet(&self, control_packet: ControlPacket) -> Result<(), Box<dyn Error>> {
Ok(self.inner.to_peer_control.send(control_packet)?)
}
/// Get the cost to use the peer, i.e. the additional impact on the [`crate::metric::Metric`]
/// for using this `Peer`.
///
/// This is a smoothed value, which is calculated over the recent history of link cost.
pub fn link_cost(&self) -> u16 {
self.inner.state.read().unwrap().link_cost + self.inner.static_link_cost
}
/// Sets the link cost based on the provided value.
///
/// The link cost is not set to the given value, but rather to an average of recent values.
/// This makes sure short-lived, hard spikes of the link cost of a peer don't influence the
/// routing.
pub fn set_link_cost(&self, new_link_cost: u16) {
// Calculate the new link cost as a weighted average: 9 parts the existing
// smoothed value, 1 part the new measurement.
let mut inner = self.inner.state.write().unwrap();
inner.link_cost = (((inner.link_cost as u32) * EXISTING_METRIC_FACTOR
+ (new_link_cost as u32) * (TOTAL_METRIC_DIVISOR - EXISTING_METRIC_FACTOR))
/ TOTAL_METRIC_DIVISOR) as u16;
}
/// Identifier for the connection to the `Peer`.
pub fn connection_identifier(&self) -> &String {
&self.inner.connection_identifier
}
pub fn time_last_received_ihu(&self) -> tokio::time::Instant {
self.inner.state.read().unwrap().time_last_received_ihu
}
pub fn set_time_last_received_ihu(&self, time: tokio::time::Instant) {
self.inner.state.write().unwrap().time_last_received_ihu = time
}
/// Notify this `Peer` that it died.
///
/// While some [`Connection`] types can immediately detect that the connection itself is
/// broken, not all of them can. In this scenario, we need to rely on an outside signal to tell
/// us that we have, in fact, died.
pub fn died(&self) {
self.inner.alive.store(false, Ordering::Relaxed);
self.inner.death_notifier.notify_one();
}
/// Checks if the connection of this `Peer` is still alive.
///
/// For connection types which don't have (real time) state information, this might return a
/// false positive if the connection has actually died, but the Peer did not notice this (yet)
/// and hasn't been informed.
pub fn alive(&self) -> bool {
self.inner.alive.load(Ordering::Relaxed)
}
/// Create a new [`PeerRef`] that refers to this `Peer` instance.
pub fn refer(&self) -> PeerRef {
PeerRef {
inner: Arc::downgrade(&self.inner),
}
}
}
impl PeerRef {
/// Constructs a new `PeerRef` which is not associated with any actual [`Peer`].
/// [`PeerRef::alive`] will always return false when called on this `PeerRef`.
pub fn new() -> Self {
PeerRef { inner: Weak::new() }
}
/// Check if the connection of the [`Peer`] this `PeerRef` points to is still alive.
pub fn alive(&self) -> bool {
if let Some(peer) = self.inner.upgrade() {
peer.alive.load(Ordering::Relaxed)
} else {
false
}
}
/// Attempts to convert this `PeerRef` into a full [`Peer`].
pub fn upgrade(&self) -> Option<Peer> {
self.inner.upgrade().map(|inner| Peer { inner })
}
}
impl Default for PeerRef {
fn default() -> Self {
Self::new()
}
}
impl PartialEq for Peer {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.inner, &other.inner)
}
}
#[derive(Debug)]
struct PeerInner {
state: RwLock<PeerState>,
to_peer_data: mpsc::UnboundedSender<DataPacket>,
to_peer_control: mpsc::UnboundedSender<ControlPacket>,
/// Used to identify peer based on its connection params.
connection_identifier: String,
/// Static cost of using this link, to be added to the announced metric for routes through this
/// Peer.
static_link_cost: u16,
/// Channel to notify the connection of its decease.
death_notifier: Arc<Notify>,
/// Keep track if the connection is alive.
alive: AtomicBool,
}
#[derive(Debug)]
struct PeerState {
hello_seqno: SeqNo,
time_last_received_hello: tokio::time::Instant,
link_cost: u16,
time_last_received_ihu: tokio::time::Instant,
}
impl PeerState {
/// Create a new `PeerState`, holding the mutable state of a [`Peer`]
fn new() -> Self {
// Initialize hello_seqno to 0
let hello_seqno = SeqNo::default();
let link_cost = DEFAULT_LINK_COST;
// Initialize time_last_received_hello to now
let time_last_received_hello = tokio::time::Instant::now();
// Initialize time_last_received_ihu to now
let time_last_received_ihu = tokio::time::Instant::now();
Self {
hello_seqno,
link_cost,
time_last_received_ihu,
time_last_received_hello,
}
}
}
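// Added illustration (not part of the original diff): the exponential
// smoothing performed by `set_link_cost` above. A new measurement only
// contributes 1/10th per update, so a short spike barely moves the value.
#[cfg(test)]
mod tests {
    use super::{DEFAULT_LINK_COST, EXISTING_METRIC_FACTOR, TOTAL_METRIC_DIVISOR};

    #[test]
    fn link_cost_spikes_are_damped() {
        let old = DEFAULT_LINK_COST as u32; // 1000, i.e. 1 second
        let spike = 10_000u32;
        let smoothed = (old * EXISTING_METRIC_FACTOR
            + spike * (TOTAL_METRIC_DIVISOR - EXISTING_METRIC_FACTOR))
            / TOTAL_METRIC_DIVISOR;
        // 1000 * 9 / 10 + 10000 * 1 / 10 = 1900
        assert_eq!(smoothed, 1900);
    }
}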

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,60 @@
use core::fmt;
use crate::crypto::PublicKey;
/// A `RouterId` uniquely identifies a router in the network.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct RouterId {
pk: PublicKey,
zone: [u8; 2],
rnd: [u8; 6],
}
impl RouterId {
/// Size in bytes of a `RouterId`
pub const BYTE_SIZE: usize = 40;
/// Create a new `RouterId` from a [`PublicKey`].
pub fn new(pk: PublicKey) -> Self {
Self {
pk,
zone: [0; 2],
rnd: rand::random(),
}
}
/// View this `RouterId` as a byte array.
pub fn as_bytes(&self) -> [u8; Self::BYTE_SIZE] {
let mut out = [0; Self::BYTE_SIZE];
out[..32].copy_from_slice(self.pk.as_bytes());
out[32..34].copy_from_slice(&self.zone);
out[34..].copy_from_slice(&self.rnd);
out
}
/// Converts this `RouterId` to a [`PublicKey`].
pub fn to_pubkey(self) -> PublicKey {
self.pk
}
}
impl From<[u8; Self::BYTE_SIZE]> for RouterId {
fn from(bytes: [u8; Self::BYTE_SIZE]) -> RouterId {
RouterId {
pk: PublicKey::from(<&[u8] as TryInto<[u8; 32]>>::try_into(&bytes[..32]).unwrap()),
zone: bytes[32..34].try_into().unwrap(),
rnd: bytes[34..Self::BYTE_SIZE].try_into().unwrap(),
}
}
}
impl fmt::Display for RouterId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let RouterId { pk, zone, rnd } = self;
f.write_fmt(format_args!(
"{pk}-{}-{}",
faster_hex::hex_string(zone),
faster_hex::hex_string(rnd)
))
}
}
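// Added illustration (not part of the original diff): `as_bytes` and the
// `From<[u8; 40]>` impl above are inverses, with the public key in bytes
// 0..32, the zone in 32..34 and the random discriminator in 34..40.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn byte_round_trip() {
        let router_id = RouterId::new(PublicKey::from([0xAB; 32]));
        let bytes = router_id.as_bytes();
        assert_eq!(RouterId::from(bytes), router_id);
    }
}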

View File

@@ -0,0 +1,687 @@
use std::{
net::{IpAddr, Ipv6Addr},
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use ip_network_table_deps_treebitmap::IpLookupTable;
use iter::{RoutingTableNoRouteIter, RoutingTableQueryIter};
use subnet_entry::SubnetEntry;
use tokio::{select, sync::mpsc, time::Duration};
use tokio_util::sync::CancellationToken;
use tracing::{error, trace};
use crate::{crypto::SharedSecret, peer::Peer, subnet::Subnet};
pub use iter::RoutingTableIter;
pub use iter_mut::RoutingTableIterMut;
pub use no_route::NoRouteSubnet;
pub use queried_subnet::QueriedSubnet;
pub use route_entry::RouteEntry;
pub use route_key::RouteKey;
pub use route_list::RouteList;
mod iter;
mod iter_mut;
mod no_route;
mod queried_subnet;
mod route_entry;
mod route_key;
mod route_list;
mod subnet_entry;
const NO_ROUTE_EXPIRATION: Duration = Duration::from_secs(60);
pub enum Routes {
Exist(RouteListReadGuard),
Queried,
NoRoute,
None,
}
impl Routes {
/// Returns the selected route if one exists.
pub fn selected(&self) -> Option<&RouteEntry> {
if let Routes::Exist(routes) = self {
routes.selected()
} else {
None
}
}
/// Returns true if there are no routes
pub fn is_none(&self) -> bool {
!matches!(self, Routes::Exist { .. })
}
}
impl From<&SubnetEntry> for Routes {
fn from(value: &SubnetEntry) -> Self {
match value {
SubnetEntry::Exists { list } => {
Routes::Exist(RouteListReadGuard { inner: list.load() })
}
SubnetEntry::Queried { .. } => Routes::Queried,
SubnetEntry::NoRoute { .. } => Routes::NoRoute,
}
}
}
impl From<Option<&SubnetEntry>> for Routes {
fn from(value: Option<&SubnetEntry>) -> Self {
match value {
Some(v) => v.into(),
None => Routes::None,
}
}
}
/// The routing table holds a list of route entries for every known subnet.
#[derive(Clone)]
pub struct RoutingTable {
writer: Arc<Mutex<left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>>,
reader: left_right::ReadHandle<RoutingTableInner>,
shared: Arc<RoutingTableShared>,
}
struct RoutingTableShared {
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
}
#[derive(Default)]
struct RoutingTableInner {
table: IpLookupTable<Ipv6Addr, Arc<SubnetEntry>>,
}
/// Hold an exclusive write lock over the routing table. While this item is in scope, no other
calls can get a mutable reference to the content of a routing table. Once this guard goes out of
/// scope, changes to the contained RouteList will be applied.
pub struct WriteGuard<'a> {
routing_table: &'a RoutingTable,
/// Owned copy of the RouteList, this is populated once mutable access to the RouteList has
/// been requested.
value: Arc<SubnetEntry>,
/// Did the RouteList exist initially?
exists: bool,
/// The subnet we are writing to.
subnet: Subnet,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancellation_token: CancellationToken,
}
impl RoutingTable {
/// Create a new empty RoutingTable. The passed channel is used to notify an external observer
/// of route entry expiration events. It is the caller's responsibility to ensure these events
/// are properly handled.
///
/// # Panics
///
/// This will panic if not executed in the context of a tokio runtime.
pub fn new(expired_route_entry_sink: mpsc::Sender<RouteKey>) -> Self {
let (writer, reader) = left_right::new();
let writer = Arc::new(Mutex::new(writer));
let cancel_token = CancellationToken::new();
let shared = Arc::new(RoutingTableShared {
expired_route_entry_sink,
cancel_token,
});
RoutingTable {
writer,
reader,
shared,
}
}
/// Get a list of the routes for the most precise [`Subnet`] known which contains the given
/// [`IpAddr`].
pub fn best_routes(&self, ip: IpAddr) -> Routes {
let IpAddr::V6(ip) = ip else {
panic!("Only IPv6 is supported currently");
};
self.reader
.enter()
.expect("Write handle is saved on the router so it is not dropped yet.")
.table
.longest_match(ip)
.map(|(_, _, rl)| rl.as_ref())
.into()
}
/// Get a list of all routes for the given subnet. Changes to the RoutingTable after this
/// method returns will not be visible and require this method to be called again to be
/// observed.
pub fn routes(&self, subnet: Subnet) -> Routes {
let subnet_ip = if let IpAddr::V6(ip) = subnet.address() {
ip
} else {
return Routes::None;
};
self.reader
.enter()
.expect("Write handle is saved on the router so it is not dropped yet.")
.table
.exact_match(subnet_ip, subnet.prefix_len().into())
.map(Arc::as_ref)
.into()
}
/// Gets continued read access to the `RoutingTable`. While the returned
/// [`guard`](RoutingTableReadGuard) is held, updates to the `RoutingTable` will be blocked.
pub fn read(&self) -> RoutingTableReadGuard {
RoutingTableReadGuard {
guard: self
.reader
.enter()
.expect("Write handle is saved on RoutingTable, so this is always Some; qed"),
}
}
/// Locks the `RoutingTable` for continued write access. While the returned
/// [`guard`](RoutingTableWriteGuard) is held, methods trying to mutate the `RoutingTable`, or
/// get mutable access otherwise, will be blocked. When the [`guard`](`RoutingTableWriteGuard`)
/// is dropped, all queued changes will be applied.
pub fn write(&self) -> RoutingTableWriteGuard {
RoutingTableWriteGuard {
write_guard: self.writer.lock().unwrap(),
read_guard: self
.reader
.enter()
.expect("Write handle is saved on RoutingTable, so this is always Some; qed"),
expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
cancel_token: self.shared.cancel_token.clone(),
}
}
/// Get mutable access to the list of routes for the given [`Subnet`].
pub fn routes_mut(&self, subnet: Subnet) -> Option<WriteGuard> {
let subnet_address = if let IpAddr::V6(ip) = subnet.address() {
ip
} else {
panic!("IP v4 addresses are not supported")
};
let value = self
.reader
.enter()
.expect("Write handle is saved next to read handle so this is always Some; qed")
.table
.exact_match(subnet_address, subnet.prefix_len().into())?
.clone();
if matches!(*value, SubnetEntry::Exists { .. }) {
Some(WriteGuard {
routing_table: self,
// The route list was found in the routing table, so it already exists.
value,
exists: true,
subnet,
expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
cancellation_token: self.shared.cancel_token.clone(),
})
} else {
None
}
}
/// Adds a new [`Subnet`] to the `RoutingTable`. The returned [`WriteGuard`] can be used to
/// insert entries. If no entry is inserted before the guard is dropped, the [`Subnet`] won't
/// be added.
pub fn add_subnet(&self, subnet: Subnet, shared_secret: SharedSecret) -> WriteGuard {
if !matches!(subnet.address(), IpAddr::V6(_)) {
panic!("IP v4 addresses are not supported")
};
let value = Arc::new(SubnetEntry::Exists {
list: Arc::new(RouteList::new(shared_secret)).into(),
});
WriteGuard {
routing_table: self,
value,
exists: false,
subnet,
expired_route_entry_sink: self.shared.expired_route_entry_sink.clone(),
cancellation_token: self.shared.cancel_token.clone(),
}
}
/// Gets the selected route for an IpAddr if one exists.
///
/// # Panics
///
/// This will panic if the IP address is not an IPV6 address.
pub fn selected_route(&self, address: IpAddr) -> Option<RouteEntry> {
let IpAddr::V6(ip) = address else {
panic!("IP v4 addresses are not supported")
};
self.reader
.enter()
.expect("Write handle is saved on RoutingTable, so this is always Some; qed")
.table
.longest_match(ip)
.and_then(|(_, _, rl)| {
let SubnetEntry::Exists { list } = &**rl else {
return None;
};
let rl = list.load();
if rl.is_empty() || !rl[0].selected() {
None
} else {
Some(rl[0].clone())
}
})
}
/// Marks a subnet as queried in the route table.
///
/// This function will not do anything if the subnet contains valid routes.
pub fn mark_queried(&self, subnet: Subnet, query_timeout: tokio::time::Instant) {
if !matches!(subnet.address(), IpAddr::V6(_)) {
panic!("IP v4 addresses are not supported")
};
// Start a task to expire the queried state if we didn't have any results in time.
{
// We only need the write handle in the task
let writer = self.writer.clone();
let cancel_token = self.shared.cancel_token.clone();
tokio::task::spawn(async move {
select! {
_ = cancel_token.cancelled() => {
// Future got cancelled, nothing to do
return
}
_ = tokio::time::sleep_until(query_timeout) => {
// Timeout fired, mark as no route
}
}
let expiry = tokio::time::Instant::now() + NO_ROUTE_EXPIRATION;
// Scope this so the lock for the write_handle goes out of scope when we are done
// here, as we don't want to hold the write_handle lock while sleeping for the
// second timeout.
{
let mut write_handle = writer.lock().expect("Can lock writer");
write_handle.append(RoutingTableOplogEntry::QueryExpired(
subnet,
Arc::new(SubnetEntry::NoRoute { expiry }),
));
write_handle.flush();
}
// TODO: Check if we are indeed marked as NoRoute here, if we aren't this can be
// cancelled now
select! {
_ = cancel_token.cancelled() => {
// Future got cancelled, nothing to do
return
}
_ = tokio::time::sleep_until(expiry) => {
// Timeout fired, remove no route entry
}
}
let mut write_handle = writer.lock().expect("Can lock writer");
write_handle.append(RoutingTableOplogEntry::NoRouteExpired(subnet));
write_handle.flush();
});
}
let mut write_handle = self.writer.lock().expect("Can lock writer");
write_handle.append(RoutingTableOplogEntry::Queried(
subnet,
Arc::new(SubnetEntry::Queried { query_timeout }),
));
write_handle.flush();
}
}
pub struct RouteListReadGuard {
inner: arc_swap::Guard<Arc<RouteList>>,
}
impl Deref for RouteListReadGuard {
type Target = RouteList;
fn deref(&self) -> &Self::Target {
self.inner.deref()
}
}
/// A write guard over the [`RoutingTable`]. While this guard is held, updates won't be able to
/// complete.
pub struct RoutingTableWriteGuard<'a> {
write_guard: MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
read_guard: left_right::ReadGuard<'a, RoutingTableInner>,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
}
impl<'a, 'b> RoutingTableWriteGuard<'a> {
pub fn iter_mut(&'b mut self) -> RoutingTableIterMut<'a, 'b> {
RoutingTableIterMut::new(
&mut self.write_guard,
self.read_guard.table.iter(),
self.expired_route_entry_sink.clone(),
self.cancel_token.clone(),
)
}
}
impl Drop for RoutingTableWriteGuard<'_> {
fn drop(&mut self) {
self.write_guard.publish();
}
}
/// A read guard over the [`RoutingTable`]. While this guard is held, updates won't be able to
/// complete.
pub struct RoutingTableReadGuard<'a> {
guard: left_right::ReadGuard<'a, RoutingTableInner>,
}
impl RoutingTableReadGuard<'_> {
pub fn iter(&self) -> RoutingTableIter {
RoutingTableIter::new(self.guard.table.iter())
}
/// Create an iterator for all queried subnets in the routing table
pub fn iter_queries(&self) -> RoutingTableQueryIter {
RoutingTableQueryIter::new(self.guard.table.iter())
}
/// Create an iterator for all subnets which are currently marked as `NoRoute` in the routing
/// table.
pub fn iter_no_route(&self) -> RoutingTableNoRouteIter {
RoutingTableNoRouteIter::new(self.guard.table.iter())
}
}
impl WriteGuard<'_> {
/// Loads the current [`RouteList`].
#[inline]
pub fn routes(&self) -> RouteListReadGuard {
let SubnetEntry::Exists { list } = &*self.value else {
panic!("Write guard for non-route SubnetEntry")
};
RouteListReadGuard { inner: list.load() }
}
/// Get mutable access to the [`RouteList`]. This will update the [`RouteList`] in place
/// without locking the [`RoutingTable`].
// TODO: Proper abstractions
pub fn update_routes<
F: FnMut(&mut RouteList, &mpsc::Sender<RouteKey>, &CancellationToken) -> bool,
>(
&mut self,
mut op: F,
) -> bool {
let mut res = false;
let mut delete = false;
if let SubnetEntry::Exists { list } = &*self.value {
list.rcu(|rl| {
let mut new_val = rl.clone();
let v = Arc::make_mut(&mut new_val);
res = op(v, &self.expired_route_entry_sink, &self.cancellation_token);
delete = v.is_empty();
new_val
});
if delete && self.exists {
trace!(subnet = %self.subnet, "Deleting subnet which became empty after updating");
let mut writer = self.routing_table.writer.lock().unwrap();
writer.append(RoutingTableOplogEntry::Delete(self.subnet));
writer.publish();
}
res
} else {
false
}
}
/// Set the [`RouteEntry`] with the given [`neighbour`](Peer) as the selected route.
pub fn set_selected(&mut self, neighbour: &Peer) {
if let SubnetEntry::Exists { list } = &*self.value {
list.rcu(|routes| {
let mut new_routes = routes.clone();
let routes = Arc::make_mut(&mut new_routes);
let Some(pos) = routes.iter().position(|re| re.neighbour() == neighbour) else {
error!(
neighbour = neighbour.connection_identifier(),
"Failed to select route entry with given route key, no such entry"
);
return new_routes;
};
// We don't need a check for an empty list here: since we found a matching entry,
// there _MUST_ be at least 1 entry.
// Set the first element to unselected, then select the proper element so this also works
// in case the existing route is "reselected".
routes[0].set_selected(false);
routes[pos].set_selected(true);
routes.swap(0, pos);
new_routes
});
}
}
/// Unconditionally unselects the selected route, if one is present.
///
/// In case no route is selected, this is a no-op.
pub fn unselect(&mut self) {
if let SubnetEntry::Exists { list } = &*self.value {
list.rcu(|v| {
let mut new_val = v.clone();
let new_ref = Arc::make_mut(&mut new_val);
if let Some(e) = new_ref.get_mut(0) {
e.set_selected(false);
}
new_val
});
}
}
}
impl Drop for WriteGuard<'_> {
fn drop(&mut self) {
// FIXME: try to get rid of clones on the Arc here
if let SubnetEntry::Exists { list } = &*self.value {
let value = list.load();
match self.exists {
// The route list did not exist, and now it is not empty, so an entry was added. We
// need to add the route list to the routing table.
false if !value.is_empty() => {
trace!(subnet = %self.subnet, "Inserting new route list for subnet");
let mut writer = self.routing_table.writer.lock().unwrap();
writer.append(RoutingTableOplogEntry::Upsert(
self.subnet,
Arc::clone(&self.value),
));
writer.publish();
}
// There was an existing route list which is now empty, so the entry for this subnet
// needs to be deleted in the routing table.
true if value.is_empty() => {
trace!(subnet = %self.subnet, "Removing route list for subnet");
let mut writer = self.routing_table.writer.lock().unwrap();
writer.append(RoutingTableOplogEntry::Delete(self.subnet));
writer.publish();
}
// Nothing to do in these cases. Either no value was inserted in a non-existing
// route list, or an existing one was updated in place.
_ => {}
}
}
}
}
/// Operations allowed on the left_right for the routing table.
enum RoutingTableOplogEntry {
/// Insert or Update the value for the given subnet.
Upsert(Subnet, Arc<SubnetEntry>),
/// Mark a subnet as queried.
Queried(Subnet, Arc<SubnetEntry>),
/// Delete the entry for the given subnet.
Delete(Subnet),
/// The route request for a subnet expired; if it is still in the queried state, mark it as
/// having no route.
QueryExpired(Subnet, Arc<SubnetEntry>),
/// The marker for explicitly not having a route to a subnet has expired.
NoRouteExpired(Subnet),
}
/// Convert an [`IpAddr`] into an [`Ipv6Addr`]. Panics if the contained address is not an IPv6
/// address.
fn expect_ipv6(ip: IpAddr) -> Ipv6Addr {
let IpAddr::V6(ip) = ip else {
panic!("Expected ipv6 address")
};
ip
}
impl left_right::Absorb<RoutingTableOplogEntry> for RoutingTableInner {
fn absorb_first(&mut self, operation: &mut RoutingTableOplogEntry, _other: &Self) {
match operation {
RoutingTableOplogEntry::Upsert(subnet, list) => {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
Arc::clone(list),
);
}
RoutingTableOplogEntry::Queried(subnet, se) => {
// Mark a query only if we don't have a valid entry
let entry = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
.map(Arc::deref);
// If we have no route, transition to query, if we have a route or existing query,
// do nothing
if matches!(entry, None | Some(SubnetEntry::NoRoute { .. })) {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
Arc::clone(se),
);
}
}
RoutingTableOplogEntry::Delete(subnet) => {
self.table
.remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
}
RoutingTableOplogEntry::QueryExpired(subnet, nre) => {
if let Some(entry) = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
{
if let SubnetEntry::Queried { .. } = &**entry {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
Arc::clone(nre),
);
}
}
}
RoutingTableOplogEntry::NoRouteExpired(subnet) => {
if let Some(entry) = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
{
if let SubnetEntry::NoRoute { .. } = &**entry {
self.table
.remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
}
}
}
}
}
fn sync_with(&mut self, first: &Self) {
for (k, ss, v) in first.table.iter() {
self.table.insert(k, ss, v.clone());
}
}
fn absorb_second(&mut self, operation: RoutingTableOplogEntry, _: &Self) {
match operation {
RoutingTableOplogEntry::Upsert(subnet, list) => {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
list,
);
}
RoutingTableOplogEntry::Queried(subnet, se) => {
// Mark a query only if we don't have a valid entry
let entry = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
.map(Arc::deref);
// If we have no route, transition to query, if we have a route or existing query,
// do nothing
if matches!(entry, None | Some(SubnetEntry::NoRoute { .. })) {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
se,
);
}
}
RoutingTableOplogEntry::Delete(subnet) => {
self.table
.remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
}
RoutingTableOplogEntry::QueryExpired(subnet, nre) => {
if let Some(entry) = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
{
if let SubnetEntry::Queried { .. } = &**entry {
self.table.insert(
expect_ipv6(subnet.address()),
subnet.prefix_len().into(),
nre,
);
}
}
}
RoutingTableOplogEntry::NoRouteExpired(subnet) => {
if let Some(entry) = self
.table
.exact_match(expect_ipv6(subnet.address()), subnet.prefix_len().into())
{
if let SubnetEntry::NoRoute { .. } = &**entry {
self.table
.remove(expect_ipv6(subnet.address()), subnet.prefix_len().into());
}
}
}
}
}
}
impl Drop for RoutingTableShared {
fn drop(&mut self) {
self.cancel_token.cancel();
}
}
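// Added illustration (not part of the original diff): a hypothetical helper
// showing how a caller can distinguish the four states handed out by
// `best_routes`/`routes` above.
#[allow(dead_code)]
fn describe_routes(routes: &Routes) -> &'static str {
    match routes {
        Routes::Exist(_) => "at least one route entry is known",
        Routes::Queried => "a route request is currently in flight",
        Routes::NoRoute => "recently confirmed to have no route",
        Routes::None => "no information about this subnet",
    }
}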

View File

@@ -0,0 +1,100 @@
use std::{net::Ipv6Addr, sync::Arc};
use crate::subnet::Subnet;
use super::{subnet_entry::SubnetEntry, NoRouteSubnet, QueriedSubnet, RouteListReadGuard};
/// An iterator over a [`routing table`](super::RoutingTable) giving read-only access to
/// [`RouteList`]s.
pub struct RoutingTableIter<'a>(
ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableIter<'a> {
/// Create a new `RoutingTableIter` which will iterate over all entries in a [`RoutingTable`].
pub(super) fn new(
inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
) -> Self {
Self(inner)
}
}
impl Iterator for RoutingTableIter<'_> {
type Item = (Subnet, RouteListReadGuard);
fn next(&mut self) -> Option<Self::Item> {
for (ip, prefix_size, rl) in self.0.by_ref() {
if let SubnetEntry::Exists { list } = &**rl {
return Some((
Subnet::new(ip.into(), prefix_size as u8)
.expect("Routing table contains valid subnets"),
RouteListReadGuard { inner: list.load() },
));
}
}
None
}
}
/// Iterator over the queried subnets in the routing table.
pub struct RoutingTableQueryIter<'a>(
ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableQueryIter<'a> {
/// Create a new `RoutingTableQueryIter` which will iterate over all queried entries in a [`RoutingTable`].
pub(super) fn new(
inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
) -> Self {
Self(inner)
}
}
impl Iterator for RoutingTableQueryIter<'_> {
type Item = QueriedSubnet;
fn next(&mut self) -> Option<Self::Item> {
for (ip, prefix_size, rl) in self.0.by_ref() {
if let SubnetEntry::Queried { query_timeout } = &**rl {
return Some(QueriedSubnet::new(
Subnet::new(ip.into(), prefix_size as u8)
.expect("Routing table contains valid subnets"),
*query_timeout,
));
}
}
None
}
}
/// Iterator for entries which are explicitly marked as "no route" in the routing table.
pub struct RoutingTableNoRouteIter<'a>(
ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
);
impl<'a> RoutingTableNoRouteIter<'a> {
/// Create a new `RoutingTableNoRouteIter` which will iterate over all entries in a [`RoutingTable`]
/// which are explicitly marked as `NoRoute`
pub(super) fn new(
inner: ip_network_table_deps_treebitmap::Iter<'a, Ipv6Addr, Arc<SubnetEntry>>,
) -> Self {
Self(inner)
}
}
impl Iterator for RoutingTableNoRouteIter<'_> {
type Item = NoRouteSubnet;
fn next(&mut self) -> Option<Self::Item> {
for (ip, prefix_size, rl) in self.0.by_ref() {
if let SubnetEntry::NoRoute { expiry } = &**rl {
return Some(NoRouteSubnet::new(
Subnet::new(ip.into(), prefix_size as u8)
.expect("Routing table contains valid subnets"),
*expiry,
));
}
}
None
}
}
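// Added usage sketch (not part of the original diff): walking all live
// subnets through a read guard as handed out by `RoutingTable::read`, and
// printing the selected route of each subnet, if any.
#[allow(dead_code)]
fn log_selected_routes(table: &super::RoutingTable) {
    for (subnet, routes) in table.read().iter() {
        if let Some(route) = routes.selected() {
            println!("{subnet} via {}", route.neighbour().connection_identifier());
        }
    }
}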

View File

@@ -0,0 +1,107 @@
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use tracing::trace;
use crate::subnet::Subnet;
use super::{
subnet_entry::SubnetEntry, RouteKey, RouteList, RoutingTableInner, RoutingTableOplogEntry,
};
use std::{
net::Ipv6Addr,
sync::{Arc, MutexGuard},
};
/// An iterator over a [`routing table`](super::RoutingTable), yielding mutable access to the
/// entries in the table.
pub struct RoutingTableIterMut<'a, 'b> {
write_guard:
&'b mut MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
iter: ip_network_table_deps_treebitmap::Iter<'b, Ipv6Addr, Arc<SubnetEntry>>,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
}
impl<'a, 'b> RoutingTableIterMut<'a, 'b> {
pub(super) fn new(
write_guard: &'b mut MutexGuard<
'a,
left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>,
>,
iter: ip_network_table_deps_treebitmap::Iter<'b, Ipv6Addr, Arc<SubnetEntry>>,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancel_token: CancellationToken,
) -> Self {
Self {
write_guard,
iter,
expired_route_entry_sink,
cancel_token,
}
}
/// Get the next item in this iterator. This is not implemented via the [`Iterator`] trait,
/// since we hand out items which are lifetime bound to this struct.
pub fn next<'c>(&'c mut self) -> Option<(Subnet, RoutingTableIterMutEntry<'a, 'c>)> {
for (ip, prefix_size, rl) in self.iter.by_ref() {
if matches!(&**rl, SubnetEntry::Exists { .. }) {
let subnet = Subnet::new(ip.into(), prefix_size as u8)
.expect("Routing table contains valid subnets");
return Some((
subnet,
RoutingTableIterMutEntry {
writer: self.write_guard,
store: Arc::clone(rl),
subnet,
expired_route_entry_sink: self.expired_route_entry_sink.clone(),
cancellation_token: self.cancel_token.clone(),
},
));
};
}
None
}
}
/// A smart pointer giving mutable access to a [`RouteList`].
pub struct RoutingTableIterMutEntry<'a, 'b> {
writer:
&'b mut MutexGuard<'a, left_right::WriteHandle<RoutingTableInner, RoutingTableOplogEntry>>,
/// Owned copy of the RouteList, this is populated once mutable access to the RouteList has
/// been requested.
store: Arc<SubnetEntry>,
/// The subnet we are writing to.
subnet: Subnet,
expired_route_entry_sink: mpsc::Sender<RouteKey>,
cancellation_token: CancellationToken,
}
impl RoutingTableIterMutEntry<'_, '_> {
/// Updates the routes for this entry
pub fn update_routes<F: FnMut(&mut RouteList, &mpsc::Sender<RouteKey>, &CancellationToken)>(
&mut self,
mut op: F,
) {
let mut delete = false;
if let SubnetEntry::Exists { list } = &*self.store {
list.rcu(|rl| {
let mut new_val = rl.clone();
let v = Arc::make_mut(&mut new_val);
op(v, &self.expired_route_entry_sink, &self.cancellation_token);
delete = v.is_empty();
new_val
});
if delete {
trace!(subnet = %self.subnet, "Queue subnet for deletion since route list is now empty");
self.writer
.append(RoutingTableOplogEntry::Delete(self.subnet));
}
}
}
}
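// Added usage sketch (not part of the original diff): a hypothetical
// maintenance pass over the table using the mutable iterator; it merely
// unselects the currently selected route of every subnet.
#[allow(dead_code)]
fn unselect_all(table: &super::RoutingTable) {
    let mut guard = table.write();
    let mut iter = guard.iter_mut();
    while let Some((_subnet, mut entry)) = iter.next() {
        entry.update_routes(|routes, _expired_route_entry_sink, _cancel_token| {
            if let Some(selected) = routes.get_mut(0) {
                selected.set_selected(false);
            }
        });
    }
}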

View File

@@ -0,0 +1,35 @@
use tokio::time::Instant;
use crate::subnet::Subnet;
/// Information about a [`subnet`](Subnet) which is currently marked as NoRoute.
#[derive(Debug, Clone, Copy)]
pub struct NoRouteSubnet {
/// The subnet which has no route.
subnet: Subnet,
/// Time at which the entry expires. After this timeout expires, the entry is removed and a new
/// query can be performed.
entry_expires: Instant,
}
impl NoRouteSubnet {
/// Create a new `NoRouteSubnet` for the given [`subnet`](Subnet), expiring at the provided
/// [`time`](Instant).
pub fn new(subnet: Subnet, entry_expires: Instant) -> Self {
Self {
subnet,
entry_expires,
}
}
/// The [`subnet`](Subnet) for which there is no route.
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// The moment this entry expires. Once this timeout expires, a new query can be launched for
/// route discovery for this [`subnet`](Subnet).
pub fn entry_expires(&self) -> Instant {
self.entry_expires
}
}

View File

@@ -0,0 +1,35 @@
use tokio::time::Instant;
use crate::subnet::Subnet;
/// Information about a [`subnet`](Subnet) which is currently in the queried state
#[derive(Debug, Clone, Copy)]
pub struct QueriedSubnet {
/// The subnet which was queried.
subnet: Subnet,
/// Time at which the query expires. If no feasible updates come in before this, the subnet is
/// marked as no route temporarily.
query_expires: Instant,
}
impl QueriedSubnet {
/// Create a new `QueriedSubnet` for the given [`subnet`](Subnet), expiring at the provided
/// [`time`](Instant).
pub fn new(subnet: Subnet, query_expires: Instant) -> Self {
Self {
subnet,
query_expires,
}
}
/// The [`subnet`](Subnet) being queried.
pub fn subnet(&self) -> Subnet {
self.subnet
}
/// The moment this query expires. If no route is discovered before this, the [`subnet`](Subnet)
/// is marked as no route temporarily.
pub fn query_expires(&self) -> Instant {
self.query_expires
}
}
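// Added usage sketch (not part of the original diff): deciding whether a
// route request is still outstanding for a subnet, based on the bookkeeping
// above.
#[allow(dead_code)]
fn query_still_pending(query: &QueriedSubnet) -> bool {
    tokio::time::Instant::now() < query.query_expires()
}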

Some files were not shown because too many files have changed in this diff