Compare commits

...

57 Commits

Author SHA1 Message Date
Chip Senkbeil 3fe1fba339
Correct wget usage for installation 9 months ago
Chip Senkbeil 48f7eb74ec
Update readme example to use --daemon instead of & for background manager 10 months ago
Chip Senkbeil 96abcefdc5
Add extra debug logging when starting a manager 11 months ago
Chip Senkbeil 22f3c2dd76
Fix bugs in set permissions for CLI and distant-local 11 months ago
Chip Senkbeil 0320e7fe24
Bump to v0.20.0 11 months ago
Chip Senkbeil 9e48300e83
Fix zombies being leftover from distant launch manager://localhost when servers self-terminate 11 months ago
Chip Senkbeil e304e6a689
Fix shutting down killed connections from a manager 11 months ago
Chip Senkbeil 8972013716
Refactor capabilities to version for manager, integrate version checking for client/server/manager, and define protocol version (#219) 11 months ago
Chip Senkbeil 0efb5aee4c
Add --shell support to CLI (#218) 11 months ago
Chip Senkbeil 56b3b8f4f1
Fix CLI commands with --format json not outputting errors in JSON 11 months ago
Chip Senkbeil eb23b4e1ad
Fix win service 11 months ago
Chip Senkbeil dc7e9b5309
Bump to alpha.12 11 months ago
Chip Senkbeil e0b8769087
Fix return code of --help and --version on cli 11 months ago
Chip Senkbeil 9bc50886bb
Update latest tagging with custom code that uses a personal access token to trigger workflows 11 months ago
Chip Senkbeil bd3b068651
Add workflow to tag latest 11 months ago
Chip Senkbeil c61393750a
Bump minimum version of Rust to 1.70.0 11 months ago
Chip Senkbeil 2abaf0b814
Use sparse checkout during publish 11 months ago
Chip Senkbeil 0e03fc3011
Reintroduce checkout to publish step 11 months ago
Chip Senkbeil cb8ea0507f
Bump to 0.20.0-alpha.11 and restore ci tests 11 months ago
Chip Senkbeil 8a34fec1f7
Update README 11 months ago
Chip Senkbeil 6feeb2d012
Tweaking release config until it works 11 months ago
Chip Senkbeil fefbe19a3c
Switch to stripping using cargo and supporting a latest release tag 11 months ago
Chip Senkbeil be7a15caa0
Refactor generation commands to use --output for files and printing to stdout by default 11 months ago
Chip Senkbeil 84ea28402d
Add support for distant spawn -c 'cmd str' 11 months ago
Chip Senkbeil b74cba28df
Bump to v0.20.0-alpha.10 11 months ago
Chip Senkbeil f4180f6245
Change search default to not use standard filters, and provide options to set filters manually 11 months ago
Chip Senkbeil c250acdfb4
Fix search task exiting on failing to start a search with distant-local 11 months ago
Chip Senkbeil 1836f20a2a
Bump to 0.20.0-alpha.9 12 months ago
Chip Senkbeil 9096a7d81b
Fix destination username & password parsing to accept full character set 12 months ago
Chip Senkbeil 7c08495904
Switch to unbounded channels for `Reply` (#207) 12 months ago
Chip Senkbeil da75801639
Fix server hangup (#206) 12 months ago
Nagy Botond 8009cc9361
fix(parser): allow `-` (hyphen) to appear in usernames (#203) 12 months ago
Chip Senkbeil 4fb9045152
Support sequential batch processing (#201) 12 months ago
Chip Senkbeil efad345a0d
Add header support to request & response (#200) 12 months ago
Chip Senkbeil 6ba3ded188
Fix not serializing when only renamed set, reset field name to timestamp from ts 12 months ago
Chip Senkbeil c4c46f80a9
Remove Formatter code by inlining logic for search and watch 12 months ago
Chip Senkbeil 791a41c29e
Refactor Change to use single path & support renamed detail field (#196) 12 months ago
Chip Senkbeil a36263e7e1
Fix makefile 1 year ago
Chip Senkbeil 6f98e44723
Bump to alpha.8 1 year ago
Chip Senkbeil 72cc998595
Update change to include timestamp and details fields 1 year ago
Chip Senkbeil 4eaae55d53
Refactor to use debouncer for file watching and support configuration (#195) 1 year ago
Chip Senkbeil 9da7679081
Support alternative file watching implementation for MacOS 1 year ago
Chip Senkbeil 009996b554
Remove crossbeam-channel feature from notify dependency (https://github.com/notify-rs/notify/issues/380) 1 year ago
Chip Senkbeil b163094d49
Update to test READMEs 1 year ago
Chip Senkbeil 3225471e28
Add some basic readmes 1 year ago
Chip Senkbeil 9f345eb31b
Update changelog for v0.20.0-alpha.7 release 1 year ago
Chip Senkbeil e99329d9a9
Refactor local crate & update/clean dependencies (#191) 1 year ago
Chip Senkbeil 40c265e35b
Update changelog to reflect new manager service install feature 1 year ago
Chip Senkbeil af903013f6
Support installing manager service with custom arguments 1 year ago
Chip Senkbeil 76dc7cf1fa
Refactor into protocol crate & change capabilities -> version (#189) 1 year ago
Chip Senkbeil 95c0d0c0d1
Fix bad test reference 1 year ago
Chip Senkbeil 528dea0917
Fix windows old auth reference 1 year ago
Chip Senkbeil 8cf7f11269
Refactor authentication into distant-auth 1 year ago
Chip Senkbeil 2042684c97
Update changelog with --lsp change 1 year ago
Chip Senkbeil 31aff1e282
Refactor --lsp [SCHEME] to just take the scheme and not the :// 1 year ago
Chip Senkbeil ea0424e2f4
Feat: set permissions support (#184) 1 year ago
Chip Senkbeil 137b4dc289
Bump to 0.20.0-alpha.7 as next version 1 year ago

@ -4,11 +4,13 @@ on:
push:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
pull_request:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
@ -38,16 +40,12 @@ jobs:
toolchain: stable
components: clippy
- uses: Swatinem/rust-cache@v2
with:
key: "ci-clippy-${{ matrix.os }}"
- name: Check Cargo availability
run: cargo --version
- name: distant-net (all features)
run: cargo clippy -p distant-net --all-targets --verbose --all-features
- name: distant-core (all features)
run: cargo clippy -p distant-core --all-targets --verbose --all-features
- name: distant-ssh2 (all features)
run: cargo clippy -p distant-ssh2 --all-targets --verbose --all-features
- name: distant (all features)
run: cargo clippy --all-targets --verbose --all-features
- name: Run clippy (all features)
run: cargo clippy --workspace --all-targets --verbose --all-features
rustfmt:
name: "Verify code formatting (${{ matrix.os }})"
runs-on: ${{ matrix.os }}
@ -71,6 +69,8 @@ jobs:
toolchain: stable
components: rustfmt
- uses: Swatinem/rust-cache@v2
with:
key: "ci-rustfmt-${{ matrix.os }}"
- name: Check Cargo availability
run: cargo --version
- run: cargo fmt --all -- --check
@ -87,7 +87,7 @@ jobs:
- { rust: stable, os: windows-latest, target: x86_64-pc-windows-msvc }
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
- { rust: 1.64.0, os: ubuntu-latest }
- { rust: 1.70.0, os: ubuntu-latest }
steps:
- uses: actions/checkout@v3
- name: Install Rust ${{ matrix.rust }}
@ -100,6 +100,8 @@ jobs:
with:
tool: cargo-nextest@0.9.45
- uses: Swatinem/rust-cache@v2
with:
key: "ci-tests-${{ matrix.os }}-${{ matrix.rust }}-${{ matrix.target }}"
- name: Check Cargo availability
run: cargo --version
- uses: nick-fields/retry@v2
@ -155,56 +157,13 @@ jobs:
New-ItemProperty -Path $registryPath -Name $name -Value $value -PropertyType String -Force
}
- name: Extend Windows retry count to be more resilient
if: matrix.os == 'windows-latest'
run: echo "NEXTEST_RETRIES=9" >> $GITHUB_ENV
shell: bash
- name: Run net tests (default features)
run: cargo nextest run --profile ci --release -p distant-net
- name: Build core (default features)
run: cargo build --release -p distant-core
- name: Run core tests (all features)
run: cargo nextest run --profile ci --release --all-features -p distant-core
if: matrix.os == 'windows-latest'
- name: Ensure /run/sshd exists on Unix
run: mkdir -p /run/sshd
if: matrix.os == 'ubuntu-latest'
- name: Build ssh2 (default features)
run: cargo build --release -p distant-ssh2
- name: Run ssh2 client tests (all features)
run: cargo nextest run --profile ci --release --all-features -p distant-ssh2 ssh2::client
- name: Build CLI (no default features)
run: cargo build --release --no-default-features
- name: Build CLI (default features)
run: cargo build --release
- name: Run CLI tests (all features)
run: cargo nextest run --profile ci --release --all-features
ssh-launch-tests:
name: "Test ssh launch using Rust ${{ matrix.rust }} on ${{ matrix.os }}"
runs-on: ${{ matrix.os }}
env:
RUSTFLAGS: --cfg ci
RUST_LOG: trace
strategy:
fail-fast: false
matrix:
include:
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
steps:
- uses: actions/checkout@v3
- name: Install Rust ${{ matrix.rust }}
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: ${{ matrix.rust }}
- uses: taiki-e/install-action@v1
with:
tool: cargo-nextest@0.9.45
- uses: Swatinem/rust-cache@v2
- name: Check Cargo availability
run: cargo --version
- name: Install distant cli for use in launch tests
run: |
cargo install --path .
echo "DISTANT_PATH=$HOME/.cargo/bin/distant" >> $GITHUB_ENV
- name: Run ssh2 launch tests (all features)
run: cargo nextest run --profile ci --release --all-features -p distant-ssh2 ssh2::launched
- name: Run all workspace tests (all features)
run: cargo nextest run --profile ci --release --all-features --workspace
- name: Run all doc tests (all features)
run: cargo test --release --all-features --workspace --doc

@ -0,0 +1,24 @@
name: 'Tag latest'
on:
push:
branches:
- master
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Tag latest and push
env:
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
run: |
git config user.name "${GITHUB_ACTOR}"
git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
origin_url="$(git config --get remote.origin.url)"
origin_url="${origin_url/#https:\/\//https:\/\/$GITHUB_TOKEN@}" # add token to URL
git tag latest --force
git push "$origin_url" --tags --force

@ -0,0 +1,28 @@
name: 'Lock Threads'
on:
schedule:
- cron: '0 3 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
concurrency:
group: lock
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v4
with:
issue-inactive-days: '30'
issue-comment: >
I'm going to lock this issue because it has been closed for _30 days_ ⏳.
This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new
issue and complete the issue template so we can capture all the details
necessary to investigate further.
process-only: 'issues'

@ -5,402 +5,312 @@ on:
tags:
- v[0-9]+.[0-9]+.[0-9]+
- v[0-9]+.[0-9]+.[0-9]+-**
- latest
# Status of Targets:
#
# ✅ x86_64-apple-darwin
# ✅ aarch64-apple-darwin
#
# ✅ x86_64-pc-windows-msvc
# ✅ aarch64-pc-windows-msvc
#
# ✅ x86_64-unknown-linux-gnu
# ✅ aarch64-unknown-linux-gnu
# ❌ aarch64-linux-android (fails due to termios)
# ✅ armv7-unknown-linux-gnueabihf
#
# ✅ x86_64-unknown-linux-musl
# ✅ aarch64-unknown-linux-musl
#
# ✅ x86_64-unknown-freebsd
# ❓ aarch64-unknown-freebsd (works manually, but cannot cross-compile via CI)
#
# ❌ x86_64-unknown-netbsd (fails due to termios)
# ❌ aarch64-unknown-netbsd (???)
#
# ❌ x86_64-unknown-openbsd (fails due to rustc internal error at end)
# ❌ aarch64-unknown-openbsd (fails due to openssl-src)
#
jobs:
macos:
name: "Build release on MacOS"
name: "Build release on MacOS (${{ matrix.target }})"
runs-on: macos-11.0
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: macos
X86_ARCH: x86_64-apple-darwin
ARM_ARCH: aarch64-apple-darwin
X86_DIR: target/x86_64-apple-darwin/release
ARM_DIR: target/aarch64-apple-darwin/release
BUILD_BIN: distant
UNIVERSAL_REL_BIN: distant-macos
strategy:
matrix:
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
steps:
- uses: actions/checkout@v3
- name: Install Rust (x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- name: Install Rust (ARM)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARM_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
- name: Build binary (aarch64)
run: |
cargo build --release --all-features --target ${{ env.ARM_ARCH }}
ls -l ./${{ env.ARM_DIR }}
strip ./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
- name: Unify binaries
run: |
lipo -create -output ${{ env.UNIVERSAL_REL_BIN }} \
./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} \
./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
chmod +x ./${{ env.UNIVERSAL_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.UNIVERSAL_REL_BIN }}
windows:
name: "Build release on Windows"
runs-on: windows-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: win64
X86_ARCH: x86_64-pc-windows-msvc
X86_DIR: target/x86_64-pc-windows-msvc/release
BUILD_BIN: distant.exe
X86_REL_BIN: distant-win64.exe
steps:
- uses: actions/checkout@v2
- name: Install Rust (MSVC)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_REL_BIN }}
chmod +x ./${{ env.X86_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_REL_BIN }}
linux_gnu_x86:
name: "Build release on Linux (GNU x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-x86
X86_GNU_ARCH: x86_64-unknown-linux-gnu
X86_GNU_DIR: target/x86_64-unknown-linux-gnu/release
BUILD_BIN: distant
X86_GNU_REL_BIN: distant-linux64-gnu-x86
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (GNU x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_GNU_ARCH }}
ls -l ./${{ env.X86_GNU_DIR }}
strip ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_GNU_REL_BIN }}
chmod +x ./${{ env.X86_GNU_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_GNU_REL_BIN }}
linux_gnu_aarch64:
name: "Build release on Linux (GNU aarch64)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-aarch64
AARCH64_GNU_ARCH: aarch64-unknown-linux-gnu
AARCH64_GNU_DIR: target/aarch64-unknown-linux-gnu/release
BUILD_BIN: distant
AARCH64_GNU_REL_BIN: distant-linux64-gnu-aarch64
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU aarch64)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_GNU_ARCH }}
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-aarch64-linux-gnu)
- name: Build binary (${{ matrix.target }})
run: |
sudo apt update
sudo apt install -y gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Build binary (GNU aarch64)
run: |
cargo build --release --all-features --target ${{ env.AARCH64_GNU_ARCH }}
ls -l ./${{ env.AARCH64_GNU_DIR }}
/usr/aarch64-linux-gnu/bin/strip ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_GNU_REL_BIN }}
chmod +x ./${{ env.AARCH64_GNU_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_GNU_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
linux_gnu_arm_v7:
name: "Build release on Linux (GNU arm-v7)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-arm-v7
ARMV7_GNU_ARCH: armv7-unknown-linux-gnueabihf
ARMV7_GNU_DIR: target/armv7-unknown-linux-gnueabihf/release
BUILD_BIN: distant
ARMV7_GNU_REL_BIN: distant-linux64-gnu-arm-v7
macos_unify:
name: "Build universal binary on MacOS"
needs: [macos]
runs-on: macos-11.0
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU arm-v7)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARMV7_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-arm-linux-gnueabihf)
run: |
sudo apt update
sudo apt install -y gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
- name: Build binary (GNU arm-v7)
- uses: actions/download-artifact@v2
- name: Unify binaries
run: |
cargo build --release --all-features --target ${{ env.ARMV7_GNU_ARCH }}
ls -l ./${{ env.ARMV7_GNU_DIR }}
/usr/arm-linux-gnueabihf/bin/strip ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.ARMV7_GNU_REL_BIN }}
chmod +x ./${{ env.ARMV7_GNU_REL_BIN }}
lipo -create -output distant-universal-apple-darwin \
./x86_64-apple-darwin/distant-x86_64-apple-darwin \
./aarch64-apple-darwin/distant-aarch64-apple-darwin
chmod +x ./distant-universal-apple-darwin
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.ARMV7_GNU_REL_BIN }}
name: universal-apple-darwin
path: ./distant-universal-apple-darwin
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_x86:
name: "Build release on Linux (musl x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-x86
X86_MUSL_ARCH: x86_64-unknown-linux-musl
X86_MUSL_DIR: target/x86_64-unknown-linux-musl/release
BUILD_BIN: distant
X86_MUSL_REL_BIN: distant-linux64-musl-x86
windows:
name: "Build release on Windows (${{ matrix.target }})"
runs-on: windows-latest
strategy:
matrix:
target:
- x86_64-pc-windows-msvc
- aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL x86)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
run: |
sudo apt update
sudo apt install -y musl-tools
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL x86_64)
- name: Build binary (${{ matrix.target }})
run: |
cargo build --release --no-default-features --features ssh2 --target ${{ env.X86_MUSL_ARCH }}
ls -l ./${{ env.X86_MUSL_DIR }}
strip ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_MUSL_REL_BIN }}
chmod +x ./${{ env.X86_MUSL_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant.exe ./distant-${{ matrix.target }}.exe
chmod +x ./distant-${{ matrix.target }}.exe
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}.exe
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_aarch64:
name: "Build release on Linux (musl aarch64)"
linux:
name: "Build release on Linux (${{ matrix.target }})"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-aarch64
AARCH64_MUSL_ARCH: aarch64-unknown-linux-musl
AARCH64_MUSL_DIR: target/aarch64-unknown-linux-musl/release
BUILD_BIN: distant
AARCH64_MUSL_REL_BIN: distant-linux64-musl-aarch64
strategy:
matrix:
include:
- target: x86_64-unknown-linux-gnu
build: --all-features
cargo: cargo
- target: aarch64-unknown-linux-gnu
build: --all-features
deps: gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cargo
- target: armv7-unknown-linux-gnueabihf
build: --all-features
deps: gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
cargo: cargo
- target: x86_64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools
cargo: cargo
- target: aarch64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
- target: x86_64-unknown-freebsd
build: --all-features
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL aarch64)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
if: ${{ matrix.deps }}
run: |
sudo apt update
sudo apt install -y musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Install cross
env:
LINK: https://github.com/cross-rs/cross/releases/download
CROSS_VERSION: 0.2.4
CROSS_FILE: cross-x86_64-unknown-linux-musl
run: |
curl -L "$LINK/v$CROSS_VERSION/$CROSS_FILE.tar.gz" |
tar xz -C $HOME/.cargo/bin
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL aarch64)
sudo apt install -y ${{ matrix.deps }}
- name: Preparing system
if: ${{ matrix.prepare }}
run: ${{ matrix.prepare }}
- name: Build binary (${{ matrix.target }})
run: |
cross build --release --no-default-features --features ssh2 --target ${{ env.AARCH64_MUSL_ARCH }}
ls -l ./${{ env.AARCH64_MUSL_DIR }}
aarch64-linux-gnu-strip ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_MUSL_REL_BIN }}
chmod +x ./${{ env.AARCH64_MUSL_REL_BIN }}
${{ matrix.cargo }} build --release ${{ matrix.build }} --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
# bsd:
# name: "Build release on ${{ matrix.os.name }} (${{ matrix.os.target }})"
# runs-on: ${{ matrix.os.host }}
# strategy:
# matrix:
# os:
# - name: freebsd
# architecture: x86-64
# version: '13.2'
# host: macos-12
# target: x86_64-unknown-freebsd
# build: --all-features
# prepare: sudo pkg install -y openssl gmake lang/rust devel/llvm-devel
# - name: netbsd
# architecture: x86-64
# version: '9.3'
# host: macos-12
# target: x86_64-unknown-netbsd
# build: --all-features
# prepare: |
# PATH="/usr/pkg/sbin:/usr/pkg/bin:$PATH"
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages"
# PKG_PATH="$PKG_PATH/NetBSD/x86_64/9.3/All/"
# export PATH PKG_PATH
# sudo -E pkg_add -I gmake rust
# cargo update --dry-run
# - name: openbsd
# architecture: x86-64
# version: '7.3'
# host: macos-12
# target: x86_64-unknown-openbsd
# build: --all-features
# prepare: |
# sudo pkg_add -I gmake rust llvm
# sed -i 's/lto = true/lto = false/' Cargo.toml
# steps:
# - uses: actions/checkout@v3
# - uses: Swatinem/rust-cache@v2
# - name: Build in VM
# uses: cross-platform-actions/action@v0.15.0
# env:
# CARGO_INCREMENTAL: 0
# with:
# environment_variables: CARGO_INCREMENTAL
# operating_system: ${{ matrix.os.name }}
# architecture: ${{ matrix.os.architecture }}
# version: ${{ matrix.os.version }}
# shell: bash
# run: |
# ${{ matrix.os.prepare }}
# cargo build --release ${{ matrix.os.build }} --target ${{ matrix.os.target }}
# mv ./target/${{ matrix.os.target }}/release/distant ./distant-${{ matrix.os.target }}
# chmod +x ./distant-${{ matrix.os.target }}
# - name: Upload
# uses: actions/upload-artifact@v2
# with:
# name: ${{ matrix.os.target }}
# path: ./distant-${{ matrix.os.target }}
# if-no-files-found: error
# retention-days: 5
publish:
needs: [macos, windows, linux_gnu_x86, linux_gnu_aarch64, linux_gnu_arm_v7, linux_musl_x86, linux_musl_aarch64]
needs: [macos, macos_unify, windows, linux]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
permissions:
contents: write
env:
MACOS: macos
MACOS_UNIVERSAL_BIN: distant-macos
WIN64: win64
WIN64_BIN: distant-win64.exe
LINUX64_GNU_X86: linux64-gnu-x86
LINUX64_GNU_X86_BIN: distant-linux64-gnu-x86
LINUX64_GNU_AARCH64: linux64-gnu-aarch64
LINUX64_GNU_AARCH64_BIN: distant-linux64-gnu-aarch64
LINUX64_GNU_ARMV7: linux64-gnu-arm-v7
LINUX64_GNU_ARMV7_BIN: distant-linux64-gnu-arm-v7
LINUX64_MUSL_X86: linux64-musl-x86
LINUX64_MUSL_X86_BIN: distant-linux64-musl-x86
LINUX64_MUSL_AARCH64: linux64-musl-aarch64
LINUX64_MUSL_AARCH64_BIN: distant-linux64-musl-aarch64
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
sparse-checkout: |
CHANGELOG.md
sparse-checkout-cone-mode: false
- uses: actions/download-artifact@v2
- name: Generate MacOS SHA256 checksums
run: |
cd ${{ env.MACOS }}
sha256sum ${{ env.MACOS_UNIVERSAL_BIN }} > ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum
echo "SHA_MACOS_BIN=$(cat ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Win64 SHA256 checksums
run: |
cd ${{ env.WIN64 }}
sha256sum ${{ env.WIN64_BIN }} > ${{ env.WIN64_BIN }}.sha256sum
echo "SHA_WIN64_BIN=$(cat ${{ env.WIN64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_X86 }}
sha256sum ${{ env.LINUX64_GNU_X86_BIN }} > ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_X86_BIN=$(cat ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu aarch64) SHA256 checksums
- name: Generate SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_AARCH64 }}
sha256sum ${{ env.LINUX64_GNU_AARCH64_BIN }} > ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_AARCH64_BIN=$(cat ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu arm-v7) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_ARMV7 }}
sha256sum ${{ env.LINUX64_GNU_ARMV7_BIN }} > ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_ARMV7_BIN=$(cat ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_X86 }}
sha256sum ${{ env.LINUX64_MUSL_X86_BIN }} > ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_X86_BIN=$(cat ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl aarch64) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_AARCH64 }}
sha256sum ${{ env.LINUX64_MUSL_AARCH64_BIN }} > ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_AARCH64_BIN=$(cat ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
for i in $(find . -name "distant-*" -type f); do
echo "Generating checksum for ${i}"
sha256sum "${i}" > "${i}.sha256sum"
done
- name: Determine git tag
if: github.event_name == 'push'
run: |
TAG_NAME=${{ github.ref }}
echo "TAG_NAME=${TAG_NAME#refs/tags/}" >> $GITHUB_ENV
echo "TAG_VERSION=${TAG_NAME#refs/tags/v}" >> $GITHUB_ENV
- name: Check git tag for pre-release
- name: Check git tag for pre-release or latest
id: check-tag
run: |
if [[ ${{ github.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-.*$ ]]; then
echo ::set-output name=match::true
echo "is_prerelease=true" >> $GITHUB_OUTPUT
elif [[ ${{ github.ref }} =~ ^refs/tags/latest$ ]]; then
echo "is_latest=true" >> $GITHUB_OUTPUT
fi
- name: Print pre-release status
run: |
echo "Is ${{ github.ref }} a pre-release: ${{ steps.check-tag.outputs.match }}"
echo "Is ${{ github.ref }} pre-release: ${{ steps.check-tag.outputs.is_prerelease }}"
echo "Is ${{ github.ref }} latest: ${{ steps.check-tag.outputs.is_latest }}"
- name: Get Changelog Entry
id: changelog
uses: mindsers/changelog-reader-action@v2
with:
version: ${{ env.TAG_VERSION }}
path: "./CHANGELOG.md"
- name: Publish
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
- name: Publish (latest)
if: ${{ steps.check-tag.outputs.is_latest == 'true' }}
uses: softprops/action-gh-release@v1
with:
name: Latest Build
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: true
files: |
**/distant-*
body: |
This is the latest commit (${{ github.sha }}) built for testing.
This is not guaranteed to pass all tests or even function properly.
- name: Publish (release)
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
uses: softprops/action-gh-release@v1
with:
name: distant ${{ env.TAG_NAME }}
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: ${{ steps.check-tag.outputs.match == 'true' }}
prerelease: ${{ steps.check-tag.outputs.is_prerelease == 'true' }}
files: |
${{ env.MACOS }}/${{ env.MACOS_UNIVERSAL_BIN }}
${{ env.WIN64 }}/${{ env.WIN64_BIN }}
${{ env.LINUX64_GNU_X86 }}/${{ env.LINUX64_GNU_X86_BIN }}
${{ env.LINUX64_GNU_AARCH64 }}/${{ env.LINUX64_GNU_AARCH64_BIN }}
${{ env.LINUX64_GNU_ARMV7 }}/${{ env.LINUX64_GNU_ARMV7_BIN }}
${{ env.LINUX64_MUSL_X86 }}/${{ env.LINUX64_MUSL_X86_BIN }}
${{ env.LINUX64_MUSL_AARCH64 }}/${{ env.LINUX64_MUSL_AARCH64_BIN }}
**/*.sha256sum
**/distant-*
body: |
## Release Notes
${{ steps.changelog.outputs.changes }}
## Binaries
Standalone binaries are built out for Windows (x86_64), MacOS (Intel & ARM), and Linux (x86_64, aarch64, armv7).
- **linux64-gnu-x86** is the x86-64 release on Linux using libc
- **linux64-gnu-aarch64** is the aarch64 release on Linux using libc
- **linux64-gnu-arm-v7** is the arm-v7 release on Linux using libc (for Raspberry PI)
- **linux64-musl-x86** is the x86-64 release on Linux using musl (static binary, no libc dependency)
- **linux64-musl-aarch64** is the aarch64 release on Linux using musl (static binary, no libc dependency)
- **macos** is a universal binary for Mac OS that supports x86-64 and aarch64 (ARM) platforms
- **win64** is the x86-64 release on Windows using MSVC
## SHA256 Checksums
```
${{ env.SHA_MACOS_BIN }}
${{ env.SHA_WIN64_BIN }}
${{ env.SHA_LINUX64_GNU_X86_BIN }}
${{ env.SHA_LINUX64_GNU_AARCH64_BIN }}
${{ env.SHA_LINUX64_GNU_ARMV7_BIN }}
${{ env.SHA_LINUX64_MUSL_X86_BIN }}
${{ env.SHA_LINUX64_MUSL_AARCH64_BIN }}
```

@ -7,6 +7,221 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Fixed
- Bug in `distant fs set-permissions` where partial permissions such as `go-w`
would result in clearing all permissions
- Bug in `distant-local` implementation of `SetPermissions` where read-only
status was being set/cleared prior to Unix permissions being applied,
resulting in applying an invalid change to the permissions
## [0.20.0]
All changes described in these alpha releases:
- [Alpha 13](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.13)
- [Alpha 12](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.12)
- [Alpha 11](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.11)
- [Alpha 10](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.10)
- [Alpha 9](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.9)
- [Alpha 8](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.8)
- [Alpha 7](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.7)
- [Alpha 6](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.6)
- [Alpha 5](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.5)
- [Alpha 4](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.4)
- [Alpha 3](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.3)
- [Alpha 2](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.2)
- [Alpha 1](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.1)
### Fixed
- When terminating a connection using `distant manager kill`, the connection is
now properly dropped, resulting in servers waiting to terminate due to
`--shutdown lonely=N` now shutting down accordingly
- Zombies from spawned servers via `distant launch manager://localhost` are now
properly terminated by checking the exit status of processes
## [0.20.0-alpha.13]
### Added
- Support for `--shell` with optional path to an explicit shell as an option
when executing `distant spawn` in order to run the command within a shell
rather than directly
- `semver` crate to be used for version information in protocol and manager
- `is_compatible_with` function to root of `distant-protocol` crate that checks
if a provided version is compatible with the protocol
### Changed
- `distant_protocol::PROTOCOL_VERSION` now uses the crate's major, minor, and
patch version at compile-time (parsed via `const-str` crate) to streamline
version handling between crate and protocol
- Protocol and manager now supply a version request instead of capabilities and
the capabilities of protocol are now a `Vec<String>` to contain a set of more
broad capabilities instead of every possible request type
### Fixed
- CLI commands like `distant manager select` will now output errors in a JSON
format when configured to communicate using JSON
- `distant-ssh2` no longer caches the remote family globally, but instead
caches it per `Ssh` instance
### Removed
- `Cmd::program` and `Cmd::arguments` functions as they were misleading (didn't
do what `distant-local` or `distant-ssh2` do)
- Removed `Capability` and `Capabilities` from protocol and manager
## [0.20.0-alpha.12]
### Changed
- Minimum Rust version is now `1.70.0` due to bump in `grep-cli` minimum
requirement. This technically applied to v0.20.0-alpha.11, but wasn't caught
until the dependency updated
### Fixed
- `distant --help` will now return exit code of 0
- `distant --version` will now return exit code of 0
## [0.20.0-alpha.11]
### Added
- CLI now supports `-c <STR>` and `--cmd <STR>` to use a given string as the
command as an alternative to `-- <CMD> <ARG> <ARG>`
- Add build for FreeBSD
### Changed
- Cli no longer uses `-c` as shorthand for specifying a config file
- `--file` option for generating completion has been renamed to `--output`
- CLI command to generate config files now defaults to printing to stdout with
`--output` providing the option to write to a file
- Artifacts built now use format of `distant-<TRIPLE>`
## [0.20.0-alpha.10]
### Added
- `use_hidden`, `use_ignore_files`, `use_parent_ignore_files`,
`use_git_ignore`, `use_global_git_ignore`, and `use_git_exclude` as new
options for searching
### Changed
- Searching now disables all standard filters by default while re-introducing
the ability to set the filters by individual options
### Fixed
- Failing to start a search will no longer cause the search task to exit when
using the local server, which would result in no more searches being able to
be executed
## [0.20.0-alpha.9]
### Added
- `Request` and `Response` types from `distant-net` now support an optional
`Header` to send miscellaneous information
### Changed
- `Change` structure now provides a single `path` instead of `paths` with the
`distant-local` implementation sending a separate `Changed` event per path
- `ChangeDetails` now includes a `renamed` field to capture the new path name
when known
- `DistantApi` now handles batch requests in parallel, returning the results in
order. To achieve the previous sequential processing of batch requests, the
header value `sequence` needs to be set to true
- Rename `GenericServerRef` to `ServerRef` and remove `ServerRef` trait,
refactoring `TcpServerRef`, `UnixSocketServerRef`, and `WindowsPipeServerRef`
to use the struct instead of `Box<dyn ServerRef>`
- Update `Reply` trait and associated implementations to be non-blocking &
synchronous as opposed to asynchronous to avoid deadlocks and also be more
performant
### Fixed
- Username and password now support full character sets outside of `@` for
passwords and `:` and `@` for usernames
## [0.20.0-alpha.8]
### Added
- `distant-local` now has two features: `macos-fsevent` and `macos-kqueue`.
These are used to indicate what kind of file watching to support (for MacOS).
The default is `macos-fsevent`.
- `[server.watch]` configuration is now available with the following
settings:
- `native = <bool>` to specify whether to use native watching or polling
(default true)
- `poll_interval = <secs>` to specify seconds to wait between polling
attempts (only for polling watcher)
- `compare_contents = <bool>` to specify how polling watcher will evaluate a
file change (default false)
- `debounce_timeout = <secs>` to specify how long to wait before sending a
change notification (will aggregate and merge changes)
- `debounce_tick_rate = <secs>` to specify how long to wait between event
aggregation loops
- `distant-protocol` response for a change now supports these additional
fields:
- `timestamp` (serialized as `ts`) to communicate the seconds since unix
epoch when the event was received
- `details` containing `attributes` (clarify changes on attribute kind) and
`extra` (to convey arbitrary platform-specific extra information)
### Changed
- Bump minimum Rust version to 1.68.0
### Removed
- `crossbeam-channel` dependency removed from notify by disabling its feature
in order to avoid a `tokio::spawn` issue (https://github.com/notify-rs/notify/issues/380)
### Fixed
- usernames with `-` (hyphen) were rejected as invalid
## [0.20.0-alpha.7]
### Added
- New `SetPermissions` enum variant on protocol request
- New `set_permissions` method available in `DistantApi` and implemented by local
server (ssh unavailable due to https://github.com/wez/wezterm/issues/3784)
- Implementation of `DistantChannelExt::set_permissions`
- `distant version` to display information about connected server
- `distant manager service install` now accepts additional arguments to provide
the manager on startup
### Changed
- CLI `--lsp [<SCHEME>]` scheme now expects just the scheme and not `://`
- Moved `distant_net::common::authentication` to separate crate `distant-auth`
- Moved `distant_net::common::authentication::Keychain` to
`distant_net::common::Keychain`
- Moved `distant_net::common::transport::framed::codec::encryption::SecretKey`
and similar to `distant_net::common::SecretKey`
- Search matches reported with `match` key are now inlined as either a byte
array or a string and no longer an object with a `type` and `value` field
- Unset options and values are no longer returned in `JSON` serialization versus
the explicit `null` value provided previously
- `Capabilities` message type has been changed to `Version` with new struct to
report the version information that includes a server version string,
protocol version tuple, and capabilities
- `distant_core::api::local` moved to `distant_local`
### Removed
- `distant capabilities` has been removed in favor of `distant version`
## [0.20.0-alpha.6]
### Changed
@ -428,8 +643,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
pending upon full channel and no longer locks up
- stdout, stderr, and stdin of `RemoteProcess` no longer cause deadlock
[0.20.0]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.13...v0.20.0
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.13...HEAD
[0.20.0-alpha.13]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.12...v0.20.0-alpha.13
[0.20.0-alpha.12]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.11...v0.20.0-alpha.12
[0.20.0-alpha.11]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.10...v0.20.0-alpha.11
[0.20.0-alpha.10]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.9...v0.20.0-alpha.10
[0.20.0-alpha.9]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.8...v0.20.0-alpha.9
[0.20.0-alpha.8]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.7...v0.20.0-alpha.8
[0.20.0-alpha.7]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.6...v0.20.0-alpha.7
[0.20.0-alpha.6]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.5...v0.20.0-alpha.6
[0.20.0-alpha.5]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.4...v0.20.0-alpha.5
[0.20.0-alpha.4]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.3...v0.20.0-alpha.4
[0.20.0-alpha.3]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.2...v0.20.0-alpha.3

286
Cargo.lock generated

@ -223,9 +223,9 @@ dependencies = [
[[package]]
name = "async-once-cell"
version = "0.4.4"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b49bd4c5b769125ea6323601c39815848972880efd33ffb2d01f9f909adc699"
checksum = "fddec5f567375e0a634f94bc8dab1059b9d59a8aba12134c32f5ee21ce3f5f89"
[[package]]
name = "async-process"
@ -474,9 +474,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.2.7"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34d21f9bf1b425d2968943631ec91202fe5e837264063503708b83013f8fc938"
checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc"
dependencies = [
"clap_builder",
"clap_derive",
@ -485,9 +485,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.2.7"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "914c8c79fb560f238ef6429439a30023c862f7a28e688c58f7203f12b29970bd"
checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990"
dependencies = [
"anstream",
"anstyle",
@ -498,18 +498,18 @@ dependencies = [
[[package]]
name = "clap_complete"
version = "4.2.3"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1594fe2312ec4abf402076e407628f5c313e54c32ade058521df4ee34ecac8a8"
checksum = "a04ddfaacc3bc9e6ea67d024575fafc2a813027cf374b8f24f7bc233c6b6be12"
dependencies = [
"clap",
]
[[package]]
name = "clap_derive"
version = "4.2.0"
version = "4.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9644cd56d6b87dbe899ef8b053e331c0637664e9e21a33dfcdc36093f5c5c4"
checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b"
dependencies = [
"heck",
"proc-macro2",
@ -519,9 +519,9 @@ dependencies = [
[[package]]
name = "clap_lex"
version = "0.4.1"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a2dd5a6fe8c6e3502f568a6353e5273bbb15193ad9a89e457b9970798efbea1"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]]
name = "colorchoice"
@ -571,6 +571,12 @@ version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
[[package]]
name = "const-str"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6"
[[package]]
name = "convert_case"
version = "0.4.0"
@ -807,7 +813,7 @@ dependencies = [
[[package]]
name = "distant"
version = "0.20.0-alpha.6"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_cmd",
@ -820,8 +826,10 @@ dependencies = [
"dialoguer",
"directories",
"distant-core",
"distant-local",
"distant-ssh2",
"env_logger",
"file-mode",
"flexi_logger",
"fork",
"indoc",
@ -842,46 +850,75 @@ dependencies = [
"test-log",
"tokio",
"toml_edit",
"typed-path",
"which",
"whoami",
"windows-service",
"winsplit",
]
[[package]]
name = "distant-auth"
version = "0.20.0"
dependencies = [
"async-trait",
"derive_more",
"env_logger",
"log",
"serde",
"test-log",
"tokio",
]
[[package]]
name = "distant-core"
version = "0.20.0-alpha.6"
version = "0.20.0"
dependencies = [
"assert_fs",
"async-trait",
"bitflags 2.3.1",
"bytes",
"derive_more",
"distant-net",
"distant-protocol",
"env_logger",
"futures",
"grep",
"hex",
"log",
"num_cpus",
"once_cell",
"rand",
"regex",
"serde",
"serde_bytes",
"serde_json",
"strum",
"test-log",
"tokio",
]
[[package]]
name = "distant-local"
version = "0.20.0"
dependencies = [
"assert_fs",
"async-trait",
"distant-core",
"env_logger",
"grep",
"ignore",
"indoc",
"log",
"notify",
"notify-debouncer-full",
"num_cpus",
"once_cell",
"portable-pty 0.8.1",
"predicates",
"rand",
"regex",
"rstest",
"schemars",
"serde",
"serde_bytes",
"serde_json",
"shell-words",
"strum",
"test-log",
"tokio",
"tokio-util",
"walkdir",
"whoami",
"winsplit",
@ -889,12 +926,14 @@ dependencies = [
[[package]]
name = "distant-net"
version = "0.20.0-alpha.6"
version = "0.20.0"
dependencies = [
"async-trait",
"bytes",
"chacha20poly1305",
"const-str",
"derive_more",
"distant-auth",
"dyn-clone",
"env_logger",
"flate2",
@ -904,8 +943,9 @@ dependencies = [
"p256",
"paste",
"rand",
"rmp",
"rmp-serde",
"schemars",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
@ -916,9 +956,26 @@ dependencies = [
"tokio",
]
[[package]]
name = "distant-protocol"
version = "0.20.0"
dependencies = [
"bitflags 2.3.1",
"const-str",
"derive_more",
"regex",
"rmp",
"rmp-serde",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
"strum",
]
[[package]]
name = "distant-ssh2"
version = "0.20.0-alpha.6"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_fs",
@ -1098,6 +1155,24 @@ dependencies = [
"subtle",
]
[[package]]
name = "file-id"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e13be71e6ca82e91bc0cb862bebaac0b2d1924a5a1d970c822b2f98b63fda8c3"
dependencies = [
"winapi-util",
]
[[package]]
name = "file-mode"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773ea145485772b8d354624b32adbe20e776353d3e48c7b03ef44e3455e9815c"
dependencies = [
"libc",
]
[[package]]
name = "filedescriptor"
version = "0.8.2"
@ -1157,9 +1232,9 @@ dependencies = [
[[package]]
name = "flexi_logger"
version = "0.25.4"
version = "0.25.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "611de758a8869ffffa7524aafdb48658c64dae95cdce49654d68a8442e500d89"
checksum = "37e7b68b1f7ce9c62856598e99cd6742b9cedb6186b47aa989a82640f20bfa9b"
dependencies = [
"chrono",
"glob",
@ -1783,12 +1858,9 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.17"
version = "0.4.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
checksum = "518ef76f2f87365916b142844c16d8fefd85039bc5699050210a7778ee1cd1de"
[[package]]
name = "memchr"
@ -1910,9 +1982,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
[[package]]
name = "notify"
version = "5.2.0"
version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "729f63e1ca555a43fe3efa4f3efdf4801c479da85b432242a7b726f353c88486"
checksum = "4d9ba6c734de18ca27c8cef5cd7058aa4ac9f63596131e4c7e41e579319032a2"
dependencies = [
"bitflags 1.3.2",
"crossbeam-channel",
@ -1922,11 +1994,22 @@ dependencies = [
"kqueue",
"libc",
"mio",
"serde",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "notify-debouncer-full"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4812c1eb49be776fb8df4961623bdc01ec9dfdc1abe8211ceb09150a2e64219"
dependencies = [
"file-id",
"notify",
"parking_lot 0.12.1",
"walkdir",
]
[[package]]
name = "ntapi"
version = "0.4.1"
@ -1938,12 +2021,11 @@ dependencies = [
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
version = "0.47.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84"
checksum = "1df031e117bca634c262e9bd3173776844b6c17a90b3741c9163663b4385af76"
dependencies = [
"overload",
"winapi",
"windows-sys 0.45.0",
]
[[package]]
@ -1988,9 +2070,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.17.1"
version = "1.17.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
checksum = "9670a07f94779e00908f3e686eab508878ebb390ba6e604d3a284c00e8d0487b"
[[package]]
name = "opaque-debug"
@ -2036,12 +2118,6 @@ dependencies = [
"num-traits",
]
[[package]]
name = "overload"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"
[[package]]
name = "p256"
version = "0.13.2"
@ -2056,9 +2132,9 @@ dependencies = [
[[package]]
name = "papergrid"
version = "0.7.1"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1526bb6aa9f10ec339fb10360f22c57edf81d5678d0278e93bc12a47ffbe4b01"
checksum = "1fdfe703c51ddc52887ad78fc69cd2ea78d895ffcd6e955c9d03566db8ab5bb1"
dependencies = [
"bytecount",
"fnv",
@ -2519,13 +2595,13 @@ dependencies = [
[[package]]
name = "regex"
version = "1.8.1"
version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af83e617f331cc6ae2da5443c602dfa5af81e517212d9d611a5b3ba1777b5370"
checksum = "81ca098a9821bd52d6b24fd8b10bd081f47d39c22778cafaa75a2857a62c6390"
dependencies = [
"aho-corasick 1.0.1",
"memchr",
"regex-syntax 0.7.1",
"regex-syntax 0.7.2",
]
[[package]]
@ -2542,9 +2618,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "regex-syntax"
version = "0.7.1"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5996294f19bd3aae0453a862ad728f60e6600695733dd5df01da90c54363a3c"
checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
[[package]]
name = "rfc6979"
@ -2669,30 +2745,6 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "schemars"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
dependencies = [
"dyn-clone",
"schemars_derive",
"serde",
"serde_json",
]
[[package]]
name = "schemars_derive"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn 1.0.109",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
@ -2727,6 +2779,9 @@ name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
dependencies = [
"serde",
]
[[package]]
name = "semver-parser"
@ -2766,17 +2821,6 @@ dependencies = [
"syn 2.0.16",
]
[[package]]
name = "serde_derive_internals"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "serde_json"
version = "1.0.96"
@ -2790,9 +2834,9 @@ dependencies = [
[[package]]
name = "serde_spanned"
version = "0.6.1"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4"
checksum = "93107647184f6027e3b7dcb2e11034cf95ffa1e3a682c67951963ac69c1c007d"
dependencies = [
"serde",
]
@ -3058,9 +3102,9 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.28.4"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c2f3ca6693feb29a89724516f016488e9aafc7f37264f898593ee4b942f31b"
checksum = "02f1dc6930a439cc5d154221b5387d153f8183529b07c19aca24ea31e0a167e1"
dependencies = [
"cfg-if",
"core-foundation-sys",
@ -3073,9 +3117,9 @@ dependencies = [
[[package]]
name = "tabled"
version = "0.10.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56c3ee73732ffceaea7b8f6b719ce3bb17f253fa27461ffeaf568ebd0cdb4b85"
checksum = "da1a2e56bbf7bfdd08aaa7592157a742205459eff774b73bc01809ae2d99dc2a"
dependencies = [
"papergrid",
"tabled_derive",
@ -3084,9 +3128,9 @@ dependencies = [
[[package]]
name = "tabled_derive"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "beca1b4eaceb4f2755df858b88d9b9315b7ccfd1ffd0d7a48a52602301f01a57"
checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4"
dependencies = [
"heck",
"proc-macro-error",
@ -3248,9 +3292,9 @@ dependencies = [
[[package]]
name = "tokio"
version = "1.28.1"
version = "1.28.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105"
checksum = "94d7b1cfd2aa4011f2de74c2c4c63665e27a71006b0a192dcd2710272e73dfa2"
dependencies = [
"autocfg",
"bytes",
@ -3276,20 +3320,6 @@ dependencies = [
"syn 2.0.16",
]
[[package]]
name = "tokio-util"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d"
dependencies = [
"bytes",
"futures-core",
"futures-sink",
"pin-project-lite",
"tokio",
"tracing",
]
[[package]]
name = "toml"
version = "0.5.11"
@ -3301,18 +3331,18 @@ dependencies = [
[[package]]
name = "toml_datetime"
version = "0.6.1"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622"
checksum = "5a76a9312f5ba4c2dec6b9161fdf25d87ad8a09256ccea5a556fef03c706a10f"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.19.8"
version = "0.19.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13"
checksum = "2380d56e8670370eee6566b0bfd4265f65b3f432e8c6d85623f728d4fa31f739"
dependencies = [
"indexmap",
"serde",
@ -3321,26 +3351,6 @@ dependencies = [
"winnow",
]
[[package]]
name = "tracing"
version = "0.1.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8"
dependencies = [
"cfg-if",
"pin-project-lite",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a"
dependencies = [
"once_cell",
]
[[package]]
name = "typed-path"
version = "0.3.2"

@ -3,7 +3,7 @@ name = "distant"
description = "Operate on a remote computer through file and process manipulation"
categories = ["command-line-utilities"]
keywords = ["cli"]
version = "0.20.0-alpha.6"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -12,12 +12,20 @@ readme = "README.md"
license = "MIT OR Apache-2.0"
[workspace]
members = ["distant-core", "distant-net", "distant-ssh2"]
members = [
"distant-auth",
"distant-core",
"distant-local",
"distant-net",
"distant-protocol",
"distant-ssh2",
]
[profile.release]
opt-level = 'z'
lto = true
codegen-units = 1
strip = true
[features]
default = ["libssh", "ssh2"]
@ -25,49 +33,52 @@ libssh = ["distant-ssh2/libssh"]
ssh2 = ["distant-ssh2/ssh2"]
[dependencies]
anyhow = "1.0.70"
anyhow = "1.0.71"
async-trait = "0.1.68"
clap = { version = "4.2.1", features = ["derive"] }
clap_complete = "4.2.0"
clap = { version = "4.3.0", features = ["derive"] }
clap_complete = "4.3.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.3", default-features = false }
distant-core = { version = "=0.20.0-alpha.6", path = "distant-core", features = ["schemars"] }
directories = "5.0.0"
flexi_logger = "0.25.3"
dialoguer = { version = "0.10.4", default-features = false }
distant-core = { version = "=0.20.0", path = "distant-core" }
distant-local = { version = "=0.20.0", path = "distant-local" }
directories = "5.0.1"
file-mode = "0.1.2"
flexi_logger = "0.25.5"
indoc = "2.0.1"
log = "0.4.17"
once_cell = "1.17.1"
log = "0.4.18"
once_cell = "1.17.2"
rand = { version = "0.8.5", features = ["getrandom"] }
rpassword = "7.2.0"
serde = { version = "1.0.159", features = ["derive"] }
serde_json = "1.0.95"
serde = { version = "1.0.163", features = ["derive"] }
serde_json = "1.0.96"
shell-words = "1.1.0"
service-manager = { version = "0.2.0", features = ["clap", "serde"] }
tabled = "0.10.0"
tokio = { version = "1.27.0", features = ["full"] }
toml_edit = { version = "0.19.8", features = ["serde"] }
terminal_size = "0.2.5"
tabled = "0.12.0"
tokio = { version = "1.28.2", features = ["full"] }
toml_edit = { version = "0.19.10", features = ["serde"] }
terminal_size = "0.2.6"
termwiz = "0.20.0"
typed-path = "0.3.2"
which = "4.4.0"
winsplit = "0.1.0"
whoami = "1.4.0"
# Optional native SSH functionality
distant-ssh2 = { version = "=0.20.0-alpha.6", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
distant-ssh2 = { version = "=0.20.0", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
[target.'cfg(unix)'.dependencies]
fork = "0.1.21"
[target.'cfg(windows)'.dependencies]
sysinfo = "0.28.4"
sysinfo = "0.29.0"
windows-service = "0.6.0"
[dev-dependencies]
assert_cmd = "2.0.10"
assert_fs = "1.0.12"
assert_cmd = "2.0.11"
assert_fs = "1.0.13"
env_logger = "0.10.0"
indoc = "2.0.1"
predicates = "3.0.2"
predicates = "3.0.3"
rstest = "0.17.0"
test-log = "0.2.11"

@ -0,0 +1,44 @@
[tasks.format]
clear = true
install_crate = "rustfmt-nightly"
command = "cargo"
args = ["+nightly", "fmt", "--all"]
[tasks.test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace"]
[tasks.ci-test]
clear = true
command = "cargo"
args = ["nextest", "run", "--profile", "ci", "--release", "--all-features", "--workspace"]
[tasks.post-ci-test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace", "--doc"]
[tasks.publish]
clear = true
script = '''
cargo publish --all-features -p distant-auth
cargo publish --all-features -p distant-protocol
cargo publish --all-features -p distant-net
cargo publish --all-features -p distant-core
cargo publish --all-features -p distant-local
cargo publish --all-features -p distant-ssh2
cargo publish --all-features
'''
[tasks.dry-run-publish]
clear = true
script = '''
cargo publish --all-features --dry-run -p distant-auth
cargo publish --all-features --dry-run -p distant-protocol
cargo publish --all-features --dry-run -p distant-net
cargo publish --all-features --dry-run -p distant-core
cargo publish --all-features --dry-run -p distant-local
cargo publish --all-features --dry-run -p distant-ssh2
cargo publish --all-features --dry-run
'''

@ -1,6 +1,11 @@
# distant - remotely edit files and run programs
<h1 align="center">
<img src="https://distant.dev/assets/images/distant-with-logo-300x87.png" alt="Distant">
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.64+][distant_rustc_img]][distant_rustc_lnk]
<a href="https://distant.dev/">Documentation</a> |
<a href="https://github.com/chipsenkbeil/distant/discussions">Discussion</a>
</h1>
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.70+][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant.svg
[distant_crates_lnk]: https://crates.io/crates/distant
@ -8,164 +13,52 @@
[distant_doc_lnk]: https://docs.rs/distant
[distant_ci_img]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml/badge.svg
[distant_ci_lnk]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.64+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/09/22/Rust-1.64.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
🚧 **(Alpha stage software) This program is in rapid development and may break or change frequently!** 🚧
## Details
The `distant` binary supplies both a server and client component as well as
a command to start a server and configure the local client to be able to
talk to the server.
- Asynchronous in nature, powered by [`tokio`](https://tokio.rs/)
- Data is serialized to send across the wire via [`msgpack`](https://msgpack.org/)
- Encryption & authentication are handled via
[XChaCha20Poly1305](https://tools.ietf.org/html/rfc8439) for an authenticated
encryption scheme via
[RustCrypto/ChaCha20Poly1305](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305)
Additionally, the core of the distant client and server codebase can be pulled
in to be used with your own Rust crates via the `distant-core` crate. The
networking library, which is agnostic of `distant` protocols, can be used via
the `distant-net` crate.
## Installation
### Prebuilt Binaries
If you would like a pre-built binary, check out the
[releases section](https://github.com/chipsenkbeil/distant/releases).
### Building from Source
### Unix
If you have [`cargo`](https://github.com/rust-lang/cargo) installed, you can
directly download and build the source via:
```sh
# Need to include -L to follow redirects as this returns 301
curl -L https://sh.distant.dev | sh
```bash
cargo install distant
# Can also use wget to the same result
wget -q -O- https://sh.distant.dev | sh
```
Alternatively, you can clone this repository and build from source following
the [build guide](./BUILDING.md).
## Backend Feature Matrix
Distant supports multiple backends to facilitate remote communication with
another server. Today, these backends include:
* `distant` - a standalone server acting as the reference implementation
* `ssh` - a wrapper around an `ssh` client that translates the distant protocol
into ssh server requests
Not every backend supports every feature of distant. Below is a table outlining
the available features and which backend supports each feature:
| Feature | distant | ssh |
| --------------------- | --------| ----|
| Capabilities | ✅ | ✅ |
| Filesystem I/O | ✅ | ✅ |
| Filesystem Watching | ✅ | ✅ |
| Process Execution | ✅ | ✅ |
| Reconnect | ✅ | ❌ |
| Search | ✅ | ❌ |
| System Information | ✅ | ⚠ |
* ✅ means full support
* ⚠ means partial support
* ❌ means no support
### Feature Details
* `Capabilities` - able to report back what it is capable of performing
* `Filesystem I/O` - able to read from and write to the filesystem
* `Filesystem Watching` - able to receive notifications when changes to the
filesystem occur
* `Process Execution` - able to execute processes
* `Reconnect` - able to reconnect after network outages
* `Search` - able to search the filesystem
* `System Information` - able to retrieve information about the system
## Example
### Starting the manager
In order to facilitate communication between a client and server, you first
need to start the manager. This can be done in one of two ways:
1. Leverage the `service` functionality to spawn the manager using one of the
following supported service management platforms:
- [`sc.exe`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/cc754599(v=ws.11)) for use with [Window Service](https://en.wikipedia.org/wiki/Windows_service) (Windows)
- [Launchd](https://en.wikipedia.org/wiki/Launchd) (MacOS)
- [systemd](https://en.wikipedia.org/wiki/Systemd) (Linux)
- [OpenRC](https://en.wikipedia.org/wiki/OpenRC) (Linux)
- [rc.d](https://en.wikipedia.org/wiki/Init#Research_Unix-style/BSD-style) (FreeBSD)
2. Run the manager manually by using the `listen` subcommand
#### Service management
```bash
# If you want to install the manager as a service, you can use the service
# interface available directly from the CLI
#
# By default, this will install a system-level service, which means that you
# will need elevated permissions to both install AND communicate with the
# manager
distant manager service install
# If you want to maintain a user-level manager service, you can include the
# --user flag. Note that this is only supported on MacOS (via launchd) and
# Linux (via systemd)
distant manager service install --user
# ........
# Once you have installed the service, you will normally need to start it
# manually or restart your machine to trigger startup on boot
distant manager service start # --user if you are working with user-level
```
#### Manual start
See https://distant.dev/getting-started/installation/unix/ for more details.
```bash
# If you choose to run the manager without a service management platform, you
# can either run the manager in the foreground or provide --daemon to spawn and
# detach the manager
### Windows
# Run in the foreground
distant manager listen
# Detach the manager where it will not terminate even if the parent exits
distant manager listen --daemon
```powershell
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time
irm sh.distant.dev | iex
```
### Interacting with a remote machine
See https://distant.dev/getting-started/installation/windows/ for more details.
Once you have a manager listening for client requests, you can begin
interacting with the manager, spawn and/or connect to servers, and interact
with remote machines.
## Usage
```bash
# Connect to my.example.com on port 22 via SSH and start a distant server
distant client launch ssh://my.example.com
# After the connection is established, you can perform different operations
# on the remote machine via `distant client action {command} [args]`
distant client action copy path/to/file new/path/to/file
distant client action spawn -- echo 'Hello, this is from the other side'
```sh
# Start a manager in the background
distant manager listen --daemon
# Opening a shell to the remote machine is trivial
distant client shell
# SSH into a server, start distant, and connect to the distant server
distant launch ssh://example.com
# If you have more than one connection open, you can switch between active
# connections by using the `select` subcommand
distant client select '<ID>'
# Read the current working directory
distant fs read .
# For programmatic use, a REPL following the JSON API is available
distant client repl --format json
# Start a shell on the remote machine
distant shell
```
See https://distant.dev/getting-started/usage/ for more details.
## License
This project is licensed under either of

@ -0,0 +1,27 @@
# Crate manifest for distant-auth, the authentication library used by the
# other distant crates and the CLI.
[package]
name = "distant-auth"
description = "Authentication library for distant, providing various implementations"
categories = ["authentication"]
keywords = ["auth", "authentication", "async"]
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
default = []
# Enables the Test* helper implementations (e.g. TestAuthenticator) for use
# by downstream crates' test suites.
tests = []
[dependencies]
async-trait = "0.1.68"
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error"] }
log = "0.4.18"
serde = { version = "1.0.163", features = ["derive"] }
[dev-dependencies]
env_logger = "0.10.0"
test-log = "0.2.11"
tokio = { version = "1.28.2", features = ["full"] }

@ -0,0 +1,35 @@
# distant auth
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-auth.svg
[distant_crates_lnk]: https://crates.io/crates/distant-auth
[distant_doc_img]: https://docs.rs/distant-auth/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-auth
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-auth` library supplies the authentication functionality for the
distant interfaces and distant cli.
## Installation
You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-auth = "0.20"
```
## License
This project is licensed under either of
Apache License, Version 2.0, (LICENSE-APACHE or
[apache-license][apache-license]) or MIT license (LICENSE-MIT or
[mit-license][mit-license]) at your option.
[apache-license]: http://www.apache.org/licenses/LICENSE-2.0
[mit-license]: http://opensource.org/licenses/MIT

@ -0,0 +1,110 @@
use std::io;
use async_trait::async_trait;
use crate::handler::AuthHandler;
use crate::msg::*;
/// Represents an interface for authenticating with a server.
#[async_trait]
pub trait Authenticate {
    /// Performs authentication by leveraging the `handler` for any received challenge.
    ///
    /// The `handler` is held for the duration of the exchange and is consulted
    /// for every message that requires a response.
    async fn authenticate(&mut self, mut handler: impl AuthHandler + Send) -> io::Result<()>;
}
/// Represents an interface for submitting challenges for authentication.
///
/// Implementors deliver each authentication message to the other party and
/// return that party's response where one is expected.
#[async_trait]
pub trait Authenticator: Send {
    /// Issues an initialization notice and returns the response indicating which authentication
    /// methods to pursue
    async fn initialize(
        &mut self,
        initialization: Initialization,
    ) -> io::Result<InitializationResponse>;

    /// Issues a challenge and returns the answers to the `questions` asked.
    async fn challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse>;

    /// Requests verification of some `kind` and `text`, returning true if passed verification.
    async fn verify(&mut self, verification: Verification) -> io::Result<VerificationResponse>;

    /// Reports information with no response expected.
    async fn info(&mut self, info: Info) -> io::Result<()>;

    /// Reports an error occurred during authentication, consuming the authenticator since no more
    /// challenges should be issued.
    async fn error(&mut self, error: Error) -> io::Result<()>;

    /// Reports that the authentication has started for a specific method.
    async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()>;

    /// Reports that the authentication has finished successfully, consuming the authenticator
    /// since no more challenges should be issued.
    async fn finished(&mut self) -> io::Result<()>;
}
/// Represents an implementation of [`Authenticator`] used purely for testing purposes.
///
/// Each field holds a boxed closure that produces the result for the
/// corresponding [`Authenticator`] method, letting tests script each step
/// of an authentication exchange independently.
#[cfg(any(test, feature = "tests"))]
pub struct TestAuthenticator {
    pub initialize: Box<dyn FnMut(Initialization) -> io::Result<InitializationResponse> + Send>,
    pub challenge: Box<dyn FnMut(Challenge) -> io::Result<ChallengeResponse> + Send>,
    pub verify: Box<dyn FnMut(Verification) -> io::Result<VerificationResponse> + Send>,
    pub info: Box<dyn FnMut(Info) -> io::Result<()> + Send>,
    pub error: Box<dyn FnMut(Error) -> io::Result<()> + Send>,
    pub start_method: Box<dyn FnMut(StartMethod) -> io::Result<()> + Send>,
    pub finished: Box<dyn FnMut() -> io::Result<()> + Send>,
}
#[cfg(any(test, feature = "tests"))]
impl Default for TestAuthenticator {
    /// Builds a fully-permissive authenticator: initialization echoes back the
    /// requested methods, challenges are answered with each question's text,
    /// verification always approves, and all notifications (info, error,
    /// start_method, finished) are accepted without effect.
    fn default() -> Self {
        let initialize = |init: Initialization| -> io::Result<InitializationResponse> {
            Ok(InitializationResponse {
                methods: init.methods,
            })
        };

        let challenge = |c: Challenge| -> io::Result<ChallengeResponse> {
            let answers = c.questions.into_iter().map(|q| q.text).collect();
            Ok(ChallengeResponse { answers })
        };

        Self {
            initialize: Box::new(initialize),
            challenge: Box::new(challenge),
            verify: Box::new(|_| Ok(VerificationResponse { valid: true })),
            info: Box::new(|_| Ok(())),
            error: Box::new(|_| Ok(())),
            start_method: Box::new(|_| Ok(())),
            finished: Box::new(|| Ok(())),
        }
    }
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl Authenticator for TestAuthenticator {
    // Every method simply invokes the user-supplied closure stored in the
    // corresponding struct field, forwarding the argument unchanged.
    async fn initialize(
        &mut self,
        initialization: Initialization,
    ) -> io::Result<InitializationResponse> {
        (self.initialize)(initialization)
    }

    async fn challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
        (self.challenge)(challenge)
    }

    async fn verify(&mut self, verification: Verification) -> io::Result<VerificationResponse> {
        (self.verify)(verification)
    }

    async fn info(&mut self, info: Info) -> io::Result<()> {
        (self.info)(info)
    }

    async fn error(&mut self, error: Error) -> io::Result<()> {
        (self.error)(error)
    }

    async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
        (self.start_method)(start_method)
    }

    async fn finished(&mut self) -> io::Result<()> {
        (self.finished)()
    }
}

@ -1,11 +1,11 @@
use std::collections::HashMap;
use std::fmt::Display;
use std::io;
use async_trait::async_trait;
use super::msg::*;
use crate::common::authentication::Authenticator;
use crate::common::HeapSecretKey;
use crate::authenticator::Authenticator;
use crate::msg::*;
mod methods;
pub use methods::*;
@ -176,7 +176,10 @@ impl AuthHandlerMap {
impl AuthHandlerMap {
/// Consumes the map, returning a new map that supports the `static_key` method.
pub fn with_static_key(mut self, key: impl Into<HeapSecretKey>) -> Self {
pub fn with_static_key<K>(mut self, key: K) -> Self
where
K: Display + Send + 'static,
{
self.insert_method_handler("static_key", StaticKeyAuthMethodHandler::simple(key));
self
}
@ -343,3 +346,77 @@ impl<'a> AuthMethodHandler for DynAuthHandler<'a> {
self.0.on_error(error).await
}
}
/// Represents an implementation of [`AuthHandler`] used purely for testing purposes.
#[cfg(any(test, feature = "tests"))]
pub struct TestAuthHandler {
pub on_initialization:
Box<dyn FnMut(Initialization) -> io::Result<InitializationResponse> + Send>,
pub on_challenge: Box<dyn FnMut(Challenge) -> io::Result<ChallengeResponse> + Send>,
pub on_verification: Box<dyn FnMut(Verification) -> io::Result<VerificationResponse> + Send>,
pub on_info: Box<dyn FnMut(Info) -> io::Result<()> + Send>,
pub on_error: Box<dyn FnMut(Error) -> io::Result<()> + Send>,
pub on_start_method: Box<dyn FnMut(StartMethod) -> io::Result<()> + Send>,
pub on_finished: Box<dyn FnMut() -> io::Result<()> + Send>,
}
#[cfg(any(test, feature = "tests"))]
impl Default for TestAuthHandler {
fn default() -> Self {
Self {
on_initialization: Box::new(|x| Ok(InitializationResponse { methods: x.methods })),
on_challenge: Box::new(|x| {
Ok(ChallengeResponse {
answers: x.questions.into_iter().map(|x| x.text).collect(),
})
}),
on_verification: Box::new(|_| Ok(VerificationResponse { valid: true })),
on_info: Box::new(|_| Ok(())),
on_error: Box::new(|_| Ok(())),
on_start_method: Box::new(|_| Ok(())),
on_finished: Box::new(|| Ok(())),
}
}
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl AuthHandler for TestAuthHandler {
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
(self.on_initialization)(initialization)
}
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
(self.on_start_method)(start_method)
}
async fn on_finished(&mut self) -> io::Result<()> {
(self.on_finished)()
}
}
#[cfg(any(test, feature = "tests"))]
#[async_trait]
impl AuthMethodHandler for TestAuthHandler {
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
(self.on_challenge)(challenge)
}
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
(self.on_verification)(verification)
}
async fn on_info(&mut self, info: Info) -> io::Result<()> {
(self.on_info)(info)
}
async fn on_error(&mut self, error: Error) -> io::Result<()> {
(self.on_error)(error)
}
}

@ -2,9 +2,7 @@ use std::io;
use async_trait::async_trait;
use super::{
Challenge, ChallengeResponse, Error, Info, Verification, VerificationKind, VerificationResponse,
};
use crate::msg::{Challenge, ChallengeResponse, Error, Info, Verification, VerificationResponse};
/// Interface for a handler of authentication requests for a specific authentication method.
#[async_trait]

@ -3,9 +3,9 @@ use std::io;
use async_trait::async_trait;
use log::*;
use super::{
AuthMethodHandler, Challenge, ChallengeResponse, Error, Info, Verification, VerificationKind,
VerificationResponse,
use crate::handler::AuthMethodHandler;
use crate::msg::{
Challenge, ChallengeResponse, Error, Info, Verification, VerificationKind, VerificationResponse,
};
/// Blocking implementation of [`AuthMethodHandler`] that uses prompts to communicate challenge &

@ -1,28 +1,26 @@
use std::fmt::Display;
use std::io;
use async_trait::async_trait;
use log::*;
use super::{
AuthMethodHandler, Challenge, ChallengeResponse, Error, Info, Verification,
VerificationResponse,
};
use crate::common::HeapSecretKey;
use crate::handler::AuthMethodHandler;
use crate::msg::{Challenge, ChallengeResponse, Error, Info, Verification, VerificationResponse};
/// Implementation of [`AuthMethodHandler`] that answers challenge requests using a static
/// [`HeapSecretKey`]. All other portions of method authentication are handled by another
/// [`AuthMethodHandler`].
pub struct StaticKeyAuthMethodHandler {
key: HeapSecretKey,
pub struct StaticKeyAuthMethodHandler<K> {
key: K,
handler: Box<dyn AuthMethodHandler>,
}
impl StaticKeyAuthMethodHandler {
impl<K> StaticKeyAuthMethodHandler<K> {
/// Creates a new [`StaticKeyAuthMethodHandler`] that responds to challenges using a static
/// `key`. All other requests are passed to the `handler`.
pub fn new<T: AuthMethodHandler + 'static>(key: impl Into<HeapSecretKey>, handler: T) -> Self {
pub fn new<T: AuthMethodHandler + 'static>(key: K, handler: T) -> Self {
Self {
key: key.into(),
key,
handler: Box::new(handler),
}
}
@ -30,7 +28,7 @@ impl StaticKeyAuthMethodHandler {
/// Creates a new [`StaticKeyAuthMethodHandler`] that responds to challenges using a static
/// `key`. All other requests are passed automatically, meaning that verification is always
/// approved and info/errors are ignored.
pub fn simple(key: impl Into<HeapSecretKey>) -> Self {
pub fn simple(key: K) -> Self {
Self::new(key, {
struct __AuthMethodHandler;
@ -62,7 +60,10 @@ impl StaticKeyAuthMethodHandler {
}
#[async_trait]
impl AuthMethodHandler for StaticKeyAuthMethodHandler {
impl<K> AuthMethodHandler for StaticKeyAuthMethodHandler<K>
where
K: Display + Send,
{
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
trace!("on_challenge({challenge:?})");
let mut answers = Vec::new();
@ -103,11 +104,11 @@ mod tests {
use test_log::test;
use super::*;
use crate::common::authentication::msg::{ErrorKind, Question, VerificationKind};
use crate::msg::{ErrorKind, Question, VerificationKind};
#[test(tokio::test)]
async fn on_challenge_should_fail_if_non_key_question_received() {
let mut handler = StaticKeyAuthMethodHandler::simple(HeapSecretKey::generate(32).unwrap());
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_challenge(Challenge {
@ -120,7 +121,7 @@ mod tests {
#[test(tokio::test)]
async fn on_challenge_should_answer_with_stringified_key_for_key_questions() {
let mut handler = StaticKeyAuthMethodHandler::simple(HeapSecretKey::generate(32).unwrap());
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
let response = handler
.on_challenge(Challenge {
@ -135,7 +136,7 @@ mod tests {
#[test(tokio::test)]
async fn on_verification_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(HeapSecretKey::generate(32).unwrap());
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
let response = handler
.on_verification(Verification {
@ -149,7 +150,7 @@ mod tests {
#[test(tokio::test)]
async fn on_info_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(HeapSecretKey::generate(32).unwrap());
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_info(Info {
@ -161,7 +162,7 @@ mod tests {
#[test(tokio::test)]
async fn on_error_should_leverage_fallback_handler() {
let mut handler = StaticKeyAuthMethodHandler::simple(HeapSecretKey::generate(32).unwrap());
let mut handler = StaticKeyAuthMethodHandler::simple(String::from("secret-key"));
handler
.on_error(Error {

@ -0,0 +1,19 @@
// Render the README as the crate-level documentation.
#![doc = include_str!("../README.md")]

// Compile the README's code blocks as doctests so its examples stay valid.
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;

mod authenticator;
mod handler;
mod methods;
pub mod msg;

// Flatten the internal modules into the crate root for a simple public API.
pub use authenticator::*;
pub use handler::*;
pub use methods::*;

/// Test helper implementations, available to downstream crates via the
/// `tests` feature (or within this crate's own tests).
#[cfg(any(test, feature = "tests"))]
pub mod tests {
    pub use crate::{TestAuthHandler, TestAuthenticator};
}

@ -1,12 +1,12 @@
use std::collections::HashMap;
use std::io;
use std::str::FromStr;
use async_trait::async_trait;
use log::*;
use super::super::HeapSecretKey;
use super::msg::*;
use super::Authenticator;
use crate::authenticator::Authenticator;
use crate::msg::*;
mod none;
mod static_key;
@ -48,7 +48,10 @@ impl Verifier {
}
/// Creates a verifier that uses the [`StaticKeyAuthenticationMethod`] exclusively.
pub fn static_key(key: impl Into<HeapSecretKey>) -> Self {
pub fn static_key<K>(key: K) -> Self
where
K: FromStr + PartialEq + Send + Sync + 'static,
{
Self::new(vec![
Box::new(StaticKeyAuthenticationMethod::new(key)) as Box<dyn AuthenticationMethod>
])
@ -117,10 +120,12 @@ pub trait AuthenticationMethod: Send + Sync {
#[cfg(test)]
mod tests {
use std::sync::mpsc;
use test_log::test;
use super::*;
use crate::common::FramedTransport;
use crate::authenticator::TestAuthenticator;
struct SuccessAuthenticationMethod;
@ -150,147 +155,131 @@ mod tests {
#[test(tokio::test)]
async fn verifier_should_fail_to_verify_if_initialization_fails() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame(b"invalid initialization response")
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| Err(io::Error::from(io::ErrorKind::Other))),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> =
vec![Box::new(SuccessAuthenticationMethod)];
let verifier = Verifier::from(methods);
verifier.verify(&mut t1).await.unwrap_err();
verifier.verify(&mut authenticator).await.unwrap_err();
}
#[test(tokio::test)]
async fn verifier_should_fail_to_verify_if_fails_to_send_finished_indicator_after_success() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
},
))
.await
.unwrap();
// Then drop the transport so it cannot receive anything else
drop(t2);
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
})
}),
finished: Box::new(|| Err(io::Error::new(io::ErrorKind::Other, "test error"))),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> =
vec![Box::new(SuccessAuthenticationMethod)];
let verifier = Verifier::from(methods);
assert_eq!(
verifier.verify(&mut t1).await.unwrap_err().kind(),
io::ErrorKind::WriteZero
);
let err = verifier.verify(&mut authenticator).await.unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::Other);
assert_eq!(err.to_string(), "test error");
}
#[test(tokio::test)]
async fn verifier_should_fail_to_verify_if_has_no_authentication_methods() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
})
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> = vec![];
let verifier = Verifier::from(methods);
verifier.verify(&mut t1).await.unwrap_err();
verifier.verify(&mut authenticator).await.unwrap_err();
}
#[test(tokio::test)]
async fn verifier_should_fail_to_verify_if_initialization_yields_no_valid_authentication_methods(
) {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec!["other".to_string()].into_iter().collect(),
},
))
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec!["other".to_string()].into_iter().collect(),
})
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> =
vec![Box::new(SuccessAuthenticationMethod)];
let verifier = Verifier::from(methods);
verifier.verify(&mut t1).await.unwrap_err();
verifier.verify(&mut authenticator).await.unwrap_err();
}
#[test(tokio::test)]
async fn verifier_should_fail_to_verify_if_no_authentication_method_succeeds() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![FailAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![FailAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
})
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> = vec![Box::new(FailAuthenticationMethod)];
let verifier = Verifier::from(methods);
verifier.verify(&mut t1).await.unwrap_err();
verifier.verify(&mut authenticator).await.unwrap_err();
}
#[test(tokio::test)]
async fn verifier_should_return_id_of_authentication_method_upon_success() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![SuccessAuthenticationMethod.id().to_string()]
.into_iter()
.collect(),
})
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> =
vec![Box::new(SuccessAuthenticationMethod)];
let verifier = Verifier::from(methods);
assert_eq!(
verifier.verify(&mut t1).await.unwrap(),
verifier.verify(&mut authenticator).await.unwrap(),
SuccessAuthenticationMethod.id()
);
}
#[test(tokio::test)]
async fn verifier_should_try_authentication_methods_in_order_until_one_succeeds() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
})
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
Box::new(FailAuthenticationMethod),
@ -298,84 +287,79 @@ mod tests {
];
let verifier = Verifier::from(methods);
assert_eq!(
verifier.verify(&mut t1).await.unwrap(),
verifier.verify(&mut authenticator).await.unwrap(),
SuccessAuthenticationMethod.id()
);
}
#[test(tokio::test)]
async fn verifier_should_send_start_method_before_attempting_each_method() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let (tx, rx) = mpsc::channel();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
})
}),
start_method: Box::new(move |method| {
tx.send(method.method).unwrap();
Ok(())
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
Box::new(FailAuthenticationMethod),
Box::new(SuccessAuthenticationMethod),
];
Verifier::from(methods).verify(&mut t1).await.unwrap();
Verifier::from(methods)
.verify(&mut authenticator)
.await
.unwrap();
// Check that we get a start method for each of the attempted methods
match t2.read_frame_as::<Authentication>().await.unwrap().unwrap() {
Authentication::Initialization(_) => (),
x => panic!("Unexpected response: {x:?}"),
}
match t2.read_frame_as::<Authentication>().await.unwrap().unwrap() {
Authentication::StartMethod(x) => assert_eq!(x.method, FailAuthenticationMethod.id()),
x => panic!("Unexpected response: {x:?}"),
}
match t2.read_frame_as::<Authentication>().await.unwrap().unwrap() {
Authentication::StartMethod(x) => {
assert_eq!(x.method, SuccessAuthenticationMethod.id())
}
x => panic!("Unexpected response: {x:?}"),
}
assert_eq!(rx.try_recv().unwrap(), FailAuthenticationMethod.id());
assert_eq!(rx.try_recv().unwrap(), SuccessAuthenticationMethod.id());
assert_eq!(rx.try_recv().unwrap_err(), mpsc::TryRecvError::Empty);
}
#[test(tokio::test)]
async fn verifier_should_send_finished_when_a_method_succeeds() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
// Queue up a response to the initialization request
t2.write_frame_for(&AuthenticationResponse::Initialization(
InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
},
))
.await
.unwrap();
let (tx, rx) = mpsc::channel();
let mut authenticator = TestAuthenticator {
initialize: Box::new(|_| {
Ok(InitializationResponse {
methods: vec![
FailAuthenticationMethod.id().to_string(),
SuccessAuthenticationMethod.id().to_string(),
]
.into_iter()
.collect(),
})
}),
finished: Box::new(move || {
tx.send(()).unwrap();
Ok(())
}),
..Default::default()
};
let methods: Vec<Box<dyn AuthenticationMethod>> = vec![
Box::new(FailAuthenticationMethod),
Box::new(SuccessAuthenticationMethod),
];
Verifier::from(methods).verify(&mut t1).await.unwrap();
// Clear out the initialization and start methods
t2.read_frame_as::<Authentication>().await.unwrap().unwrap();
t2.read_frame_as::<Authentication>().await.unwrap().unwrap();
t2.read_frame_as::<Authentication>().await.unwrap().unwrap();
Verifier::from(methods)
.verify(&mut authenticator)
.await
.unwrap();
match t2.read_frame_as::<Authentication>().await.unwrap().unwrap() {
Authentication::Finished => (),
x => panic!("Unexpected response: {x:?}"),
}
rx.try_recv().unwrap();
assert_eq!(rx.try_recv().unwrap_err(), mpsc::TryRecvError::Empty);
}
}

@ -2,13 +2,16 @@ use std::io;
use async_trait::async_trait;
use super::{AuthenticationMethod, Authenticator};
use crate::authenticator::Authenticator;
use crate::methods::AuthenticationMethod;
/// Authenticaton method for a static secret key
/// Authentication method that skips authentication and approves anything.
#[derive(Clone, Debug)]
pub struct NoneAuthenticationMethod;
impl NoneAuthenticationMethod {
pub const ID: &str = "none";
#[inline]
pub fn new() -> Self {
Self
@ -25,7 +28,7 @@ impl Default for NoneAuthenticationMethod {
#[async_trait]
impl AuthenticationMethod for NoneAuthenticationMethod {
fn id(&self) -> &'static str {
"none"
Self::ID
}
async fn authenticate(&self, _: &mut dyn Authenticator) -> io::Result<()> {

@ -0,0 +1,133 @@
use std::io;
use std::str::FromStr;
use async_trait::async_trait;
use crate::authenticator::Authenticator;
use crate::methods::AuthenticationMethod;
use crate::msg::{Challenge, Error, Question};
/// Authentication method for a static secret key
#[derive(Clone, Debug)]
pub struct StaticKeyAuthenticationMethod<T> {
    // The expected key; a challenge answer must parse into a value equal to this.
    key: T,
}

impl<T> StaticKeyAuthenticationMethod<T> {
    /// Unique identifier associated with this authentication method.
    // NOTE: `&'static str` is spelled out explicitly; eliding the lifetime in an
    // associated constant is deprecated and becomes an error in edition 2024.
    pub const ID: &'static str = "static_key";

    /// Creates a new method that validates challenge answers against `key`.
    #[inline]
    pub fn new(key: T) -> Self {
        Self { key }
    }
}
#[async_trait]
impl<T> AuthenticationMethod for StaticKeyAuthenticationMethod<T>
where
T: FromStr + PartialEq + Send + Sync,
{
fn id(&self) -> &'static str {
Self::ID
}
async fn authenticate(&self, authenticator: &mut dyn Authenticator) -> io::Result<()> {
let response = authenticator
.challenge(Challenge {
questions: vec![Question {
label: "key".to_string(),
text: "Provide a key: ".to_string(),
options: Default::default(),
}],
options: Default::default(),
})
.await?;
if response.answers.is_empty() {
return Err(Error::non_fatal("missing answer").into_io_permission_denied());
}
match response.answers.into_iter().next().unwrap().parse::<T>() {
Ok(key) if key == self.key => Ok(()),
_ => Err(Error::non_fatal("answer does not match key").into_io_permission_denied()),
}
}
}
#[cfg(test)]
mod tests {
    use test_log::test;

    use super::*;
    use crate::authenticator::TestAuthenticator;
    use crate::msg::*;

    /// Builds a test authenticator whose challenge handler always responds
    /// with the given set of answers.
    fn answering_with(answers: Vec<String>) -> TestAuthenticator {
        TestAuthenticator {
            challenge: Box::new(move |_| {
                Ok(ChallengeResponse {
                    answers: answers.clone(),
                })
            }),
            ..Default::default()
        }
    }

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_key_challenge_fails() {
        // An error raised while issuing the challenge should bubble up untouched.
        let method = StaticKeyAuthenticationMethod::new(String::new());

        let mut authenticator = TestAuthenticator {
            challenge: Box::new(|_| Err(io::Error::new(io::ErrorKind::InvalidData, "test error"))),
            ..Default::default()
        };

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::InvalidData);
        assert_eq!(err.to_string(), "test error");
    }

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_no_answer_included_in_challenge_response() {
        // An empty answer list is treated as a permission failure.
        let method = StaticKeyAuthenticationMethod::new(String::new());
        let mut authenticator = answering_with(Vec::new());

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::PermissionDenied);
        assert_eq!(err.to_string(), "Error: missing answer");
    }

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_answer_does_not_match_key() {
        // A non-matching answer is rejected with a permission failure.
        let method = StaticKeyAuthenticationMethod::new(String::from("answer"));
        let mut authenticator = answering_with(vec![String::from("other")]);

        let err = method.authenticate(&mut authenticator).await.unwrap_err();
        assert_eq!(err.kind(), io::ErrorKind::PermissionDenied);
        assert_eq!(err.to_string(), "Error: answer does not match key");
    }

    #[test(tokio::test)]
    async fn authenticate_should_succeed_if_answer_matches_key() {
        // A matching answer authenticates successfully.
        let method = StaticKeyAuthenticationMethod::new(String::from("answer"));
        let mut authenticator = answering_with(vec![String::from("answer")]);

        method.authenticate(&mut authenticator).await.unwrap();
    }
}

@ -3,7 +3,7 @@ name = "distant-core"
description = "Core library for distant, enabling operation on a remote computer through file and process manipulation"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.6"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -11,44 +11,26 @@ repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
schemars = ["dep:schemars", "distant-net/schemars"]
[dependencies]
async-trait = "0.1.68"
bitflags = "2.0.2"
bitflags = "2.3.1"
bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.20.0-alpha.6", path = "../distant-net" }
distant-net = { version = "=0.20.0", path = "../distant-net" }
distant-protocol = { version = "=0.20.0", path = "../distant-protocol" }
futures = "0.3.28"
grep = "0.2.11"
hex = "0.4.3"
ignore = "0.4.20"
log = "0.4.17"
notify = { version = "5.1.0", features = ["serde"] }
log = "0.4.18"
num_cpus = "1.15.0"
once_cell = "1.17.1"
portable-pty = "0.8.1"
once_cell = "1.17.2"
rand = { version = "0.8.5", features = ["getrandom"] }
regex = "1.7.3"
serde = { version = "1.0.159", features = ["derive"] }
regex = "1.8.3"
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.95"
shell-words = "1.1.0"
serde_json = "1.0.96"
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.27.0", features = ["full"] }
tokio-util = { version = "0.7.7", features = ["codec"] }
walkdir = "2.3.3"
whoami = "1.4.0"
winsplit = "0.1.0"
# Optional dependencies based on features
schemars = { version = "0.8.12", optional = true }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
assert_fs = "1.0.12"
env_logger = "0.10.0"
indoc = "2.0.1"
predicates = "3.0.2"
rstest = "0.17.0"
test-log = "0.2.11"

@ -1,26 +1,20 @@
# distant core
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.64.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-core.svg
[distant_crates_lnk]: https://crates.io/crates/distant-core
[distant_doc_img]: https://docs.rs/distant-core/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-core
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.64+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/09/22/Rust-1.64.0.html
Library that powers the [`distant`](https://github.com/chipsenkbeil/distant)
binary.
🚧 **(Alpha stage software) This library is in rapid development and may break or change frequently!** 🚧
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-core` library supplies the client, manager, and server
implementations for use with the distant API in order to communicate with
remote machines and perform actions. This library acts as the primary
implementation that powers the CLI, but is also available for other extensions
like `distant-ssh2`.
The `distant-core` library supplies the client and server interfaces along with
a client implementation for distant. The library exposes an API that downstream
libraries such as `distant-local` and `distant-ssh2` can implement to provide a
distant-compatible interface.
## Installation
@ -28,41 +22,7 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-core = "0.19"
```
## Features
Currently, the library supports the following features:
- `schemars`: derives the `schemars::JsonSchema` interface on
`DistantMsg`, `DistantRequestData`, and `DistantResponseData` data types
By default, no features are enabled on the library.
## Examples
Below is an example of connecting to a distant server over TCP without any
encryption or authentication:
```rust
use distant_core::{
DistantClient,
DistantChannelExt,
net::{PlainCodec, TcpClientExt},
};
use std::{net::SocketAddr, path::Path};
// Connect to a server located at example.com on port 8080 that is using
// no encryption or authentication (PlainCodec)
let addr: SocketAddr = "example.com:8080".parse().unwrap();
let mut client = DistantClient::connect(addr, PlainCodec).await
.expect("Failed to connect");
// Append text to a file
// NOTE: This method comes from DistantChannelExt
client.append_file_text(Path::new("path/to/file.txt"), "new contents").await
.expect("Failed to append to file");
distant-core = "0.20"
```
## License

@ -4,50 +4,37 @@ use std::sync::Arc;
use async_trait::async_trait;
use distant_net::common::ConnectionId;
use distant_net::server::{ConnectionCtx, Reply, ServerCtx, ServerHandler};
use distant_net::server::{Reply, RequestCtx, ServerHandler};
use log::*;
use crate::protocol::{
self, Capabilities, ChangeKind, DirEntry, Environment, Error, Metadata, ProcessId, PtySize,
SearchId, SearchQuery, SystemInfo,
self, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, PtySize,
SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
};
mod local;
pub use local::LocalDistantApi;
mod reply;
use reply::DistantSingleReply;
/// Represents the context provided to the [`DistantApi`] for incoming requests
pub struct DistantCtx<T> {
pub struct DistantCtx {
pub connection_id: ConnectionId,
pub reply: Box<dyn Reply<Data = protocol::Response>>,
pub local_data: Arc<T>,
}
/// Represents a [`ServerHandler`] that leverages an API compliant with `distant`
pub struct DistantApiServerHandler<T, D>
pub struct DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
api: T,
api: Arc<T>,
}
impl<T, D> DistantApiServerHandler<T, D>
impl<T> DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
pub fn new(api: T) -> Self {
Self { api }
}
}
impl DistantApiServerHandler<LocalDistantApi, <LocalDistantApi as DistantApi>::LocalData> {
/// Creates a new server using the [`LocalDistantApi`] implementation
pub fn local() -> io::Result<Self> {
Ok(Self {
api: LocalDistantApi::initialize()?,
})
Self { api: Arc::new(api) }
}
}
@ -63,12 +50,15 @@ fn unsupported<T>(label: &str) -> io::Result<T> {
/// which can be used to build other servers that are compatible with distant
#[async_trait]
pub trait DistantApi {
type LocalData: Send + Sync;
/// Invoked whenever a new connection is established.
#[allow(unused_variables)]
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked whenever a new connection is established, providing a mutable reference to the
/// newly-created local data. This is a way to support modifying local data before it is used.
/// Invoked whenever an existing connection is dropped.
#[allow(unused_variables)]
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
@ -76,8 +66,8 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn capabilities(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Capabilities> {
unsupported("capabilities")
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
unsupported("version")
}
/// Reads bytes from a file.
@ -86,11 +76,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<Vec<u8>> {
async fn read_file(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
unsupported("read_file")
}
@ -100,11 +86,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<String> {
async fn read_file_text(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<String> {
unsupported("read_file_text")
}
@ -115,12 +97,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn write_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn write_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("write_file")
}
@ -133,7 +110,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn write_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -147,12 +124,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn append_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn append_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("append_file")
}
@ -165,7 +137,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn append_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -184,7 +156,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn read_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
depth: usize,
absolute: bool,
@ -201,12 +173,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn create_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
all: bool,
) -> io::Result<()> {
async fn create_dir(&self, ctx: DistantCtx, path: PathBuf, all: bool) -> io::Result<()> {
unsupported("create_dir")
}
@ -217,12 +184,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn copy(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn copy(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("copy")
}
@ -233,12 +195,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn remove(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
force: bool,
) -> io::Result<()> {
async fn remove(&self, ctx: DistantCtx, path: PathBuf, force: bool) -> io::Result<()> {
unsupported("remove")
}
@ -249,12 +206,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn rename(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn rename(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("rename")
}
@ -269,7 +221,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn watch(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
recursive: bool,
only: Vec<ChangeKind>,
@ -284,7 +236,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn unwatch(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<()> {
async fn unwatch(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<()> {
unsupported("unwatch")
}
@ -294,7 +246,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn exists(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<bool> {
async fn exists(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<bool> {
unsupported("exists")
}
@ -308,7 +260,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn metadata(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
canonicalize: bool,
resolve_file_type: bool,
@ -316,17 +268,31 @@ pub trait DistantApi {
unsupported("metadata")
}
/// Sets permissions for a file, directory, or symlink.
///
/// * `path` - the path to the file, directory, or symlink
/// * `resolve_symlink` - if true, will resolve the path to the underlying file/directory
/// * `permissions` - the new permissions to apply
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn set_permissions(
&self,
ctx: DistantCtx,
path: PathBuf,
permissions: Permissions,
options: SetPermissionsOptions,
) -> io::Result<()> {
unsupported("set_permissions")
}
/// Searches files for matches based on a query.
///
/// * `query` - the specific query to perform
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn search(
&self,
ctx: DistantCtx<Self::LocalData>,
query: SearchQuery,
) -> io::Result<SearchId> {
async fn search(&self, ctx: DistantCtx, query: SearchQuery) -> io::Result<SearchId> {
unsupported("search")
}
@ -336,11 +302,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn cancel_search(
&self,
ctx: DistantCtx<Self::LocalData>,
id: SearchId,
) -> io::Result<()> {
async fn cancel_search(&self, ctx: DistantCtx, id: SearchId) -> io::Result<()> {
unsupported("cancel_search")
}
@ -355,7 +317,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn proc_spawn(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
@ -370,7 +332,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_kill(&self, ctx: DistantCtx<Self::LocalData>, id: ProcessId) -> io::Result<()> {
async fn proc_kill(&self, ctx: DistantCtx, id: ProcessId) -> io::Result<()> {
unsupported("proc_kill")
}
@ -381,12 +343,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_stdin(
&self,
ctx: DistantCtx<Self::LocalData>,
id: ProcessId,
data: Vec<u8>,
) -> io::Result<()> {
async fn proc_stdin(&self, ctx: DistantCtx, id: ProcessId, data: Vec<u8>) -> io::Result<()> {
unsupported("proc_stdin")
}
@ -399,7 +356,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn proc_resize_pty(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
id: ProcessId,
size: PtySize,
) -> io::Result<()> {
@ -410,32 +367,34 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> {
async fn system_info(&self, ctx: DistantCtx) -> io::Result<SystemInfo> {
unsupported("system_info")
}
}
#[async_trait]
impl<T, D> ServerHandler for DistantApiServerHandler<T, D>
impl<T> ServerHandler for DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync + 'static,
{
type LocalData = D;
type Request = protocol::Msg<protocol::Request>;
type Response = protocol::Msg<protocol::Response>;
/// Overridden to leverage [`DistantApi`] implementation of `on_accept`
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
T::on_accept(&self.api, ctx).await
/// Overridden to leverage [`DistantApi`] implementation of `on_connect`.
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
T::on_connect(&self.api, id).await
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
let ServerCtx {
/// Overridden to leverage [`DistantApi`] implementation of `on_disconnect`.
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
T::on_disconnect(&self.api, id).await
}
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
let RequestCtx {
connection_id,
request,
reply,
local_data,
} = ctx;
// Convert our reply to a queued reply so we can ensure that the result
@ -448,10 +407,9 @@ where
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data,
};
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
if let protocol::Response::Error(x) = &data {
@ -460,27 +418,34 @@ where
protocol::Msg::Single(data)
}
protocol::Msg::Batch(list) => {
protocol::Msg::Batch(list)
if matches!(request.header.get_as("sequence"), Some(Ok(true))) =>
{
let mut out = Vec::new();
let mut has_failed = false;
for data in list {
// Once we hit a failure, all remaining requests return interrupted
if has_failed {
out.push(protocol::Response::Error(protocol::Error {
kind: protocol::ErrorKind::Interrupted,
description: String::from("Canceled due to earlier error"),
}));
continue;
}
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data: Arc::clone(&local_data),
};
// TODO: This does not run in parallel, meaning that the next item in the
// batch will not be queued until the previous item completes! This
// would be useful if we wanted to chain requests where the previous
// request feeds into the current request, but not if we just want
// to run everything together. So we should instead rewrite this
// to spawn a task per request and then await completion of all tasks
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
// Report outgoing errors in our debug logs and mark as failed
// to cancel any future tasks being run
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
has_failed = true;
}
out.push(data);
@ -488,17 +453,54 @@ where
protocol::Msg::Batch(out)
}
protocol::Msg::Batch(list) => {
let mut tasks = Vec::new();
// If sequence specified as true, we want to process in order, otherwise we can
// process in any order
for data in list {
let api = Arc::clone(&self.api);
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
};
let task = tokio::spawn(async move {
let data = handle_request(api, ctx, data).await;
// Report outgoing errors in our debug logs
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
}
data
});
tasks.push(task);
}
let out = futures::future::join_all(tasks)
.await
.into_iter()
.map(|x| match x {
Ok(x) => x,
Err(x) => protocol::Response::Error(x.to_string().into()),
})
.collect();
protocol::Msg::Batch(out)
}
};
// Queue up our result to go before ANY of the other messages that might be sent.
// This is important to avoid situations such as when a process is started, but before
// the confirmation can be sent some stdout or stderr is captured and sent first.
if let Err(x) = reply.send_before(response).await {
if let Err(x) = reply.send_before(response) {
error!("[Conn {}] Failed to send response: {}", connection_id, x);
}
// Flush out all of our replies thus far and toggle to no longer hold submissions
if let Err(x) = reply.flush(false).await {
if let Err(x) = reply.flush(false) {
error!(
"[Conn {}] Failed to flush response queue: {}",
connection_id, x
@ -508,54 +510,46 @@ where
}
/// Processes an incoming request
async fn handle_request<T, D>(
server: &DistantApiServerHandler<T, D>,
ctx: DistantCtx<D>,
async fn handle_request<T>(
api: Arc<T>,
ctx: DistantCtx,
request: protocol::Request,
) -> protocol::Response
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync,
{
match request {
protocol::Request::Capabilities {} => server
.api
.capabilities(ctx)
protocol::Request::Version {} => api
.version(ctx)
.await
.map(|supported| protocol::Response::Capabilities { supported })
.map(protocol::Response::Version)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileRead { path } => server
.api
protocol::Request::FileRead { path } => api
.read_file(ctx, path)
.await
.map(|data| protocol::Response::Blob { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileReadText { path } => server
.api
protocol::Request::FileReadText { path } => api
.read_file_text(ctx, path)
.await
.map(|data| protocol::Response::Text { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWrite { path, data } => server
.api
protocol::Request::FileWrite { path, data } => api
.write_file(ctx, path, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWriteText { path, text } => server
.api
protocol::Request::FileWriteText { path, text } => api
.write_file_text(ctx, path, text)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppend { path, data } => server
.api
protocol::Request::FileAppend { path, data } => api
.append_file(ctx, path, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppendText { path, text } => server
.api
protocol::Request::FileAppendText { path, text } => api
.append_file_text(ctx, path, text)
.await
.map(|_| protocol::Response::Ok)
@ -566,8 +560,7 @@ where
absolute,
canonicalize,
include_root,
} => server
.api
} => api
.read_dir(ctx, path, depth, absolute, canonicalize, include_root)
.await
.map(|(entries, errors)| protocol::Response::DirEntries {
@ -575,26 +568,22 @@ where
errors: errors.into_iter().map(Error::from).collect(),
})
.unwrap_or_else(protocol::Response::from),
protocol::Request::DirCreate { path, all } => server
.api
protocol::Request::DirCreate { path, all } => api
.create_dir(ctx, path, all)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Remove { path, force } => server
.api
protocol::Request::Remove { path, force } => api
.remove(ctx, path, force)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Copy { src, dst } => server
.api
protocol::Request::Copy { src, dst } => api
.copy(ctx, src, dst)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Rename { src, dst } => server
.api
protocol::Request::Rename { src, dst } => api
.rename(ctx, src, dst)
.await
.map(|_| protocol::Response::Ok)
@ -604,20 +593,17 @@ where
recursive,
only,
except,
} => server
.api
} => api
.watch(ctx, path, recursive, only, except)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Unwatch { path } => server
.api
protocol::Request::Unwatch { path } => api
.unwatch(ctx, path)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Exists { path } => server
.api
protocol::Request::Exists { path } => api
.exists(ctx, path)
.await
.map(|value| protocol::Response::Exists { value })
@ -626,20 +612,26 @@ where
path,
canonicalize,
resolve_file_type,
} => server
.api
} => api
.metadata(ctx, path, canonicalize, resolve_file_type)
.await
.map(protocol::Response::Metadata)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Search { query } => server
.api
protocol::Request::SetPermissions {
path,
permissions,
options,
} => api
.set_permissions(ctx, path, permissions, options)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Search { query } => api
.search(ctx, query)
.await
.map(|id| protocol::Response::SearchStarted { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::CancelSearch { id } => server
.api
protocol::Request::CancelSearch { id } => api
.cancel_search(ctx, id)
.await
.map(|_| protocol::Response::Ok)
@ -649,32 +641,27 @@ where
environment,
current_dir,
pty,
} => server
.api
} => api
.proc_spawn(ctx, cmd.into(), environment, current_dir, pty)
.await
.map(|id| protocol::Response::ProcSpawned { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcKill { id } => server
.api
protocol::Request::ProcKill { id } => api
.proc_kill(ctx, id)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcStdin { id, data } => server
.api
protocol::Request::ProcStdin { id, data } => api
.proc_stdin(ctx, id, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcResizePty { id, size } => server
.api
protocol::Request::ProcResizePty { id, size } => api
.proc_resize_pty(ctx, id, size)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::SystemInfo {} => server
.api
protocol::Request::SystemInfo {} => api
.system_info(ctx)
.await
.map(protocol::Response::SystemInfo)

@ -1,6 +1,4 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use distant_net::server::Reply;
@ -19,14 +17,10 @@ impl From<Box<dyn Reply<Data = protocol::Msg<protocol::Response>>>> for DistantS
impl Reply for DistantSingleReply {
type Data = protocol::Response;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
fn send(&self, data: Self::Data) -> io::Result<()> {
self.0.send(protocol::Msg::Single(data))
}
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
self.0.blocking_send(protocol::Msg::Single(data))
}
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>> {
Box::new(Self(self.0.clone_reply()))
}

@ -11,8 +11,8 @@ use crate::client::{
Watcher,
};
use crate::protocol::{
self, Capabilities, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, PtySize,
SearchId, SearchQuery, SystemInfo,
self, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, Permissions, PtySize,
SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
};
pub type AsyncReturn<'a, T, E = io::Error> =
@ -38,17 +38,18 @@ pub trait DistantChannelExt {
data: impl Into<String>,
) -> AsyncReturn<'_, ()>;
/// Retrieves server capabilities
fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities>;
/// Copies a remote file or directory from src to dst
fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()>;
/// Creates a remote directory, optionally creating all parent components if specified
fn create_dir(&mut self, path: impl Into<PathBuf>, all: bool) -> AsyncReturn<'_, ()>;
/// Checks whether the `path` exists on the remote machine
fn exists(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, bool>;
/// Checks whether this client is compatible with the remote server
fn is_compatible(&mut self) -> AsyncReturn<'_, bool>;
/// Retrieves metadata about a path on a remote machine
fn metadata(
&mut self,
@ -57,6 +58,14 @@ pub trait DistantChannelExt {
resolve_file_type: bool,
) -> AsyncReturn<'_, Metadata>;
/// Sets permissions for a path on a remote machine
fn set_permissions(
&mut self,
path: impl Into<PathBuf>,
permissions: Permissions,
options: SetPermissionsOptions,
) -> AsyncReturn<'_, ()>;
/// Perform a search
fn search(&mut self, query: impl Into<SearchQuery>) -> AsyncReturn<'_, Searcher>;
@ -128,6 +137,12 @@ pub trait DistantChannelExt {
/// Retrieves information about the remote system
fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo>;
/// Retrieves server version information
fn version(&mut self) -> AsyncReturn<'_, Version>;
/// Returns version of protocol that the client uses
fn protocol_version(&self) -> protocol::semver::Version;
/// Writes a remote file with the data from a collection of bytes
fn write_file(
&mut self,
@ -196,18 +211,6 @@ impl DistantChannelExt
)
}
fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities> {
make_body!(
self,
protocol::Request::Capabilities {},
|data| match data {
protocol::Response::Capabilities { supported } => Ok(supported),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
}
fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()> {
make_body!(
self,
@ -236,6 +239,15 @@ impl DistantChannelExt
)
}
/// Checks whether this client is compatible with the remote server.
///
/// Issues a `Version` request and compares the server's reported protocol
/// version using `protocol::is_compatible_with`.
fn is_compatible(&mut self) -> AsyncReturn<'_, bool> {
    make_body!(self, protocol::Request::Version {}, |data| match data {
        // Compare the server's protocol version against what we support
        protocol::Response::Version(version) =>
            Ok(protocol::is_compatible_with(&version.protocol_version)),
        protocol::Response::Error(x) => Err(io::Error::from(x)),
        _ => Err(mismatched_response()),
    })
}
fn metadata(
&mut self,
path: impl Into<PathBuf>,
@ -257,6 +269,23 @@ impl DistantChannelExt
)
}
/// Sets permissions for a path on the remote machine.
///
/// Sends a `SetPermissions` request carrying the target `path`, the desired
/// `permissions`, and the application `options`; the `@ok` form of the macro
/// maps a plain ok response to `()`.
fn set_permissions(
    &mut self,
    path: impl Into<PathBuf>,
    permissions: Permissions,
    options: SetPermissionsOptions,
) -> AsyncReturn<'_, ()> {
    make_body!(
        self,
        protocol::Request::SetPermissions {
            path: path.into(),
            permissions,
            options,
        },
        @ok
    )
}
fn search(&mut self, query: impl Into<SearchQuery>) -> AsyncReturn<'_, Searcher> {
let query = query.into();
Box::pin(async move { Searcher::search(self.clone(), query).await })
@ -432,6 +461,18 @@ impl DistantChannelExt
})
}
/// Retrieves the server's version information via a `Version` request.
fn version(&mut self) -> AsyncReturn<'_, Version> {
    make_body!(self, protocol::Request::Version {}, |data| match data {
        protocol::Response::Version(x) => Ok(x),
        protocol::Response::Error(x) => Err(io::Error::from(x)),
        _ => Err(mismatched_response()),
    })
}
/// Returns the protocol version this client itself speaks
/// (`protocol::PROTOCOL_VERSION`); no request is sent to the server.
fn protocol_version(&self) -> protocol::semver::Version {
    protocol::PROTOCOL_VERSION
}
fn write_file(
&mut self,
path: impl Into<PathBuf>,

@ -333,24 +333,24 @@ fn swap_prefix(obj: &mut Map<String, Value>, old: &str, new: &str) {
}
impl LspContent {
/// Converts all URIs with `file://` as the scheme to `distant://` instead
/// Converts all URIs with `file` as the scheme to `distant` instead
pub fn convert_local_scheme_to_distant(&mut self) {
self.convert_local_scheme_to("distant://")
self.convert_local_scheme_to("distant")
}
/// Converts all URIs with `file://` as the scheme to `scheme` instead
/// Converts all URIs with `file` as the scheme to `scheme` instead
pub fn convert_local_scheme_to(&mut self, scheme: &str) {
swap_prefix(&mut self.0, "file://", scheme);
swap_prefix(&mut self.0, "file:", &format!("{scheme}:"));
}
/// Converts all URIs with `distant://` as the scheme to `file://` instead
/// Converts all URIs with `distant` as the scheme to `file` instead
pub fn convert_distant_scheme_to_local(&mut self) {
self.convert_scheme_to_local("distant://")
self.convert_scheme_to_local("distant")
}
/// Converts all URIs with `scheme` as the scheme to `file://` instead
/// Converts all URIs with `scheme` as the scheme to `file` instead
pub fn convert_scheme_to_local(&mut self, scheme: &str) {
swap_prefix(&mut self.0, scheme, "file://");
swap_prefix(&mut self.0, &format!("{scheme}:"), "file:");
}
}
@ -719,7 +719,7 @@ mod tests {
"key12": true,
}));
content.convert_local_scheme_to("custom://");
content.convert_local_scheme_to("custom");
assert_eq!(
content.0,
make_obj!({
@ -809,7 +809,7 @@ mod tests {
"key12": true,
}));
content.convert_scheme_to_local("custom://");
content.convert_scheme_to_local("custom");
assert_eq!(
content.0,
make_obj!({

@ -5,9 +5,8 @@ use distant_net::client::Mailbox;
use distant_net::common::{Request, Response};
use log::*;
use tokio::io;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::{TryRecvError, TrySendError};
use tokio::sync::RwLock;
use tokio::sync::{mpsc, RwLock};
use tokio::task::JoinHandle;
use crate::client::DistantChannel;

@ -263,12 +263,16 @@ mod tests {
req.id,
vec![
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
))
@ -280,8 +284,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -289,8 +295,10 @@ mod tests {
assert_eq!(
change,
Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()]
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}
);
}
@ -330,8 +338,10 @@ mod tests {
.write_frame_for(&Response::new(
req.id.clone(),
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -342,8 +352,10 @@ mod tests {
.write_frame_for(&Response::new(
req.id.clone() + "1",
protocol::Response::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -354,8 +366,10 @@ mod tests {
.write_frame_for(&Response::new(
req.id,
protocol::Response::Changed(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()],
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -366,8 +380,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -375,8 +391,10 @@ mod tests {
assert_eq!(
change,
Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()]
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}
);
}
@ -414,16 +432,22 @@ mod tests {
req.id,
vec![
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()],
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()],
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
))
@ -447,8 +471,10 @@ mod tests {
assert_eq!(
change,
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()]
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -470,8 +496,10 @@ mod tests {
.write_frame_for(&Response::new(
req.id,
protocol::Response::Changed(Change {
timestamp: 3,
kind: ChangeKind::Unknown,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
.await
@ -482,15 +510,19 @@ mod tests {
assert_eq!(
watcher.lock().await.next().await,
Some(Change {
kind: ChangeKind::Content,
paths: vec![test_path.to_path_buf()]
timestamp: 1,
kind: ChangeKind::Modify,
path: test_path.to_path_buf(),
details: Default::default(),
})
);
assert_eq!(
watcher.lock().await.next().await,
Some(Change {
kind: ChangeKind::Remove,
paths: vec![test_path.to_path_buf()]
timestamp: 2,
kind: ChangeKind::Delete,
path: test_path.to_path_buf(),
details: Default::default(),
})
);
assert_eq!(watcher.lock().await.next().await, None);

@ -1,5 +1,3 @@
use std::time::Duration;
/// Capacity associated stdin, stdout, and stderr pipes receiving data from remote server
pub const CLIENT_PIPE_CAPACITY: usize = 10000;
@ -8,16 +6,3 @@ pub const CLIENT_WATCHER_CAPACITY: usize = 100;
/// Capacity associated with a client searcher receiving matches
pub const CLIENT_SEARCHER_CAPACITY: usize = 10000;
/// Capacity associated with the server's file watcher to pass events outbound
pub const SERVER_WATCHER_CAPACITY: usize = 10000;
/// Represents the maximum size (in bytes) that data will be read from pipes
/// per individual `read` call
///
/// Current setting is 16k size
pub const MAX_PIPE_CHUNK_SIZE: usize = 16384;
/// Duration in milliseconds to sleep between reading stdout/stderr chunks
/// to avoid sending many small messages to clients
pub const READ_PAUSE_DURATION: Duration = Duration::from_millis(1);

@ -1,3 +1,9 @@
#![doc = include_str!("../README.md")]
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod api;
pub use api::*;
@ -7,10 +13,10 @@ pub use client::*;
mod credentials;
pub use credentials::*;
pub mod protocol;
mod constants;
mod serde_str;
/// Re-export of `distant-net` as `net`
/// Network functionality.
pub use distant_net as net;
/// Protocol structures.
pub use distant_protocol as protocol;

@ -1,553 +0,0 @@
use std::io;
use std::path::PathBuf;
use derive_more::{From, IsVariant};
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
mod capabilities;
pub use capabilities::*;
mod change;
pub use change::*;
mod cmd;
pub use cmd::*;
mod error;
pub use error::*;
mod filesystem;
pub use filesystem::*;
mod metadata;
pub use metadata::*;
mod pty;
pub use pty::*;
mod search;
pub use search::*;
mod system;
pub use system::*;
mod utils;
pub(crate) use utils::*;
/// Id for a remote process
pub type ProcessId = u32;
/// Mapping of environment variables
pub type Environment = distant_net::common::Map;
/// Represents a wrapper around a distant message, supporting single and batch requests
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(untagged)]
pub enum Msg<T> {
    /// A single payload
    Single(T),
    /// A batch of payloads sent together
    Batch(Vec<T>),
}
impl<T> Msg<T> {
    /// Returns true if msg has a single payload
    pub fn is_single(&self) -> bool {
        matches!(self, Self::Single(_))
    }

    /// Returns reference to single value if msg is single variant
    pub fn as_single(&self) -> Option<&T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to single value if msg is single variant
    pub fn as_mut_single(&mut self) -> Option<&mut T> {
        // Fix: previously declared as returning `Option<&T>`, which made the
        // `&mut self` receiver useless and was inconsistent with
        // `as_mut_batch` below; now actually yields a mutable reference
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the single value if msg is single variant
    pub fn into_single(self) -> Option<T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns true if msg has a batch of payloads
    pub fn is_batch(&self) -> bool {
        matches!(self, Self::Batch(_))
    }

    /// Returns reference to batch value if msg is batch variant
    pub fn as_batch(&self) -> Option<&[T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to batch value if msg is batch variant
    pub fn as_mut_batch(&mut self) -> Option<&mut [T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the batch value if msg is batch variant
    pub fn into_batch(self) -> Option<Vec<T>> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Convert into a collection of payload data (a single payload becomes a
    /// one-element vec)
    pub fn into_vec(self) -> Vec<T> {
        match self {
            Self::Single(x) => vec![x],
            Self::Batch(x) => x,
        }
    }
}
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Msg<T> {
    /// Generates the JSON schema root for [`Msg<T>`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Msg<T>)
    }
}
/// Represents the payload of a request to be performed on the remote machine
///
/// Serialized as an internally-tagged enum (`tag = "type"`, snake_case).
#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[strum_discriminants(derive(
    AsRefStr,
    strum::Display,
    EnumIter,
    EnumMessage,
    EnumString,
    Hash,
    PartialOrd,
    Ord,
    IsVariant,
    Serialize,
    Deserialize
))]
#[cfg_attr(
    feature = "schemars",
    strum_discriminants(derive(schemars::JsonSchema))
)]
// The derived discriminant enum doubles as the set of capability kinds
#[strum_discriminants(name(CapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum Request {
    /// Retrieve information about the server's capabilities
    #[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
    Capabilities {},

    /// Reads a file from the specified path on the remote machine
    #[strum_discriminants(strum(message = "Supports reading binary file"))]
    FileRead {
        /// The path to the file on the remote machine
        path: PathBuf,
    },

    /// Reads a file from the specified path on the remote machine
    /// and treats the contents as text
    #[strum_discriminants(strum(message = "Supports reading text file"))]
    FileReadText {
        /// The path to the file on the remote machine
        path: PathBuf,
    },

    /// Writes a file, creating it if it does not exist, and overwriting any existing content
    /// on the remote machine
    #[strum_discriminants(strum(message = "Supports writing binary file"))]
    FileWrite {
        /// The path to the file on the remote machine
        path: PathBuf,

        /// Data for server-side writing of content
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Writes a file using text instead of bytes, creating it if it does not exist,
    /// and overwriting any existing content on the remote machine
    #[strum_discriminants(strum(message = "Supports writing text file"))]
    FileWriteText {
        /// The path to the file on the remote machine
        path: PathBuf,

        /// Data for server-side writing of content
        text: String,
    },

    /// Appends to a file, creating it if it does not exist, on the remote machine
    #[strum_discriminants(strum(message = "Supports appending to binary file"))]
    FileAppend {
        /// The path to the file on the remote machine
        path: PathBuf,

        /// Data for server-side writing of content
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Appends text to a file, creating it if it does not exist, on the remote machine
    #[strum_discriminants(strum(message = "Supports appending to text file"))]
    FileAppendText {
        /// The path to the file on the remote machine
        path: PathBuf,

        /// Data for server-side writing of content
        text: String,
    },

    /// Reads a directory from the specified path on the remote machine
    #[strum_discriminants(strum(message = "Supports reading directory"))]
    DirRead {
        /// The path to the directory on the remote machine
        path: PathBuf,

        /// Maximum depth to traverse with 0 indicating there is no maximum
        /// depth and 1 indicating the most immediate children within the
        /// directory
        #[serde(default = "one")]
        depth: usize,

        /// Whether or not to return absolute or relative paths
        #[serde(default)]
        absolute: bool,

        /// Whether or not to canonicalize the resulting paths, meaning
        /// returning the canonical, absolute form of a path with all
        /// intermediate components normalized and symbolic links resolved
        ///
        /// Note that the flag absolute must be true to have absolute paths
        /// returned, even if canonicalize is flagged as true
        #[serde(default)]
        canonicalize: bool,

        /// Whether or not to include the root directory in the retrieved
        /// entries
        ///
        /// If included, the root directory will also be a canonicalized,
        /// absolute path and will not follow any of the other flags
        #[serde(default)]
        include_root: bool,
    },

    /// Creates a directory on the remote machine
    #[strum_discriminants(strum(message = "Supports creating directory"))]
    DirCreate {
        /// The path to the directory on the remote machine
        path: PathBuf,

        /// Whether or not to create all parent directories
        #[serde(default)]
        all: bool,
    },

    /// Removes a file or directory on the remote machine
    #[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))]
    Remove {
        /// The path to the file or directory on the remote machine
        path: PathBuf,

        /// Whether or not to remove all contents within directory if is a directory.
        /// Does nothing different for files
        #[serde(default)]
        force: bool,
    },

    /// Copies a file or directory on the remote machine
    #[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))]
    Copy {
        /// The path to the file or directory on the remote machine
        src: PathBuf,

        /// New location on the remote machine for copy of file or directory
        dst: PathBuf,
    },

    /// Moves/renames a file or directory on the remote machine
    #[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))]
    Rename {
        /// The path to the file or directory on the remote machine
        src: PathBuf,

        /// New location on the remote machine for the file or directory
        dst: PathBuf,
    },

    /// Watches a path for changes
    #[strum_discriminants(strum(message = "Supports watching filesystem for changes"))]
    Watch {
        /// The path to the file, directory, or symlink on the remote machine
        path: PathBuf,

        /// If true, will recursively watch for changes within directories, otherwise
        /// will only watch for changes immediately within directories
        #[serde(default)]
        recursive: bool,

        /// Filter to only report back specified changes
        #[serde(default)]
        only: Vec<ChangeKind>,

        /// Filter to report back changes except these specified changes
        #[serde(default)]
        except: Vec<ChangeKind>,
    },

    /// Unwatches a path for changes, meaning no additional changes will be reported
    #[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))]
    Unwatch {
        /// The path to the file, directory, or symlink on the remote machine
        path: PathBuf,
    },

    /// Checks whether the given path exists
    #[strum_discriminants(strum(message = "Supports checking if a path exists"))]
    Exists {
        /// The path to the file or directory on the remote machine
        path: PathBuf,
    },

    /// Retrieves filesystem metadata for the specified path on the remote machine
    #[strum_discriminants(strum(
        message = "Supports retrieving metadata about a file, directory, or symlink"
    ))]
    Metadata {
        /// The path to the file, directory, or symlink on the remote machine
        path: PathBuf,

        /// Whether or not to include a canonicalized version of the path, meaning
        /// returning the canonical, absolute form of a path with all
        /// intermediate components normalized and symbolic links resolved
        #[serde(default)]
        canonicalize: bool,

        /// Whether or not to follow symlinks to determine absolute file type (dir/file)
        #[serde(default)]
        resolve_file_type: bool,
    },

    /// Searches filesystem using the provided query
    #[strum_discriminants(strum(message = "Supports searching filesystem using queries"))]
    Search {
        /// Query to perform against the filesystem
        query: SearchQuery,
    },

    /// Cancels an active search being run against the filesystem
    #[strum_discriminants(strum(
        message = "Supports canceling an active search against the filesystem"
    ))]
    CancelSearch {
        /// Id of the search to cancel
        id: SearchId,
    },

    /// Spawns a new process on the remote machine
    #[strum_discriminants(strum(message = "Supports spawning a process"))]
    ProcSpawn {
        /// The full command to run including arguments
        cmd: Cmd,

        /// Environment to provide to the remote process
        #[serde(default)]
        environment: Environment,

        /// Alternative current directory for the remote process
        #[serde(default)]
        current_dir: Option<PathBuf>,

        /// If provided, will spawn process in a pty, otherwise spawns directly
        #[serde(default)]
        pty: Option<PtySize>,
    },

    /// Kills a process running on the remote machine
    #[strum_discriminants(strum(message = "Supports killing a spawned process"))]
    ProcKill {
        /// Id of the actively-running process
        id: ProcessId,
    },

    /// Sends additional data to stdin of running process
    #[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))]
    ProcStdin {
        /// Id of the actively-running process to send stdin data
        id: ProcessId,

        /// Data to send to a process's stdin pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Resize pty of remote process
    #[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))]
    ProcResizePty {
        /// Id of the actively-running process whose pty to resize
        id: ProcessId,

        /// The new pty dimensions
        size: PtySize,
    },

    /// Retrieve information about the server and the system it is on
    #[strum_discriminants(strum(message = "Supports retrieving system information"))]
    SystemInfo {},
}
#[cfg(feature = "schemars")]
impl Request {
    /// Generates the JSON schema root for [`Request`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Request)
    }
}
/// Represents the payload of a successful response
///
/// Serialized as an internally-tagged enum (`tag = "type"`, snake_case).
#[derive(Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
#[strum(serialize_all = "snake_case")]
pub enum Response {
    /// General okay with no extra data, returned in cases like
    /// creating or removing a directory, copying a file, or renaming
    /// a file
    Ok,

    /// General-purpose failure that occurred from some request
    Error(Error),

    /// Response containing some arbitrary, binary data
    Blob {
        /// Binary data associated with the response
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Response containing some arbitrary, text data
    Text {
        /// Text data associated with the response
        data: String,
    },

    /// Response to reading a directory
    DirEntries {
        /// Entries contained within the requested directory
        entries: Vec<DirEntry>,

        /// Errors encountered while scanning for entries
        errors: Vec<Error>,
    },

    /// Response to a filesystem change for some watched file, directory, or symlink
    Changed(Change),

    /// Response to checking if a path exists
    Exists { value: bool },

    /// Represents metadata about some filesystem object (file, directory, symlink) on remote machine
    Metadata(Metadata),

    /// Represents a search being started
    SearchStarted {
        /// Arbitrary id associated with search
        id: SearchId,
    },

    /// Represents some subset of results for a search query (may not be all of them)
    SearchResults {
        /// Arbitrary id associated with search
        id: SearchId,

        /// Collection of matches from performing a query
        matches: Vec<SearchQueryMatch>,
    },

    /// Represents a search being completed
    SearchDone {
        /// Arbitrary id associated with search
        id: SearchId,
    },

    /// Response to starting a new process
    ProcSpawned {
        /// Arbitrary id associated with running process
        id: ProcessId,
    },

    /// Actively-transmitted stdout as part of running process
    ProcStdout {
        /// Arbitrary id associated with running process
        id: ProcessId,

        /// Data read from a process' stdout pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Actively-transmitted stderr as part of running process
    ProcStderr {
        /// Arbitrary id associated with running process
        id: ProcessId,

        /// Data read from a process' stderr pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },

    /// Response to a process finishing
    ProcDone {
        /// Arbitrary id associated with running process
        id: ProcessId,

        /// Whether or not termination was successful
        success: bool,

        /// Exit code associated with termination, will be missing if terminated by signal
        code: Option<i32>,
    },

    /// Response to retrieving information about the server and the system it is on
    SystemInfo(SystemInfo),

    /// Response to retrieving information about the server's capabilities
    Capabilities { supported: Capabilities },
}
#[cfg(feature = "schemars")]
impl Response {
    /// Generates the JSON schema root for [`Response`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Response)
    }
}
impl From<io::Error> for Response {
fn from(x: io::Error) -> Self {
Self::Error(Error::from(x))
}
}
/// Serde default helper yielding `1` (used by `Request::DirRead`'s `depth`)
const fn one() -> usize {
    1
}

@ -1,207 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
use super::CapabilityKind;
/// Set of supported capabilities for a server
///
/// Backed by a `HashSet`, so membership is keyed by [`Capability`]'s
/// case-insensitive `kind` (see its `PartialEq`/`Hash` impls below).
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(transparent)]
pub struct Capabilities(#[into_iterator(owned, ref)] HashSet<Capability>);
impl Capabilities {
    /// Return set of capabilities encompassing all possible capabilities
    pub fn all() -> Self {
        Self(CapabilityKind::iter().map(Capability::from).collect())
    }

    /// Return empty set of capabilities
    pub fn none() -> Self {
        Self(HashSet::new())
    }

    /// Builds a lookup probe for the described kind; `Capability` equality
    /// and hashing only consider `kind`, so an empty description suffices
    /// (previously this construction was duplicated in `contains`, `take`,
    /// and `remove`)
    fn probe(kind: impl AsRef<str>) -> Capability {
        Capability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        }
    }

    /// Returns true if the capability with described kind is included
    pub fn contains(&self, kind: impl AsRef<str>) -> bool {
        self.0.contains(&Self::probe(kind))
    }

    /// Adds the specified capability to the set of capabilities
    ///
    /// * If the set did not have this capability, returns `true`
    /// * If the set did have this capability, returns `false`
    pub fn insert(&mut self, cap: impl Into<Capability>) -> bool {
        self.0.insert(cap.into())
    }

    /// Removes the capability with the described kind, returning the capability
    pub fn take(&mut self, kind: impl AsRef<str>) -> Option<Capability> {
        self.0.take(&Self::probe(kind))
    }

    /// Removes the capability with the described kind, returning true if it existed
    pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
        self.0.remove(&Self::probe(kind))
    }

    /// Converts into vec of capabilities sorted by kind
    pub fn into_sorted_vec(self) -> Vec<Capability> {
        let mut caps = self.0.into_iter().collect::<Vec<_>>();
        caps.sort_unstable();
        caps
    }
}
#[cfg(feature = "schemars")]
impl Capabilities {
    /// Generates the JSON schema root for [`Capabilities`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Capabilities)
    }
}
impl BitAnd for &Capabilities {
    type Output = Capabilities;

    /// Set intersection: capabilities present in both sets
    fn bitand(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 & &rhs.0)
    }
}
impl BitOr for &Capabilities {
    type Output = Capabilities;

    /// Set union: capabilities present in either set
    fn bitor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 | &rhs.0)
    }
}
impl BitOr<Capability> for &Capabilities {
    type Output = Capabilities;

    /// Unions this set with a single additional capability
    fn bitor(self, rhs: Capability) -> Self::Output {
        let mut addition = Capabilities::none();
        addition.insert(rhs);
        self.bitor(&addition)
    }
}
impl BitXor for &Capabilities {
    type Output = Capabilities;

    /// Symmetric difference: capabilities in exactly one of the two sets
    fn bitxor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 ^ &rhs.0)
    }
}
impl FromIterator<Capability> for Capabilities {
fn from_iter<I: IntoIterator<Item = Capability>>(iter: I) -> Self {
let mut this = Capabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// Capability tied to a server. A capability is equivalent based on its kind and not description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Capability {
    /// Label describing the kind of capability
    pub kind: String,

    /// Information about the capability (excluded from equality/ordering/hashing)
    pub description: String,
}
impl Capability {
/// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible,
/// returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<CapabilityKind> {
CapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for Capability {
    /// Capabilities are equal when their kinds match ignoring ASCII case;
    /// the description is intentionally excluded
    fn eq(&self, other: &Self) -> bool {
        self.kind.eq_ignore_ascii_case(&other.kind)
    }
}

impl Eq for Capability {}
impl PartialOrd for Capability {
    // Defers to the total ordering defined by `Ord` below
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Capability {
    /// Orders capabilities case-insensitively by kind, consistent with the
    /// case-insensitive `PartialEq` and `Hash` impls
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare byte-wise with ASCII lowercasing applied on the fly; this
        // yields exactly the same ordering as comparing the
        // `to_ascii_lowercase()` results (String comparison is byte-wise
        // lexicographic) without allocating two Strings per comparison
        self.kind
            .bytes()
            .map(|b| b.to_ascii_lowercase())
            .cmp(other.kind.bytes().map(|b| b.to_ascii_lowercase()))
    }
}
impl Hash for Capability {
    // Hashes only the ASCII-lowercased kind, keeping `Hash` consistent with
    // the case-insensitive `PartialEq` (description is excluded)
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.kind.to_ascii_lowercase().hash(state);
    }
}
impl From<CapabilityKind> for Capability {
    /// Creates a new capability using the kind's default message
    fn from(kind: CapabilityKind) -> Self {
        // Fall back to an empty description when the kind has no message
        let description = match kind.get_message() {
            Some(message) => message.to_string(),
            None => String::new(),
        };

        Self {
            kind: kind.to_string(),
            description,
        }
    }
}
#[cfg(feature = "schemars")]
impl Capability {
    /// Generates the JSON schema root for [`Capability`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Capability)
    }
}

#[cfg(feature = "schemars")]
impl CapabilityKind {
    /// Generates the JSON schema root for [`CapabilityKind`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(CapabilityKind)
    }
}

@ -1,516 +0,0 @@
use std::collections::HashSet;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::ops::{BitOr, Sub};
use std::path::PathBuf;
use std::str::FromStr;
use derive_more::{Deref, DerefMut, IntoIterator};
use notify::event::Event as NotifyEvent;
use notify::EventKind as NotifyEventKind;
use serde::{Deserialize, Serialize};
use strum::{EnumString, EnumVariantNames, VariantNames};
/// Change to one or more paths on the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Change {
    /// Label describing the kind of change
    pub kind: ChangeKind,

    /// Paths that were changed
    pub paths: Vec<PathBuf>,
}
#[cfg(feature = "schemars")]
impl Change {
    /// Generates the JSON schema root for [`Change`] (schemars feature only)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Change)
    }
}
impl From<NotifyEvent> for Change {
fn from(x: NotifyEvent) -> Self {
Self {
kind: x.kind.into(),
paths: x.paths,
}
}
}
/// Kind of filesystem change reported in a [`Change`]
#[derive(
    Copy,
    Clone,
    Debug,
    strum::Display,
    EnumString,
    EnumVariantNames,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum ChangeKind {
    /// Something about a file or directory was accessed, but
    /// no specific details were known
    Access,

    /// A file was closed for executing
    AccessCloseExecute,

    /// A file was closed for reading
    AccessCloseRead,

    /// A file was closed for writing
    AccessCloseWrite,

    /// A file was opened for executing
    AccessOpenExecute,

    /// A file was opened for reading
    AccessOpenRead,

    /// A file was opened for writing
    AccessOpenWrite,

    /// A file or directory was read
    AccessRead,

    /// The access time of a file or directory was changed
    AccessTime,

    /// A file, directory, or something else was created
    Create,

    /// The content of a file or directory changed
    Content,

    /// The data of a file or directory was modified, but
    /// no specific details were known
    Data,

    /// The metadata of a file or directory was modified, but
    /// no specific details were known
    Metadata,

    /// Something about a file or directory was modified, but
    /// no specific details were known
    Modify,

    /// A file, directory, or something else was removed
    Remove,

    /// A file or directory was renamed, but no specific details were known
    Rename,

    /// A file or directory was renamed, and the provided paths
    /// are the source and target in that order (from, to)
    RenameBoth,

    /// A file or directory was renamed, and the provided path
    /// is the origin of the rename (before being renamed)
    RenameFrom,

    /// A file or directory was renamed, and the provided path
    /// is the result of the rename
    RenameTo,

    /// A file's size changed
    Size,

    /// The ownership of a file or directory was changed
    Ownership,

    /// The permissions of a file or directory was changed
    Permissions,

    /// The write or modify time of a file or directory was changed
    WriteTime,

    /// Catchall in case we have no insight as to the type of change
    Unknown,
}
impl ChangeKind {
    /// Returns a list of all variants as str names
    pub const fn variants() -> &'static [&'static str] {
        Self::VARIANTS
    }

    /// Returns a list of all variants as a vec
    pub fn all() -> Vec<ChangeKind> {
        ChangeKindSet::all().into_sorted_vec()
    }

    /// Returns true if the change is a kind of access
    pub fn is_access_kind(&self) -> bool {
        matches!(self, Self::Access | Self::AccessRead)
            || self.is_open_access_kind()
            || self.is_close_access_kind()
    }

    /// Returns true if the change is a kind of open access
    pub fn is_open_access_kind(&self) -> bool {
        match self {
            Self::AccessOpenExecute | Self::AccessOpenRead | Self::AccessOpenWrite => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of close access
    pub fn is_close_access_kind(&self) -> bool {
        match self {
            Self::AccessCloseExecute | Self::AccessCloseRead | Self::AccessCloseWrite => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of creation
    pub fn is_create_kind(&self) -> bool {
        match self {
            Self::Create => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of modification
    pub fn is_modify_kind(&self) -> bool {
        matches!(self, Self::Modify)
            || self.is_data_modify_kind()
            || self.is_metadata_modify_kind()
    }

    /// Returns true if the change is a kind of data modification
    pub fn is_data_modify_kind(&self) -> bool {
        match self {
            Self::Content | Self::Data | Self::Size => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of metadata modification
    pub fn is_metadata_modify_kind(&self) -> bool {
        match self {
            Self::AccessTime
            | Self::Metadata
            | Self::Ownership
            | Self::Permissions
            | Self::WriteTime => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of rename
    pub fn is_rename_kind(&self) -> bool {
        match self {
            Self::Rename | Self::RenameBoth | Self::RenameFrom | Self::RenameTo => true,
            _ => false,
        }
    }

    /// Returns true if the change is a kind of removal
    pub fn is_remove_kind(&self) -> bool {
        match self {
            Self::Remove => true,
            _ => false,
        }
    }

    /// Returns true if the change kind is unknown
    pub fn is_unknown_kind(&self) -> bool {
        match self {
            Self::Unknown => true,
            _ => false,
        }
    }
}
#[cfg(feature = "schemars")]
impl ChangeKind {
    /// Returns the JSON schema describing [`ChangeKind`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKind)
    }
}
impl BitOr for ChangeKind {
    type Output = ChangeKindSet;

    /// Combines two change kinds into a set containing both
    fn bitor(self, rhs: Self) -> Self::Output {
        ChangeKindSet::from(self) | rhs
    }
}
impl From<NotifyEventKind> for ChangeKind {
    /// Maps a `notify` watcher event kind onto the closest [`ChangeKind`],
    /// preserving detail where available and falling back to the generic
    /// variant of each category. Arm order matters: the specific patterns
    /// must precede the catch-all `_` patterns within each category.
    fn from(x: NotifyEventKind) -> Self {
        use notify::event::{
            AccessKind, AccessMode, DataChange, MetadataKind, ModifyKind, RenameMode,
        };
        match x {
            // File/directory access events
            NotifyEventKind::Access(AccessKind::Read) => Self::AccessRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Execute)) => {
                Self::AccessOpenExecute
            }
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Read)) => Self::AccessOpenRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Write)) => Self::AccessOpenWrite,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Execute)) => {
                Self::AccessCloseExecute
            }
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Read)) => Self::AccessCloseRead,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Write)) => Self::AccessCloseWrite,
            NotifyEventKind::Access(_) => Self::Access,
            // File/directory creation events
            NotifyEventKind::Create(_) => Self::Create,
            // Rename-oriented events
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::Both)) => Self::RenameBoth,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::From)) => Self::RenameFrom,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::To)) => Self::RenameTo,
            NotifyEventKind::Modify(ModifyKind::Name(_)) => Self::Rename,
            // Data-modification events
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Content)) => Self::Content,
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Size)) => Self::Size,
            NotifyEventKind::Modify(ModifyKind::Data(_)) => Self::Data,
            // Metadata-modification events
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::AccessTime)) => {
                Self::AccessTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
                Self::WriteTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
                Self::Permissions
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
                Self::Ownership
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(_)) => Self::Metadata,
            // General modification events
            NotifyEventKind::Modify(_) => Self::Modify,
            // File/directory removal events
            NotifyEventKind::Remove(_) => Self::Remove,
            // Catch-all for other events
            NotifyEventKind::Any | NotifyEventKind::Other => Self::Unknown,
        }
    }
}
/// Represents a distinct set of different change kinds
///
/// Dereferences to the underlying [`HashSet`] of [`ChangeKind`], so standard
/// set operations are available directly on this type.
#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ChangeKindSet(HashSet<ChangeKind>);
impl ChangeKindSet {
    /// Produces an empty set of [`ChangeKind`]
    pub fn empty() -> Self {
        Self(HashSet::new())
    }
    /// Produces a set of all [`ChangeKind`]
    pub fn all() -> Self {
        vec![
            ChangeKind::Access,
            ChangeKind::AccessCloseExecute,
            ChangeKind::AccessCloseRead,
            ChangeKind::AccessCloseWrite,
            ChangeKind::AccessOpenExecute,
            ChangeKind::AccessOpenRead,
            ChangeKind::AccessOpenWrite,
            ChangeKind::AccessRead,
            ChangeKind::AccessTime,
            ChangeKind::Create,
            ChangeKind::Content,
            ChangeKind::Data,
            ChangeKind::Metadata,
            ChangeKind::Modify,
            ChangeKind::Remove,
            ChangeKind::Rename,
            ChangeKind::RenameBoth,
            ChangeKind::RenameFrom,
            ChangeKind::RenameTo,
            ChangeKind::Size,
            ChangeKind::Ownership,
            ChangeKind::Permissions,
            ChangeKind::WriteTime,
            ChangeKind::Unknown,
        ]
        .into_iter()
        .collect()
    }
    /// Produces a changeset containing all of the access kinds
    pub fn access_set() -> Self {
        Self::access_open_set()
            | Self::access_close_set()
            | ChangeKind::AccessRead
            | ChangeKind::Access
    }
    /// Produces a changeset containing all of the open access kinds
    pub fn access_open_set() -> Self {
        ChangeKind::AccessOpenExecute | ChangeKind::AccessOpenRead | ChangeKind::AccessOpenWrite
    }
    /// Produces a changeset containing all of the close access kinds
    pub fn access_close_set() -> Self {
        ChangeKind::AccessCloseExecute | ChangeKind::AccessCloseRead | ChangeKind::AccessCloseWrite
    }
    /// Produces a changeset containing all of the modification kinds
    pub fn modify_set() -> Self {
        Self::modify_data_set() | Self::modify_metadata_set() | ChangeKind::Modify
    }
    /// Produces a changeset containing all of the data modification kinds
    pub fn modify_data_set() -> Self {
        ChangeKind::Content | ChangeKind::Data | ChangeKind::Size
    }
    /// Produces a changeset containing all of the metadata modification kinds
    pub fn modify_metadata_set() -> Self {
        ChangeKind::AccessTime
            | ChangeKind::Metadata
            | ChangeKind::Ownership
            | ChangeKind::Permissions
            | ChangeKind::WriteTime
    }
    /// Produces a changeset containing all of the rename kinds
    pub fn rename_set() -> Self {
        ChangeKind::Rename | ChangeKind::RenameBoth | ChangeKind::RenameFrom | ChangeKind::RenameTo
    }
    /// Consumes set and returns a sorted vec of the kinds of changes
    pub fn into_sorted_vec(self) -> Vec<ChangeKind> {
        let mut v = self.0.into_iter().collect::<Vec<_>>();
        v.sort();
        v
    }
}
#[cfg(feature = "schemars")]
impl ChangeKindSet {
    /// Returns the JSON schema describing [`ChangeKindSet`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKindSet)
    }
}
impl fmt::Display for ChangeKindSet {
    /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted
    /// such that this will always be consistent output
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Render every kind to its string form, then sort so equal sets always
        // produce identical output regardless of hash iteration order
        let mut names: Vec<String> = self.0.iter().map(|kind| kind.to_string()).collect();
        names.sort_unstable();
        f.write_str(&names.join(","))
    }
}
impl PartialEq for ChangeKindSet {
    /// Compares the underlying sets directly rather than rendering both sides
    /// to strings: two sets are equal exactly when they contain the same kinds,
    /// which matches the previous string-based comparison (variant names are
    /// distinct and contain no commas) while avoiding two allocations per
    /// comparison. Remains consistent with the [`Hash`] impl, since equal sets
    /// render to equal strings and therefore equal hashes.
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl Eq for ChangeKindSet {}
impl Hash for ChangeKindSet {
    /// Hashes based on the output of [`fmt::Display`]
    ///
    /// The inner [`HashSet`] is not itself hashable, so we hash the sorted,
    /// comma-separated rendering, which is identical for equal sets.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.to_string().hash(state);
    }
}
impl BitOr<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Unions two sets, returning a set containing the kinds of both
    fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output {
        self.0.extend(rhs.0);
        self
    }
}
impl BitOr<ChangeKind> for ChangeKindSet {
    type Output = Self;

    /// Returns the set with the given kind added
    fn bitor(mut self, rhs: ChangeKind) -> Self::Output {
        self.0.extend(std::iter::once(rhs));
        self
    }
}
impl BitOr<ChangeKindSet> for ChangeKind {
    type Output = ChangeKindSet;

    /// Returns the set with this kind added
    fn bitor(self, mut rhs: ChangeKindSet) -> Self::Output {
        rhs.0.insert(self);
        rhs
    }
}
impl Sub<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Returns the kinds in `self` that are not in `other`
    fn sub(self, other: Self) -> Self::Output {
        ChangeKindSet(self.0.difference(&other.0).cloned().collect())
    }
}
impl Sub<&'_ ChangeKindSet> for &ChangeKindSet {
    type Output = ChangeKindSet;

    /// Returns the kinds in `self` that are not in `other`, without consuming either set
    fn sub(self, other: &ChangeKindSet) -> Self::Output {
        ChangeKindSet(self.0.difference(&other.0).cloned().collect())
    }
}
impl FromStr for ChangeKindSet {
type Err = strum::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut change_set = HashSet::new();
for word in s.split(',') {
change_set.insert(ChangeKind::from_str(word.trim())?);
}
Ok(ChangeKindSet(change_set))
}
}
impl FromIterator<ChangeKind> for ChangeKindSet {
fn from_iter<I: IntoIterator<Item = ChangeKind>>(iter: I) -> Self {
let mut change_set = HashSet::new();
for i in iter {
change_set.insert(i);
}
ChangeKindSet(change_set)
}
}
impl From<ChangeKind> for ChangeKindSet {
    /// Produces a set containing only the given kind
    fn from(change_kind: ChangeKind) -> Self {
        std::iter::once(change_kind).collect()
    }
}
impl From<Vec<ChangeKind>> for ChangeKindSet {
    /// Produces a set of the kinds in the vec, discarding duplicates
    fn from(changes: Vec<ChangeKind>) -> Self {
        Self::from_iter(changes)
    }
}
impl Default for ChangeKindSet {
fn default() -> Self {
Self::empty()
}
}

@ -1,53 +0,0 @@
use std::ops::{Deref, DerefMut};
use derive_more::{Display, From, Into};
use serde::{Deserialize, Serialize};
/// Represents some command with arguments to execute
///
/// Stored as a single string in `program arg1 arg2 ...` form; use
/// [`Cmd::program`] and [`Cmd::arguments`] to split it apart.
#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Cmd(String);
impl Cmd {
    /// Creates a new command from the given `cmd`
    pub fn new(cmd: impl Into<String>) -> Self {
        Self(cmd.into())
    }

    /// Returns reference to the program portion of the command
    ///
    /// Trims surrounding whitespace before splitting so a command like
    /// `" ls -la"` reports `"ls"` as its program instead of an empty string
    /// (splitting on the first space of an untrimmed string yielded `""`).
    pub fn program(&self) -> &str {
        let cmd = self.0.trim();
        match cmd.split_once(' ') {
            Some((program, _)) => program.trim(),
            None => cmd,
        }
    }

    /// Returns reference to the arguments portion of the command
    ///
    /// Trims surrounding whitespace before splitting so leading spaces are
    /// not mistaken for the program/argument separator.
    pub fn arguments(&self) -> &str {
        match self.0.trim().split_once(' ') {
            Some((_, arguments)) => arguments.trim(),
            None => "",
        }
    }
}
#[cfg(feature = "schemars")]
impl Cmd {
    /// Returns the JSON schema describing [`Cmd`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Cmd)
    }
}
/// Dereferences to the underlying command string
impl Deref for Cmd {
    type Target = String;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Mutably dereferences to the underlying command string
impl DerefMut for Cmd {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

@ -1,59 +0,0 @@
use std::fs::FileType as StdFileType;
use std::path::PathBuf;
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
/// Represents information about a single entry within a directory
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct DirEntry {
    /// Represents the full path to the entry
    pub path: PathBuf,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Depth at which this entry was created relative to the root (0 being immediately within
    /// root)
    pub depth: usize,
}
#[cfg(feature = "schemars")]
impl DirEntry {
    /// Returns the JSON schema describing [`DirEntry`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(DirEntry)
    }
}
/// Represents the type associated with a dir entry
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum FileType {
    /// Entry is a directory
    Dir,
    /// Entry is a regular file
    File,
    /// Entry is a symbolic link
    Symlink,
}
impl From<StdFileType> for FileType {
    /// Converts from the standard library's file type, favoring `Dir`, then
    /// `Symlink`, and falling back to `File` for anything else
    fn from(ft: StdFileType) -> Self {
        match (ft.is_dir(), ft.is_symlink()) {
            (true, _) => Self::Dir,
            (false, true) => Self::Symlink,
            (false, false) => Self::File,
        }
    }
}
#[cfg(feature = "schemars")]
impl FileType {
    /// Returns the JSON schema describing [`FileType`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(FileType)
    }
}

@ -1,404 +0,0 @@
use std::io;
use std::path::{Path, PathBuf};
use std::time::SystemTime;
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use super::{deserialize_u128_option, serialize_u128_option, FileType};
/// Represents metadata about some path on a remote machine
///
/// The `unix` and `windows` fields carry platform-specific details and are
/// populated according to the platform of the machine that produced the
/// metadata (see [`Metadata::read`]).
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Metadata {
    /// Canonicalized path to the file or directory, resolving symlinks, only included
    /// if flagged during the request
    pub canonicalized_path: Option<PathBuf>,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Size of the file/directory/symlink in bytes
    pub len: u64,
    /// Whether or not the file/directory/symlink is marked as unwriteable
    pub readonly: bool,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was accessed;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub accessed: Option<u128>,
    /// Represents when (in milliseconds) the file/directory/symlink was created;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub created: Option<u128>,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was modified;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub modified: Option<u128>,
    /// Represents metadata that is specific to a unix remote machine
    pub unix: Option<UnixMetadata>,
    /// Represents metadata that is specific to a windows remote machine
    pub windows: Option<WindowsMetadata>,
}
impl Metadata {
    /// Reads metadata for `path` from the local filesystem using symlink
    /// metadata, so a symlink is reported as itself rather than its target.
    ///
    /// * `canonicalize` - when true, also resolves and records the
    ///   canonicalized path (following symlinks)
    /// * `resolve_file_type` - when true, reports the file type of a
    ///   symlink's target instead of the symlink itself
    ///
    /// Timestamps are reported as milliseconds since the unix epoch, with
    /// `None` when the platform does not supply them. Platform-specific
    /// metadata (`unix`/`windows`) is populated only on that platform.
    ///
    /// # Errors
    ///
    /// Returns any error produced while querying the filesystem.
    pub async fn read(
        path: impl AsRef<Path>,
        canonicalize: bool,
        resolve_file_type: bool,
    ) -> io::Result<Self> {
        let metadata = tokio::fs::symlink_metadata(path.as_ref()).await?;
        let canonicalized_path = if canonicalize {
            Some(tokio::fs::canonicalize(path.as_ref()).await?)
        } else {
            None
        };
        // If asking for resolved file type and current type is symlink, then we want to refresh
        // our metadata to get the filetype for the resolved link
        let file_type = if resolve_file_type && metadata.file_type().is_symlink() {
            tokio::fs::metadata(path).await?.file_type()
        } else {
            metadata.file_type()
        };
        Ok(Self {
            canonicalized_path,
            accessed: metadata
                .accessed()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            created: metadata
                .created()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            modified: metadata
                .modified()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            len: metadata.len(),
            readonly: metadata.permissions().readonly(),
            file_type: if file_type.is_dir() {
                FileType::Dir
            } else if file_type.is_file() {
                FileType::File
            } else {
                FileType::Symlink
            },
            // Unix mode bits are only available through the unix metadata extension
            #[cfg(unix)]
            unix: Some({
                use std::os::unix::prelude::*;
                let mode = metadata.mode();
                crate::protocol::UnixMetadata::from(mode)
            }),
            #[cfg(not(unix))]
            unix: None,
            // Windows file attributes are only available through the windows metadata extension
            #[cfg(windows)]
            windows: Some({
                use std::os::windows::prelude::*;
                let attributes = metadata.file_attributes();
                crate::protocol::WindowsMetadata::from(attributes)
            }),
            #[cfg(not(windows))]
            windows: None,
        })
    }
}
#[cfg(feature = "schemars")]
impl Metadata {
    /// Returns the JSON schema describing [`Metadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Metadata)
    }
}
/// Represents unix-specific metadata about some path on a remote machine
///
/// Fields mirror the nine standard rwx permission bits of the file's mode
/// (see [`UnixFilePermissionFlags`] for the bit values).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct UnixMetadata {
    /// Represents whether or not owner can read from the file
    pub owner_read: bool,
    /// Represents whether or not owner can write to the file
    pub owner_write: bool,
    /// Represents whether or not owner can execute the file
    pub owner_exec: bool,
    /// Represents whether or not associated group can read from the file
    pub group_read: bool,
    /// Represents whether or not associated group can write to the file
    pub group_write: bool,
    /// Represents whether or not associated group can execute the file
    pub group_exec: bool,
    /// Represents whether or not other can read from the file
    pub other_read: bool,
    /// Represents whether or not other can write to the file
    pub other_write: bool,
    /// Represents whether or not other can execute the file
    pub other_exec: bool,
}
#[cfg(feature = "schemars")]
impl UnixMetadata {
    /// Returns the JSON schema describing [`UnixMetadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(UnixMetadata)
    }
}
impl From<u32> for UnixMetadata {
/// Create from a unix mode bitset
fn from(mode: u32) -> Self {
let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
Self {
owner_read: flags.contains(UnixFilePermissionFlags::OWNER_READ),
owner_write: flags.contains(UnixFilePermissionFlags::OWNER_WRITE),
owner_exec: flags.contains(UnixFilePermissionFlags::OWNER_EXEC),
group_read: flags.contains(UnixFilePermissionFlags::GROUP_READ),
group_write: flags.contains(UnixFilePermissionFlags::GROUP_WRITE),
group_exec: flags.contains(UnixFilePermissionFlags::GROUP_EXEC),
other_read: flags.contains(UnixFilePermissionFlags::OTHER_READ),
other_write: flags.contains(UnixFilePermissionFlags::OTHER_WRITE),
other_exec: flags.contains(UnixFilePermissionFlags::OTHER_EXEC),
}
}
}
impl From<UnixMetadata> for u32 {
    /// Convert to a unix mode bitset
    fn from(metadata: UnixMetadata) -> Self {
        // `Flags::set` inserts the flag when the bool is true and removes it
        // otherwise; starting from empty, this is equivalent to conditional inserts
        let mut flags = UnixFilePermissionFlags::empty();
        flags.set(UnixFilePermissionFlags::OWNER_READ, metadata.owner_read);
        flags.set(UnixFilePermissionFlags::OWNER_WRITE, metadata.owner_write);
        flags.set(UnixFilePermissionFlags::OWNER_EXEC, metadata.owner_exec);
        flags.set(UnixFilePermissionFlags::GROUP_READ, metadata.group_read);
        flags.set(UnixFilePermissionFlags::GROUP_WRITE, metadata.group_write);
        flags.set(UnixFilePermissionFlags::GROUP_EXEC, metadata.group_exec);
        flags.set(UnixFilePermissionFlags::OTHER_READ, metadata.other_read);
        flags.set(UnixFilePermissionFlags::OTHER_WRITE, metadata.other_write);
        flags.set(UnixFilePermissionFlags::OTHER_EXEC, metadata.other_exec);
        flags.bits()
    }
}
impl UnixMetadata {
    /// Returns true if the permissions indicate the file cannot be written to,
    /// i.e. none of the owner/group/other *write* bits are set.
    ///
    /// NOTE(review): this previously checked the read bits, which reported a
    /// file as readonly when it was unreadable; "readonly" is a statement
    /// about writability, matching `std::fs::Permissions::readonly`.
    pub fn is_readonly(self) -> bool {
        !(self.owner_write || self.group_write || self.other_write)
    }
}
bitflags! {
    /// Standard unix permission bits, matching the octal `chmod` notation
    /// (owner/group/other x read/write/execute)
    struct UnixFilePermissionFlags: u32 {
        const OWNER_READ = 0o400;
        const OWNER_WRITE = 0o200;
        const OWNER_EXEC = 0o100;
        const GROUP_READ = 0o40;
        const GROUP_WRITE = 0o20;
        const GROUP_EXEC = 0o10;
        const OTHER_READ = 0o4;
        const OTHER_WRITE = 0o2;
        const OTHER_EXEC = 0o1;
    }
}
/// Represents windows-specific metadata about some path on a remote machine
///
/// Fields correspond to the file attribute flags in
/// [`WindowsFileAttributeFlags`].
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct WindowsMetadata {
    /// Represents whether or not a file or directory is an archive
    pub archive: bool,
    /// Represents whether or not a file or directory is compressed
    pub compressed: bool,
    /// Represents whether or not the file or directory is encrypted
    pub encrypted: bool,
    /// Represents whether or not a file or directory is hidden
    pub hidden: bool,
    /// Represents whether or not a directory or user data stream is configured with integrity
    pub integrity_stream: bool,
    /// Represents whether or not a file does not have other attributes set
    pub normal: bool,
    /// Represents whether or not a file or directory is not to be indexed by content indexing
    /// service
    pub not_content_indexed: bool,
    /// Represents whether or not a user data stream is not to be read by the background data
    /// integrity scanner
    pub no_scrub_data: bool,
    /// Represents whether or not the data of a file is not available immediately
    pub offline: bool,
    /// Represents whether or not a file or directory is not fully present locally
    pub recall_on_data_access: bool,
    /// Represents whether or not a file or directory has no physical representation on the local
    /// system (is virtual)
    pub recall_on_open: bool,
    /// Represents whether or not a file or directory has an associated reparse point, or a file is
    /// a symbolic link
    pub reparse_point: bool,
    /// Represents whether or not a file is a sparse file
    pub sparse_file: bool,
    /// Represents whether or not a file or directory is used partially or exclusively by the
    /// operating system
    pub system: bool,
    /// Represents whether or not a file is being used for temporary storage
    pub temporary: bool,
}
#[cfg(feature = "schemars")]
impl WindowsMetadata {
    /// Returns the JSON schema describing [`WindowsMetadata`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(WindowsMetadata)
    }
}
impl From<u32> for WindowsMetadata {
/// Create from a windows file attribute bitset
fn from(file_attributes: u32) -> Self {
let flags = WindowsFileAttributeFlags::from_bits_truncate(file_attributes);
Self {
archive: flags.contains(WindowsFileAttributeFlags::ARCHIVE),
compressed: flags.contains(WindowsFileAttributeFlags::COMPRESSED),
encrypted: flags.contains(WindowsFileAttributeFlags::ENCRYPTED),
hidden: flags.contains(WindowsFileAttributeFlags::HIDDEN),
integrity_stream: flags.contains(WindowsFileAttributeFlags::INTEGRITY_SYSTEM),
normal: flags.contains(WindowsFileAttributeFlags::NORMAL),
not_content_indexed: flags.contains(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED),
no_scrub_data: flags.contains(WindowsFileAttributeFlags::NO_SCRUB_DATA),
offline: flags.contains(WindowsFileAttributeFlags::OFFLINE),
recall_on_data_access: flags.contains(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS),
recall_on_open: flags.contains(WindowsFileAttributeFlags::RECALL_ON_OPEN),
reparse_point: flags.contains(WindowsFileAttributeFlags::REPARSE_POINT),
sparse_file: flags.contains(WindowsFileAttributeFlags::SPARSE_FILE),
system: flags.contains(WindowsFileAttributeFlags::SYSTEM),
temporary: flags.contains(WindowsFileAttributeFlags::TEMPORARY),
}
}
}
impl From<WindowsMetadata> for u32 {
    /// Convert to a windows file attribute bitset
    fn from(metadata: WindowsMetadata) -> Self {
        // `Flags::set` inserts the flag when the bool is true and removes it
        // otherwise; starting from empty, this is equivalent to conditional inserts
        let mut flags = WindowsFileAttributeFlags::empty();
        flags.set(WindowsFileAttributeFlags::ARCHIVE, metadata.archive);
        flags.set(WindowsFileAttributeFlags::COMPRESSED, metadata.compressed);
        flags.set(WindowsFileAttributeFlags::ENCRYPTED, metadata.encrypted);
        flags.set(WindowsFileAttributeFlags::HIDDEN, metadata.hidden);
        flags.set(
            WindowsFileAttributeFlags::INTEGRITY_SYSTEM,
            metadata.integrity_stream,
        );
        flags.set(WindowsFileAttributeFlags::NORMAL, metadata.normal);
        flags.set(
            WindowsFileAttributeFlags::NOT_CONTENT_INDEXED,
            metadata.not_content_indexed,
        );
        flags.set(
            WindowsFileAttributeFlags::NO_SCRUB_DATA,
            metadata.no_scrub_data,
        );
        flags.set(WindowsFileAttributeFlags::OFFLINE, metadata.offline);
        flags.set(
            WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS,
            metadata.recall_on_data_access,
        );
        flags.set(
            WindowsFileAttributeFlags::RECALL_ON_OPEN,
            metadata.recall_on_open,
        );
        flags.set(
            WindowsFileAttributeFlags::REPARSE_POINT,
            metadata.reparse_point,
        );
        flags.set(WindowsFileAttributeFlags::SPARSE_FILE, metadata.sparse_file);
        flags.set(WindowsFileAttributeFlags::SYSTEM, metadata.system);
        flags.set(WindowsFileAttributeFlags::TEMPORARY, metadata.temporary);
        flags.bits()
    }
}
bitflags! {
    /// Bit values mirror the Windows `FILE_ATTRIBUTE_*` constants
    /// (e.g. `FILE_ATTRIBUTE_ARCHIVE` = 0x20)
    struct WindowsFileAttributeFlags: u32 {
        const ARCHIVE = 0x20;
        const COMPRESSED = 0x800;
        const ENCRYPTED = 0x4000;
        const HIDDEN = 0x2;
        const INTEGRITY_SYSTEM = 0x8000;
        const NORMAL = 0x80;
        const NOT_CONTENT_INDEXED = 0x2000;
        const NO_SCRUB_DATA = 0x20000;
        const OFFLINE = 0x1000;
        const RECALL_ON_DATA_ACCESS = 0x400000;
        const RECALL_ON_OPEN = 0x40000;
        const REPARSE_POINT = 0x400;
        const SPARSE_FILE = 0x200;
        const SYSTEM = 0x4;
        const TEMPORARY = 0x100;
        // NOTE(review): VIRTUAL has no corresponding field on WindowsMetadata,
        // so it is never surfaced by the From conversions above
        const VIRTUAL = 0x10000;
    }
}

@ -1,140 +0,0 @@
use std::fmt;
use std::num::ParseIntError;
use std::str::FromStr;
use derive_more::{Display, Error};
use portable_pty::PtySize as PortablePtySize;
use serde::{Deserialize, Serialize};
/// Represents the size associated with a remote PTY
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct PtySize {
    /// Number of lines of text
    pub rows: u16,
    /// Number of columns of text
    pub cols: u16,
    /// Width of a cell in pixels. Note that some systems never fill this value and ignore it.
    #[serde(default)]
    pub pixel_width: u16,
    /// Height of a cell in pixels. Note that some systems never fill this value and ignore it.
    #[serde(default)]
    pub pixel_height: u16,
}
impl PtySize {
    /// Creates new size using just rows and columns
    ///
    /// Pixel dimensions are left at zero, matching [`PtySize::default`].
    pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self {
        Self {
            rows,
            cols,
            pixel_width: 0,
            pixel_height: 0,
        }
    }
}
#[cfg(feature = "schemars")]
impl PtySize {
    /// Returns the JSON schema describing [`PtySize`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(PtySize)
    }
}
impl From<PortablePtySize> for PtySize {
fn from(size: PortablePtySize) -> Self {
Self {
rows: size.rows,
cols: size.cols,
pixel_width: size.pixel_width,
pixel_height: size.pixel_height,
}
}
}
impl From<PtySize> for PortablePtySize {
fn from(size: PtySize) -> Self {
Self {
rows: size.rows,
cols: size.cols,
pixel_width: size.pixel_width,
pixel_height: size.pixel_height,
}
}
}
impl fmt::Display for PtySize {
    /// Prints out `rows,cols[,pixel_width,pixel_height]` where the
    /// pixel width and pixel height are only included if either
    /// one of them is not zero
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        } = *self;
        write!(f, "{rows},{cols}")?;
        if pixel_width > 0 || pixel_height > 0 {
            write!(f, ",{pixel_width},{pixel_height}")?;
        }
        Ok(())
    }
}
impl Default for PtySize {
fn default() -> Self {
PtySize {
rows: 24,
cols: 80,
pixel_width: 0,
pixel_height: 0,
}
}
}
/// Error produced when parsing a [`PtySize`] from a string
#[derive(Clone, Debug, PartialEq, Eq, Display, Error)]
pub enum PtySizeParseError {
    /// No rows token was provided
    MissingRows,
    /// No columns token was provided
    MissingColumns,
    /// Rows token failed to parse as a `u16`
    InvalidRows(ParseIntError),
    /// Columns token failed to parse as a `u16`
    InvalidColumns(ParseIntError),
    /// Pixel width token failed to parse as a `u16`
    InvalidPixelWidth(ParseIntError),
    /// Pixel height token failed to parse as a `u16`
    InvalidPixelHeight(ParseIntError),
}
impl FromStr for PtySize {
    type Err = PtySizeParseError;

    /// Attempts to parse a str into PtySize using one of the following formats:
    ///
    /// * rows,cols (defaults to 0 for pixel_width & pixel_height)
    /// * rows,cols,pixel_width,pixel_height
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut tokens = s.split(',');

        // Rows and columns are required
        let rows = tokens
            .next()
            .ok_or(PtySizeParseError::MissingRows)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidRows)?;
        let cols = tokens
            .next()
            .ok_or(PtySizeParseError::MissingColumns)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidColumns)?;

        // Pixel dimensions are optional, defaulting to zero when absent
        let pixel_width = match tokens.next() {
            Some(token) => token
                .trim()
                .parse()
                .map_err(PtySizeParseError::InvalidPixelWidth)?,
            None => 0,
        };
        let pixel_height = match tokens.next() {
            Some(token) => token
                .trim()
                .parse()
                .map_err(PtySizeParseError::InvalidPixelHeight)?,
            None => 0,
        };

        Ok(Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        })
    }
}

@ -1,425 +0,0 @@
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::PathBuf;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use super::FileType;
/// Id associated with a search
///
/// Used to correlate search results and cancellation with the originating query.
pub type SearchId = u32;
/// Represents a query to perform against the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQuery {
    /// Kind of data to examine using condition
    pub target: SearchQueryTarget,
    /// Condition to meet to be considered a match
    pub condition: SearchQueryCondition,
    /// Paths in which to perform the query
    pub paths: Vec<PathBuf>,
    /// Options to apply to the query
    #[serde(default)]
    pub options: SearchQueryOptions,
}
#[cfg(feature = "schemars")]
impl SearchQuery {
    /// Returns the JSON schema describing [`SearchQuery`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQuery)
    }
}
impl FromStr for SearchQuery {
    type Err = serde_json::error::Error;
    /// Parses search query from a JSON string
    ///
    /// # Errors
    ///
    /// Fails if the string is not a valid JSON representation of [`SearchQuery`]
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        serde_json::from_str(s)
    }
}
/// Kind of data to examine using conditions
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum SearchQueryTarget {
    /// Checks path of file, directory, or symlink
    Path,
    /// Checks contents of files
    Contents,
}
#[cfg(feature = "schemars")]
impl SearchQueryTarget {
    /// Returns the JSON schema describing [`SearchQueryTarget`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryTarget)
    }
}
/// Condition used to find a match in a search query
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum SearchQueryCondition {
    /// Text is found anywhere (all regex patterns are escaped)
    Contains { value: String },
    /// Ends with some text (all regex patterns are escaped)
    EndsWith { value: String },
    /// Matches some text exactly (all regex patterns are escaped)
    Equals { value: String },
    /// Any of the conditions match
    Or { value: Vec<SearchQueryCondition> },
    /// Matches some regex
    Regex { value: String },
    /// Begins with some text (all regex patterns are escaped)
    StartsWith { value: String },
}
impl SearchQueryCondition {
    /// Creates a new instance with `Contains` variant
    pub fn contains(value: impl Into<String>) -> Self {
        Self::Contains { value: value.into() }
    }

    /// Creates a new instance with `EndsWith` variant
    pub fn ends_with(value: impl Into<String>) -> Self {
        Self::EndsWith { value: value.into() }
    }

    /// Creates a new instance with `Equals` variant
    pub fn equals(value: impl Into<String>) -> Self {
        Self::Equals { value: value.into() }
    }

    /// Creates a new instance with `Or` variant
    pub fn or<I, C>(value: I) -> Self
    where
        I: IntoIterator<Item = C>,
        C: Into<SearchQueryCondition>,
    {
        Self::Or {
            value: value.into_iter().map(Into::into).collect(),
        }
    }

    /// Creates a new instance with `Regex` variant
    pub fn regex(value: impl Into<String>) -> Self {
        Self::Regex { value: value.into() }
    }

    /// Creates a new instance with `StartsWith` variant
    pub fn starts_with(value: impl Into<String>) -> Self {
        Self::StartsWith { value: value.into() }
    }

    /// Converts the condition in a regex string
    ///
    /// Literal variants escape their text; `Or` joins its children with `|`
    /// so the result matches when any child's pattern matches.
    pub fn to_regex_string(&self) -> String {
        match self {
            Self::Contains { value } => regex::escape(value),
            Self::EndsWith { value } => format!(r"{}$", regex::escape(value)),
            Self::Equals { value } => format!(r"^{}$", regex::escape(value)),
            Self::Regex { value } => value.to_string(),
            Self::StartsWith { value } => format!(r"^{}", regex::escape(value)),
            Self::Or { value } => value
                .iter()
                .map(|condition| condition.to_regex_string())
                .collect::<Vec<_>>()
                .join("|"),
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryCondition {
    /// Returns the JSON schema describing [`SearchQueryCondition`]
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryCondition)
    }
}
impl FromStr for SearchQueryCondition {
    type Err = std::convert::Infallible;
    /// Treats the entire string as a regex pattern, producing the `Regex`
    /// variant; this cannot fail
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self::regex(s))
    }
}
/// Options associated with a search query
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(default)]
pub struct SearchQueryOptions {
    /// Restrict search to only these file types (otherwise all are allowed).
    pub allowed_file_types: HashSet<FileType>,
    /// Regex to use to filter paths being searched to only those that match the include condition.
    pub include: Option<SearchQueryCondition>,
    /// Regex to use to filter paths being searched to only those that do not match the exclude
    /// condition.
    pub exclude: Option<SearchQueryCondition>,
    /// If true, will search upward through parent directories rather than the traditional downward
    /// search that recurses through all children directories.
    ///
    /// Note that this will use maximum depth to apply to the reverse direction, and will only look
    /// through each ancestor directory's immediate entries. In other words, this will not result
    /// in recursing through sibling directories.
    ///
    /// An upward search will ALWAYS search the contents of a directory, so this means providing a
    /// path to a directory will search its entries EVEN if the max_depth is 0.
    pub upward: bool,
    /// Search should follow symbolic links.
    pub follow_symbolic_links: bool,
    /// Maximum results to return before stopping the query.
    pub limit: Option<u64>,
    /// Maximum depth (directories) to search
    ///
    /// The smallest depth is 0 and always corresponds to the path given to the new function on
    /// this type. Its direct descendents have depth 1, and their descendents have depth 2, and so
    /// on.
    ///
    /// Note that this will not simply filter the entries of the iterator, but it will actually
    /// avoid descending into directories when the depth is exceeded.
    pub max_depth: Option<u64>,
    /// Amount of results to batch before sending back excluding final submission that will always
    /// include the remaining results even if less than pagination request.
    pub pagination: Option<u64>,
}
#[cfg(feature = "schemars")]
impl SearchQueryOptions {
    /// Returns the JSON schema describing [`SearchQueryOptions`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryOptions)
    }
}
/// Represents a match for a search query
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum SearchQueryMatch {
    /// Matches part of a file's path
    Path(SearchQueryPathMatch),
    /// Matches part of a file's contents
    Contents(SearchQueryContentsMatch),
}
impl SearchQueryMatch {
    /// Consumes the match, returning the inner [`SearchQueryPathMatch`] if
    /// this is a path match, otherwise `None`.
    pub fn into_path_match(self) -> Option<SearchQueryPathMatch> {
        if let Self::Path(x) = self {
            Some(x)
        } else {
            None
        }
    }

    /// Consumes the match, returning the inner [`SearchQueryContentsMatch`] if
    /// this is a contents match, otherwise `None`.
    pub fn into_contents_match(self) -> Option<SearchQueryContentsMatch> {
        if let Self::Contents(x) = self {
            Some(x)
        } else {
            None
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryMatch {
    /// Returns the JSON schema describing [`SearchQueryMatch`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryMatch)
    }
}
/// Represents details for a match on a path
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQueryPathMatch {
    /// Path associated with the match
    pub path: PathBuf,
    /// Collection of matches tied to `path` where each submatch's byte offset is relative to
    /// `path`
    pub submatches: Vec<SearchQuerySubmatch>,
}
#[cfg(feature = "schemars")]
impl SearchQueryPathMatch {
    /// Returns the JSON schema describing [`SearchQueryPathMatch`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryPathMatch)
    }
}
/// Represents details for a match on a file's contents
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQueryContentsMatch {
    /// Path to file whose contents match
    pub path: PathBuf,
    /// Line(s) that matched
    pub lines: SearchQueryMatchData,
    /// Line number where match starts (base index 1)
    pub line_number: u64,
    /// Absolute byte offset corresponding to the start of `lines` in the data being searched
    pub absolute_offset: u64,
    /// Collection of matches tied to `lines` where each submatch's byte offset is relative to
    /// `lines` and not the overall content
    pub submatches: Vec<SearchQuerySubmatch>,
}
#[cfg(feature = "schemars")]
impl SearchQueryContentsMatch {
    /// Returns the JSON schema describing [`SearchQueryContentsMatch`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryContentsMatch)
    }
}
/// Represents details for a single submatch within a path or contents match
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQuerySubmatch {
    /// Content matched by query
    pub r#match: SearchQueryMatchData,
    /// Byte offset representing start of submatch (inclusive)
    pub start: u64,
    /// Byte offset representing end of submatch (exclusive)
    pub end: u64,
}
#[cfg(feature = "schemars")]
impl SearchQuerySubmatch {
    /// Returns the JSON schema describing [`SearchQuerySubmatch`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQuerySubmatch)
    }
}
/// Represents the raw data of a match, either as valid UTF-8 text or raw bytes
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(
    rename_all = "snake_case",
    deny_unknown_fields,
    tag = "type",
    content = "value"
)]
pub enum SearchQueryMatchData {
    /// Match represented as UTF-8 text
    Text(String),
    /// Match represented as bytes
    Bytes(Vec<u8>),
}
impl SearchQueryMatchData {
    /// Creates a new instance with `Text` variant
    pub fn text(value: impl Into<String>) -> Self {
        Self::Text(value.into())
    }

    /// Creates a new instance with `Bytes` variant
    pub fn bytes(value: impl Into<Vec<u8>>) -> Self {
        Self::Bytes(value.into())
    }

    /// Returns the UTF-8 str reference to the data, if is valid UTF-8
    pub fn to_str(&self) -> Option<&str> {
        match self {
            Self::Text(text) => Some(text.as_str()),
            Self::Bytes(bytes) => std::str::from_utf8(bytes).ok(),
        }
    }

    /// Converts data to a UTF-8 string, replacing any invalid UTF-8 sequences with
    /// [`U+FFFD REPLACEMENT CHARACTER`](https://doc.rust-lang.org/nightly/core/char/const.REPLACEMENT_CHARACTER.html)
    pub fn to_string_lossy(&self) -> Cow<'_, str> {
        match self {
            Self::Text(text) => Cow::Borrowed(text.as_str()),
            Self::Bytes(bytes) => String::from_utf8_lossy(bytes),
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryMatchData {
    /// Returns the JSON schema describing [`SearchQueryMatchData`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryMatchData)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    mod search_query_condition {
        use test_log::test;

        use super::*;

        // Verifies that each condition variant produces the expected regex
        // string: `contains` escapes user input, `ends_with`/`starts_with`/
        // `equals` add anchors around the escaped input, `or` joins child
        // regexes with `|`, and `regex` passes its pattern through untouched.
        #[test]
        fn to_regex_string_should_convert_to_appropriate_regex_and_escape_as_needed() {
            assert_eq!(
                SearchQueryCondition::contains("t^es$t").to_regex_string(),
                r"t\^es\$t"
            );
            assert_eq!(
                SearchQueryCondition::ends_with("t^es$t").to_regex_string(),
                r"t\^es\$t$"
            );
            assert_eq!(
                SearchQueryCondition::equals("t^es$t").to_regex_string(),
                r"^t\^es\$t$"
            );
            assert_eq!(
                SearchQueryCondition::or([
                    SearchQueryCondition::contains("t^es$t"),
                    SearchQueryCondition::equals("t^es$t"),
                    SearchQueryCondition::regex("^test$"),
                ])
                .to_regex_string(),
                r"t\^es\$t|^t\^es\$t$|^test$"
            );
            assert_eq!(
                SearchQueryCondition::regex("test").to_regex_string(),
                "test"
            );
            assert_eq!(
                SearchQueryCondition::starts_with("t^es$t").to_regex_string(),
                r"^t\^es\$t"
            );
        }
    }
}

@ -1,59 +0,0 @@
use std::env;
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
/// Represents information about a system
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SystemInfo {
    /// Family of the operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html
    pub family: String,
    /// Name of the specific operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.OS.html
    pub os: String,
    /// Architecture of the CPU as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html
    pub arch: String,
    /// Current working directory of the running server process
    pub current_dir: PathBuf,
    /// Primary separator for path components for the current platform
    /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html
    pub main_separator: char,
    /// Name of the user running the server process
    pub username: String,
    /// Default shell tied to user running the server process
    pub shell: String,
}
#[cfg(feature = "schemars")]
impl SystemInfo {
    /// Returns the JSON schema describing [`SystemInfo`].
    /// Only available when the `schemars` feature is enabled.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SystemInfo)
    }
}
impl Default for SystemInfo {
    /// Builds a snapshot of the current system by consulting compile-time
    /// constants and the process environment at call time.
    fn default() -> Self {
        // Resolve the user's default shell up front, falling back to a
        // platform-appropriate default when the env var is unset.
        let shell = if cfg!(windows) {
            env::var("ComSpec").unwrap_or_else(|_| String::from("cmd.exe"))
        } else {
            env::var("SHELL").unwrap_or_else(|_| String::from("/bin/sh"))
        };

        Self {
            family: env::consts::FAMILY.to_string(),
            os: env::consts::OS.to_string(),
            arch: env::consts::ARCH.to_string(),
            // NOTE(review): falls back to an empty path if the cwd is
            // inaccessible — presumably intentional; confirm with callers.
            current_dir: env::current_dir().unwrap_or_default(),
            main_separator: std::path::MAIN_SEPARATOR,
            username: whoami::username(),
            shell,
        }
    }
}

@ -1,26 +0,0 @@
use serde::{Deserialize, Serialize};
pub(crate) fn deserialize_u128_option<'de, D>(deserializer: D) -> Result<Option<u128>, D::Error>
where
D: serde::Deserializer<'de>,
{
match Option::<String>::deserialize(deserializer)? {
Some(s) => match s.parse::<u128>() {
Ok(value) => Ok(Some(value)),
Err(error) => Err(serde::de::Error::custom(format!(
"Cannot convert to u128 with error: {error:?}"
))),
},
None => Ok(None),
}
}
/// Serializes an optional `u128` as its decimal string form (or a unit for
/// `None`), since many formats cannot represent 128-bit integers natively.
pub(crate) fn serialize_u128_option<S: serde::Serializer>(
    val: &Option<u128>,
    s: S,
) -> Result<S::Ok, S::Error> {
    if let Some(v) = val {
        v.to_string().serialize(s)
    } else {
        s.serialize_unit()
    }
}

@ -0,0 +1,325 @@
use std::io;
use std::path::PathBuf;
use async_trait::async_trait;
use distant_core::{
DistantApi, DistantApiServerHandler, DistantChannelExt, DistantClient, DistantCtx,
};
use distant_net::auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener, Version};
use distant_net::server::{Server, ServerRef};
use distant_protocol::PROTOCOL_VERSION;
/// Stands up an inmemory client and server using the given api.
///
/// Both sides are wired over a paired in-memory transport (no networking) and
/// both advertise the same protocol version so version negotiation succeeds.
/// Returns the connected client plus the server ref; dropping the server ref
/// ends the test server.
async fn setup(api: impl DistantApi + Send + Sync + 'static) -> (DistantClient, ServerRef) {
    let (t1, t2) = InmemoryTransport::pair(100);

    // Server side: no authentication (Verifier::none) since transport is local
    let server = Server::new()
        .handler(DistantApiServerHandler::new(api))
        .verifier(Verifier::none())
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        .start(OneshotListener::from_value(t2))
        .expect("Failed to start server");

    // Client side: dummy auth handler pairs with the server's none-verifier
    let client: DistantClient = Client::build()
        .auth_handler(DummyAuthHandler)
        .connector(t1)
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        .connect()
        .await
        .expect("Failed to connect to server");

    (client, server)
}
mod single {
    use test_log::test;

    use super::*;

    // Tests covering a single request/response round trip through the
    // in-memory client/server pair created by `setup`.

    #[test(tokio::test)]
    async fn should_support_single_request_returning_error() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Err(io::Error::new(io::ErrorKind::NotFound, "test error"))
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Error kind and message should survive the protocol round trip
        let error = client.read_file(PathBuf::from("file")).await.unwrap_err();
        assert_eq!(error.kind(), io::ErrorKind::NotFound);
        assert_eq!(error.to_string(), "test error");
    }

    #[test(tokio::test)]
    async fn should_support_single_request_returning_success() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Ok(b"hello world".to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let contents = client.read_file(PathBuf::from("file")).await.unwrap();
        assert_eq!(contents, b"hello world");
    }
}
mod batch_parallel {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    // Tests covering batch requests processed in parallel (the default when
    // no sequence header is set on the request).

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_parallel() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            // Returns the current time in millis as bytes; the "slow" path
            // sleeps first so ordering differences become measurable.
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in parallel as the first and third requests should not be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff <= 500, "Sequential ordering detected");
    }

    #[test(tokio::test)]
    async fn should_run_all_requests_even_if_some_fail() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and success
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(payloads[2], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}
mod batch_sequence {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    // Tests covering batch requests processed sequentially, which is opted
    // into by setting the "sequence" header on the request.

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_sequence() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            // Returns the current time in millis as bytes; the "slow" path
            // sleeps first so ordering differences become measurable.
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in sequence as the first and third requests should be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff > 500, "Parallel ordering detected");
    }

    #[test(tokio::test)]
    async fn should_interrupt_any_requests_following_a_failure() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and interrupt
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(
                &payloads[2],
                distant_protocol::Response::Error(distant_protocol::Error { kind, .. })
                if matches!(kind, distant_protocol::ErrorKind::Interrupted)
            ),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}

@ -0,0 +1,46 @@
[package]
name = "distant-local"
description = "Library implementing distant API for local interactions"
categories = ["network-programming"]
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
default = ["macos-fsevent"]
# If specified, will use MacOS FSEvent for file watching
macos-fsevent = ["notify/macos_fsevent"]
# If specified, will use MacOS kqueue for file watching
macos-kqueue = ["notify/macos_kqueue"]
[dependencies]
async-trait = "0.1.68"
distant-core = { version = "=0.20.0", path = "../distant-core" }
grep = "0.2.12"
ignore = "0.4.20"
log = "0.4.18"
# NOTE(review): hard-coding the "macos_fsevent" feature here appears to
# override the macos-kqueue feature switch declared above — confirm whether
# this line should omit the feature and rely on the [features] table instead.
notify = { version = "6.0.0", default-features = false, features = ["macos_fsevent"] }
notify-debouncer-full = { version = "0.1.0", default-features = false }
num_cpus = "1.15.0"
portable-pty = "0.8.1"
rand = { version = "0.8.5", features = ["getrandom"] }
shell-words = "1.1.0"
tokio = { version = "1.28.2", features = ["full"] }
walkdir = "2.3.3"
whoami = "1.4.0"
winsplit = "0.1.0"
[dev-dependencies]
assert_fs = "1.0.13"
env_logger = "0.10.0"
indoc = "2.0.1"
once_cell = "1.17.2"
predicates = "3.0.3"
rstest = "0.17.0"
test-log = "0.2.11"

@ -0,0 +1,45 @@
# distant local
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-local.svg
[distant_crates_lnk]: https://crates.io/crates/distant-local
[distant_doc_img]: https://docs.rs/distant-local/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-local
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
The `distant-local` library acts as the primary implementation of a distant
server that powers the CLI. The logic acts on the local machine of the server
and is designed to be used as the foundation for distant operation handling.
## Installation
You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-local = "0.20"
```
## Examples
```rust,no_run
use distant_local::{Config, new_handler};
// Create a server API handler to be used with the server
let handler = new_handler(Config::default()).unwrap();
```
## License
This project is licensed under either of
Apache License, Version 2.0, (LICENSE-APACHE or
[apache-license][apache-license]) or the MIT license (LICENSE-MIT or
[mit-license][mit-license]) at your option.
[apache-license]: http://www.apache.org/licenses/LICENSE-2.0
[mit-license]: http://opensource.org/licenses/MIT

File diff suppressed because it is too large Load Diff

@ -1,11 +1,10 @@
use std::future::Future;
use std::pin::Pin;
use distant_core::protocol::{ProcessId, PtySize};
use tokio::io;
use tokio::sync::mpsc;
use crate::protocol::{ProcessId, PtySize};
mod pty;
pub use pty::*;

@ -3,6 +3,7 @@ use std::io::{self, Read, Write};
use std::path::PathBuf;
use std::sync::{Arc, Mutex, Weak};
use distant_core::protocol::Environment;
use log::*;
use portable_pty::{CommandBuilder, MasterPty, PtySize as PortablePtySize};
use tokio::sync::mpsc;
@ -13,7 +14,6 @@ use super::{
ProcessPty, PtySize, WaitRx,
};
use crate::constants::{MAX_PIPE_CHUNK_SIZE, READ_PAUSE_DURATION};
use crate::protocol::Environment;
/// Represents a process that is associated with a pty
pub struct PtyProcess {

@ -2,6 +2,7 @@ use std::ffi::OsStr;
use std::path::PathBuf;
use std::process::Stdio;
use distant_core::protocol::Environment;
use log::*;
use tokio::io;
use tokio::process::Command;
@ -12,7 +13,6 @@ use super::{
wait, ExitStatus, FutureReturn, InputChannel, NoProcessPty, OutputChannel, Process, ProcessId,
ProcessKiller, WaitRx,
};
use crate::protocol::Environment;
mod tasks;

@ -1,5 +1,7 @@
use std::io;
use crate::config::Config;
mod process;
pub use process::*;
@ -22,11 +24,13 @@ pub struct GlobalState {
}
impl GlobalState {
pub fn initialize() -> io::Result<Self> {
pub fn initialize(config: Config) -> io::Result<Self> {
Ok(Self {
process: ProcessState::new(),
search: SearchState::new(),
watcher: WatcherState::initialize()?,
watcher: WatcherBuilder::new()
.with_config(config.watch)
.initialize()?,
})
}
}

@ -3,12 +3,11 @@ use std::io;
use std::ops::Deref;
use std::path::PathBuf;
use distant_net::server::Reply;
use distant_core::net::server::Reply;
use distant_core::protocol::{Environment, ProcessId, PtySize, Response};
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::protocol::{Environment, ProcessId, PtySize, Response};
mod instance;
pub use instance::*;

@ -2,14 +2,14 @@ use std::future::Future;
use std::io;
use std::path::PathBuf;
use distant_net::server::Reply;
use distant_core::net::server::Reply;
use distant_core::protocol::{Environment, ProcessId, PtySize, Response};
use log::*;
use tokio::task::JoinHandle;
use crate::api::local::process::{
use crate::api::process::{
InputChannel, OutputChannel, Process, ProcessKiller, ProcessPty, PtyProcess, SimpleProcess,
};
use crate::protocol::{Environment, ProcessId, PtySize, Response};
/// Holds information related to a spawned process on the server
pub struct ProcessInstance {
@ -85,6 +85,7 @@ impl ProcessInstance {
let args = cmd_and_args.split_off(1);
let cmd = cmd_and_args.into_iter().next().unwrap();
debug!("Spawning process: {cmd} {args:?}");
let mut child: Box<dyn Process> = match pty {
Some(size) => Box::new(PtyProcess::spawn(
cmd.clone(),
@ -173,7 +174,7 @@ async fn stdout_task(
loop {
match stdout.recv().await {
Ok(Some(data)) => {
reply.send(Response::ProcStdout { id, data }).await?;
reply.send(Response::ProcStdout { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -189,7 +190,7 @@ async fn stderr_task(
loop {
match stderr.recv().await {
Ok(Some(data)) => {
reply.send(Response::ProcStderr { id, data }).await?;
reply.send(Response::ProcStderr { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -205,15 +206,11 @@ async fn wait_task(
let status = child.wait().await;
match status {
Ok(status) => {
reply
.send(Response::ProcDone {
id,
success: status.success,
code: status.code,
})
.await
}
Err(x) => reply.send(Response::from(x)).await,
Ok(status) => reply.send(Response::ProcDone {
id,
success: status.success,
code: status.code,
}),
Err(x) => reply.send(Response::from(x)),
}
}

@ -3,7 +3,12 @@ use std::ops::Deref;
use std::path::Path;
use std::{cmp, io};
use distant_net::server::Reply;
use distant_core::net::server::Reply;
use distant_core::protocol::{
Response, SearchId, SearchQuery, SearchQueryContentsMatch, SearchQueryMatch,
SearchQueryMatchData, SearchQueryOptions, SearchQueryPathMatch, SearchQuerySubmatch,
SearchQueryTarget,
};
use grep::matcher::Matcher;
use grep::regex::{RegexMatcher, RegexMatcherBuilder};
use grep::searcher::{BinaryDetection, Searcher, SearcherBuilder, Sink, SinkMatch};
@ -13,12 +18,6 @@ use log::*;
use tokio::sync::{broadcast, mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::protocol::{
Response, SearchId, SearchQuery, SearchQueryContentsMatch, SearchQueryMatch,
SearchQueryMatchData, SearchQueryOptions, SearchQueryPathMatch, SearchQuerySubmatch,
SearchQueryTarget,
};
const MAXIMUM_SEARCH_THREADS: usize = 12;
/// Holds information related to active searches on the server
@ -138,7 +137,11 @@ async fn search_task(tx: mpsc::Sender<InnerSearchMsg>, mut rx: mpsc::Receiver<In
Ok(executor) => executor,
Err(x) => {
let _ = cb.send(Err(x));
return;
// NOTE: We do not want to exit our task! This processes all of our search
// requests, so if we exit, things have gone terrible. This is just a
// regular error, so we merely continue to wait for the next request.
continue;
}
};
@ -225,13 +228,10 @@ impl SearchQueryReporter {
if let Some(len) = options.pagination {
if matches.len() as u64 >= len {
trace!("[Query {id}] Reached {len} paginated matches");
if let Err(x) = reply
.send(Response::SearchResults {
id,
matches: std::mem::take(&mut matches),
})
.await
{
if let Err(x) = reply.send(Response::SearchResults {
id,
matches: std::mem::take(&mut matches),
}) {
error!("[Query {id}] Failed to send paginated matches: {x}");
}
}
@ -241,14 +241,14 @@ impl SearchQueryReporter {
// Send any remaining matches
if !matches.is_empty() {
trace!("[Query {id}] Sending {} remaining matches", matches.len());
if let Err(x) = reply.send(Response::SearchResults { id, matches }).await {
if let Err(x) = reply.send(Response::SearchResults { id, matches }) {
error!("[Query {id}] Failed to send final matches: {x}");
}
}
// Report that we are done
trace!("[Query {id}] Reporting as done");
if let Err(x) = reply.send(Response::SearchDone { id }).await {
if let Err(x) = reply.send(Response::SearchDone { id }) {
error!("[Query {id}] Failed to send done status: {x}");
}
}
@ -345,6 +345,13 @@ impl SearchQueryExecutor {
.build()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?,
)
.standard_filters(false)
.hidden(query.options.ignore_hidden)
.ignore(query.options.use_ignore_files)
.parents(query.options.use_parent_ignore_files)
.git_ignore(query.options.use_git_ignore_files)
.git_global(query.options.use_global_git_ignore_files)
.git_exclude(query.options.use_git_exclude_files)
.skip_stdout(true);
if query.options.upward {
@ -807,16 +814,16 @@ mod tests {
use std::path::PathBuf;
use assert_fs::prelude::*;
use distant_core::protocol::{FileType, SearchQueryCondition, SearchQueryMatchData};
use test_log::test;
use super::*;
use crate::protocol::{FileType, SearchQueryCondition, SearchQueryMatchData};
fn make_path(path: &str) -> PathBuf {
use std::path::MAIN_SEPARATOR;
use std::path::MAIN_SEPARATOR_STR;
// Ensure that our path is compliant with the current platform
let path = path.replace('/', &MAIN_SEPARATOR.to_string());
let path = path.replace('/', MAIN_SEPARATOR_STR);
PathBuf::from(path)
}
@ -843,7 +850,7 @@ mod tests {
let root = setup_dir(Vec::new());
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -870,7 +877,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -947,7 +954,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1022,7 +1029,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1090,7 +1097,7 @@ mod tests {
let root = setup_dir(vec![("path/to/file.txt", "aa ab ac\nba bb bc\nca cb cc")]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1184,7 +1191,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1277,7 +1284,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1311,7 +1318,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1354,7 +1361,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
target: SearchQueryTarget::Path,
@ -1442,7 +1449,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1494,7 +1501,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1560,7 +1567,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: We provide regex that matches an invalid UTF-8 character by disabling the u flag
// and checking for 0x9F (159)
@ -1612,7 +1619,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: We provide regex that matches an invalid UTF-8 character by disabling the u flag
// and checking for 0x9F (159)
@ -1648,7 +1655,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1732,7 +1739,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1787,7 +1794,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: Following symbolic links on its own does nothing, but when combined with a file
// type filter, it will evaluate the underlying type of symbolic links and filter
@ -1835,7 +1842,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![
@ -1919,7 +1926,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![path],
target: SearchQueryTarget::Path,

@ -2,56 +2,57 @@ use std::collections::HashMap;
use std::io;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use distant_net::common::ConnectionId;
use distant_core::net::common::ConnectionId;
use distant_core::protocol::{Change, ChangeDetails, ChangeDetailsAttribute, ChangeKind};
use log::*;
use notify::event::{AccessKind, AccessMode, MetadataKind, ModifyKind, RenameMode};
use notify::{
Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind,
Event as WatcherEvent, PollWatcher, RecursiveMode, Watcher,
Event as WatcherEvent, EventKind, PollWatcher, RecommendedWatcher, RecursiveMode, Watcher,
};
use notify_debouncer_full::{new_debouncer_opt, DebounceEventResult, Debouncer, FileIdMap};
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{self};
use tokio::sync::oneshot;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::config::WatchConfig;
use crate::constants::SERVER_WATCHER_CAPACITY;
use crate::protocol::ChangeKind;
mod path;
pub use path::*;
/// Holds information related to watched paths on the server
pub struct WatcherState {
channel: WatcherChannel,
task: JoinHandle<()>,
/// Builder for a watcher.
#[derive(Default)]
pub struct WatcherBuilder {
config: WatchConfig,
}
impl Drop for WatcherState {
/// Aborts the task that handles watcher path operations and management
fn drop(&mut self) {
self.abort();
impl WatcherBuilder {
/// Creates a new builder configured to use the native watcher using default configuration.
pub fn new() -> Self {
Self::default()
}
/// Swaps the configuration with the provided one.
pub fn with_config(self, config: WatchConfig) -> Self {
Self { config }
}
}
impl WatcherState {
/// Will create a watcher and initialize watched paths to be empty
pub fn initialize() -> io::Result<Self> {
pub fn initialize(self) -> io::Result<WatcherState> {
// NOTE: Cannot be something small like 1 as this seems to cause a deadlock sometimes
// with a large volume of watch requests
let (tx, rx) = mpsc::channel(SERVER_WATCHER_CAPACITY);
macro_rules! spawn_watcher {
($watcher:ident) => {{
Self {
channel: WatcherChannel { tx },
task: tokio::spawn(watcher_task($watcher, rx)),
}
}};
}
let watcher_config = WatcherConfig::default()
.with_compare_contents(self.config.compare_contents)
.with_poll_interval(self.config.poll_interval.unwrap_or(Duration::from_secs(30)));
macro_rules! event_handler {
($tx:ident) => {
move |res| match $tx.try_send(match res {
macro_rules! process_event {
($tx:ident, $evt:expr) => {
match $tx.try_send(match $evt {
Ok(x) => InnerWatcherMsg::Event { ev: x },
Err(x) => InnerWatcherMsg::Error { err: x },
}) {
@ -69,30 +70,83 @@ impl WatcherState {
};
}
macro_rules! new_debouncer {
($watcher:ident, $tx:ident) => {{
new_debouncer_opt::<_, $watcher, FileIdMap>(
self.config.debounce_timeout,
self.config.debounce_tick_rate,
move |result: DebounceEventResult| match result {
Ok(events) => {
for x in events {
process_event!($tx, Ok(x));
}
}
Err(errors) => {
for x in errors {
process_event!($tx, Err(x));
}
}
},
FileIdMap::new(),
watcher_config,
)
}};
}
macro_rules! spawn_task {
($debouncer:expr) => {{
WatcherState {
channel: WatcherChannel { tx },
task: tokio::spawn(watcher_task($debouncer, rx)),
}
}};
}
let tx = tx.clone();
let result = {
let tx = tx.clone();
notify::recommended_watcher(event_handler!(tx))
};
match result {
Ok(watcher) => Ok(spawn_watcher!(watcher)),
Err(x) => match x.kind {
// notify-rs has a bug on Mac M1 with Docker and Linux, so we detect that error
// and fall back to the poll watcher if this occurs
//
// https://github.com/notify-rs/notify/issues/423
WatcherErrorKind::Io(x) if x.raw_os_error() == Some(38) => {
warn!("Recommended watcher is unsupported! Falling back to polling watcher!");
let watcher = PollWatcher::new(event_handler!(tx), WatcherConfig::default())
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
Ok(spawn_watcher!(watcher))
if self.config.native {
let result = {
let tx = tx.clone();
new_debouncer!(RecommendedWatcher, tx)
};
match result {
Ok(debouncer) => Ok(spawn_task!(debouncer)),
Err(x) => {
match x.kind {
// notify-rs has a bug on Mac M1 with Docker and Linux, so we detect that error
// and fall back to the poll watcher if this occurs
//
// https://github.com/notify-rs/notify/issues/423
WatcherErrorKind::Io(x) if x.raw_os_error() == Some(38) => {
warn!("Recommended watcher is unsupported! Falling back to polling watcher!");
Ok(spawn_task!(new_debouncer!(PollWatcher, tx)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?))
}
_ => Err(io::Error::new(io::ErrorKind::Other, x)),
}
}
_ => Err(io::Error::new(io::ErrorKind::Other, x)),
},
}
} else {
Ok(spawn_task!(new_debouncer!(PollWatcher, tx)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?))
}
}
}
/// Holds information related to watched paths on the server
pub struct WatcherState {
channel: WatcherChannel,
task: JoinHandle<()>,
}
impl Drop for WatcherState {
/// Aborts the task that handles watcher path operations and management
fn drop(&mut self) {
self.abort();
}
}
impl WatcherState {
/// Aborts the watcher task
pub fn abort(&self) {
self.task.abort();
@ -169,7 +223,12 @@ enum InnerWatcherMsg {
},
}
async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWatcherMsg>) {
async fn watcher_task<W>(
mut debouncer: Debouncer<W, FileIdMap>,
mut rx: mpsc::Receiver<InnerWatcherMsg>,
) where
W: Watcher,
{
// TODO: Optimize this in some way to be more performant than
// checking every path whenever an event comes in
let mut registered_paths: Vec<RegisteredPath> = Vec::new();
@ -193,7 +252,8 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWat
// Send an okay because we always succeed in this case
let _ = cb.send(Ok(()));
} else {
let res = watcher
let res = debouncer
.watcher()
.watch(
registered_path.path(),
if registered_path.is_recursive() {
@ -233,7 +293,8 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWat
// 3. Otherwise, we return okay because we succeeded
if *cnt <= removed_cnt {
let _ = cb.send(
watcher
debouncer
.watcher()
.unwatch(&path)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x)),
);
@ -256,16 +317,91 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWat
}
}
InnerWatcherMsg::Event { ev } => {
let kind = ChangeKind::from(ev.kind);
let timestamp = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("System time before unix epoch")
.as_secs();
let kind = match ev.kind {
EventKind::Access(AccessKind::Read) => ChangeKind::Access,
EventKind::Modify(ModifyKind::Metadata(_)) => ChangeKind::Attribute,
EventKind::Access(AccessKind::Close(AccessMode::Write)) => {
ChangeKind::CloseWrite
}
EventKind::Access(AccessKind::Close(_)) => ChangeKind::CloseNoWrite,
EventKind::Create(_) => ChangeKind::Create,
EventKind::Remove(_) => ChangeKind::Delete,
EventKind::Modify(ModifyKind::Data(_)) => ChangeKind::Modify,
EventKind::Access(AccessKind::Open(_)) => ChangeKind::Open,
EventKind::Modify(ModifyKind::Name(_)) => ChangeKind::Rename,
_ => ChangeKind::Unknown,
};
for registered_path in registered_paths.iter() {
match registered_path.filter_and_send(kind, &ev.paths).await {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
// For rename both, we assume the paths is a pair that represents before and
// after, so we want to grab the before and use it!
let (paths, renamed): (&[PathBuf], Option<PathBuf>) = match ev.kind {
EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => (
&ev.paths[0..1],
if ev.paths.len() > 1 {
ev.paths.last().cloned()
} else {
None
},
),
_ => (&ev.paths, None),
};
for path in paths {
let attribute = match ev.kind {
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
Some(ChangeDetailsAttribute::Ownership)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
Some(ChangeDetailsAttribute::Permissions)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
Some(ChangeDetailsAttribute::Timestamp)
}
_ => None,
};
// Calculate a timestamp for creation & modification paths
let details_timestamp = match ev.kind {
EventKind::Create(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.created().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
EventKind::Modify(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.modified().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
_ => None,
};
let change = Change {
timestamp,
kind,
path: path.to_path_buf(),
details: ChangeDetails {
attribute,
renamed: renamed.clone(),
timestamp: details_timestamp,
extra: ev.info().map(ToString::to_string),
},
};
match registered_path.filter_and_send(change) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
}
@ -274,10 +410,11 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWat
error!("Watcher encountered an error {} for {:?}", msg, err.paths);
for registered_path in registered_paths.iter() {
match registered_path
.filter_and_send_error(&msg, &err.paths, !err.paths.is_empty())
.await
{
match registered_path.filter_and_send_error(
&msg,
&err.paths,
!err.paths.is_empty(),
) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",

@ -2,10 +2,9 @@ use std::hash::{Hash, Hasher};
use std::path::{Path, PathBuf};
use std::{fmt, io};
use distant_net::common::ConnectionId;
use distant_net::server::Reply;
use crate::protocol::{Change, ChangeKind, ChangeKindSet, Error, Response};
use distant_core::net::common::ConnectionId;
use distant_core::net::server::Reply;
use distant_core::protocol::{Change, ChangeKindSet, Error, Response};
/// Represents a path registered with a watcher that includes relevant state including
/// the ability to reply with
@ -120,39 +119,27 @@ impl RegisteredPath {
}
/// Sends a reply for a change tied to this registered path, filtering
/// out any paths that are not applicable
/// out any changes that are not applicable.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send<T>(&self, kind: ChangeKind, paths: T) -> io::Result<bool>
where
T: IntoIterator,
T::Item: AsRef<Path>,
{
if !self.allowed().contains(&kind) {
/// Returns true if message was sent, and false if not.
pub fn filter_and_send(&self, change: Change) -> io::Result<bool> {
if !self.allowed().contains(&change.kind) {
return Ok(false);
}
let paths: Vec<PathBuf> = paths
.into_iter()
.filter(|p| self.applies_to_path(p.as_ref()))
.map(|p| p.as_ref().to_path_buf())
.collect();
if !paths.is_empty() {
self.reply
.send(Response::Changed(Change { kind, paths }))
.await
.map(|_| true)
// Only send if this registered path applies to the changed path
if self.applies_to_path(&change.path) {
self.reply.send(Response::Changed(change)).map(|_| true)
} else {
Ok(false)
}
}
/// Sends an error message and includes paths if provided, skipping sending the message if
/// no paths match and `skip_if_no_paths` is true
/// no paths match and `skip_if_no_paths` is true.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send_error<T>(
/// Returns true if message was sent, and false if not.
pub fn filter_and_send_error<T>(
&self,
msg: &str,
paths: T,
@ -175,7 +162,6 @@ impl RegisteredPath {
} else {
Response::Error(Error::from(format!("{msg} about {paths:?}")))
})
.await
.map(|_| true)
} else {
Ok(false)

@ -0,0 +1,28 @@
use std::time::Duration;
/// General configuration for the local API, currently holding only
/// the file/directory watch settings.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct Config {
    /// Settings that control how paths are watched for changes.
    pub watch: WatchConfig,
}
/// Configuration specifically for watching files and directories.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct WatchConfig {
    /// When true, prefer the platform-native watcher; when false, use the
    /// polling watcher instead.
    pub native: bool,
    /// Interval between scans when the polling watcher is used; `None`
    /// defers to the watcher's own default.
    pub poll_interval: Option<Duration>,
    /// When true, compare file contents to detect changes (used by the
    /// polling watcher).
    pub compare_contents: bool,
    /// Window of time used to debounce change events before reporting them.
    pub debounce_timeout: Duration,
    /// Optional tick rate for the debouncer; `None` defers to its default.
    pub debounce_tick_rate: Option<Duration>,
}

impl Default for WatchConfig {
    /// Native watching enabled with a half-second debounce window and no
    /// explicit poll interval or tick rate.
    fn default() -> Self {
        let half_second = Duration::from_millis(500);
        WatchConfig {
            native: true,
            poll_interval: None,
            compare_contents: false,
            debounce_timeout: half_second,
            debounce_tick_rate: None,
        }
    }
}

@ -0,0 +1,14 @@
use std::time::Duration;

/// Capacity of the channel the server's file watcher uses to pass events
/// outbound; kept large to avoid back-pressure during event bursts
pub const SERVER_WATCHER_CAPACITY: usize = 10000;

/// Represents the maximum size (in bytes) that data will be read from pipes
/// per individual `read` call
///
/// Current setting is 16k size
pub const MAX_PIPE_CHUNK_SIZE: usize = 16384;

/// Duration to sleep between reading stdout/stderr chunks
/// to avoid sending many small messages to clients
pub const READ_PAUSE_DURATION: Duration = Duration::from_millis(1);

@ -0,0 +1,20 @@
#![doc = include_str!("../README.md")]
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod api;
mod config;
mod constants;
pub use api::Api;
pub use config::*;
use distant_core::DistantApiServerHandler;
/// Implementation of [`DistantApiServerHandler`] using [`Api`].
pub type Handler = DistantApiServerHandler<Api>;

/// Initializes a new [`Handler`].
///
/// Builds an [`Api`] from `config` and wraps it in a [`Handler`],
/// propagating any I/O error raised during initialization.
pub fn new_handler(config: Config) -> std::io::Result<Handler> {
    Api::initialize(config).map(Handler::new)
}

@ -1,5 +1,5 @@
use assert_fs::prelude::*;
use distant_core::protocol::ChangeKindSet;
use distant_core::protocol::{ChangeKind, ChangeKindSet};
use distant_core::DistantChannelExt;
use rstest::*;
use test_log::test;
@ -29,7 +29,7 @@ async fn should_handle_large_volume_of_file_watching(#[future] ctx: DistantClien
.watch(
file.path(),
false,
ChangeKindSet::modify_set(),
ChangeKindSet::new([ChangeKind::Modify]),
ChangeKindSet::empty(),
)
.await

@ -1,11 +1,12 @@
use std::net::SocketAddr;
use std::time::Duration;
use distant_core::net::auth::{DummyAuthHandler, Verifier};
use distant_core::net::client::{Client, TcpConnector};
use distant_core::net::common::authentication::{DummyAuthHandler, Verifier};
use distant_core::net::common::PortRange;
use distant_core::net::server::Server;
use distant_core::{DistantApiServerHandler, DistantClient, LocalDistantApi};
use distant_core::{DistantApiServerHandler, DistantClient};
use distant_local::Api;
use rstest::*;
use tokio::sync::mpsc;
@ -21,7 +22,7 @@ impl DistantClientCtx {
let (started_tx, mut started_rx) = mpsc::channel::<u16>(1);
tokio::spawn(async move {
if let Ok(api) = LocalDistantApi::initialize() {
if let Ok(api) = Api::initialize(Default::default()) {
let port: PortRange = "0".parse().unwrap();
let port = {
let handler = DistantApiServerHandler::new(api);

@ -3,7 +3,7 @@ name = "distant-net"
description = "Network library for distant, providing implementations to support client/server architecture"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.6"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -15,27 +15,30 @@ license = "MIT OR Apache-2.0"
async-trait = "0.1.68"
bytes = "1.4.0"
chacha20poly1305 = "0.10.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-auth = { version = "=0.20.0", path = "../distant-auth" }
dyn-clone = "1.0.11"
flate2 = "1.0.25"
flate2 = "1.0.26"
hex = "0.4.3"
hkdf = "0.12.3"
log = "0.4.17"
log = "0.4.18"
paste = "1.0.12"
p256 = { version = "0.13.0", features = ["ecdh", "pem"] }
p256 = { version = "0.13.2", features = ["ecdh", "pem"] }
rand = { version = "0.8.5", features = ["getrandom"] }
rmp = "0.8.11"
rmp-serde = "1.1.1"
sha2 = "0.10.6"
serde = { version = "1.0.159", features = ["derive"] }
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.96"
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.27.0", features = ["full"] }
# Optional dependencies based on features
schemars = { version = "0.8.12", optional = true }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
distant-auth = { version = "=0.20.0", path = "../distant-auth", features = ["tests"] }
env_logger = "0.10.0"
serde_json = "1.0.95"
serde_json = "1.0.96"
tempfile = "3.5.0"
test-log = "0.2.11"

@ -1,18 +1,13 @@
# distant net
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.64.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-net.svg
[distant_crates_lnk]: https://crates.io/crates/distant-net
[distant_doc_img]: https://docs.rs/distant-net/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-net
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.64+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2022/09/22/Rust-1.64.0.html
Library that powers the [`distant`](https://github.com/chipsenkbeil/distant)
binary.
🚧 **(Alpha stage software) This library is in rapid development and may break or change frequently!** 🚧
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details
@ -25,18 +20,9 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml
[dependencies]
distant-net = "0.19"
distant-net = "0.20"
```
## Features
Currently, the library supports the following features:
- `schemars`: derives the `schemars::JsonSchema` interface on `Request`
and `Response` data types
By default, no features are enabled on the library.
## License
This project is licensed under either of

@ -1,50 +1,12 @@
use std::io;
use async_trait::async_trait;
use distant_auth::msg::*;
use distant_auth::{AuthHandler, Authenticate, Authenticator};
use log::*;
use super::msg::*;
use super::AuthHandler;
use crate::common::{utils, FramedTransport, Transport};
/// Represents an interface for authenticating with a server.
#[async_trait]
pub trait Authenticate {
/// Performs authentication by leveraging the `handler` for any received challenge.
async fn authenticate(&mut self, mut handler: impl AuthHandler + Send) -> io::Result<()>;
}
/// Represents an interface for submitting challenges for authentication.
#[async_trait]
pub trait Authenticator: Send {
/// Issues an initialization notice and returns the response indicating which authentication
/// methods to pursue
async fn initialize(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse>;
/// Issues a challenge and returns the answers to the `questions` asked.
async fn challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse>;
/// Requests verification of some `kind` and `text`, returning true if passed verification.
async fn verify(&mut self, verification: Verification) -> io::Result<VerificationResponse>;
/// Reports information with no response expected.
async fn info(&mut self, info: Info) -> io::Result<()>;
/// Reports an error occurred during authentication, consuming the authenticator since no more
/// challenges should be issued.
async fn error(&mut self, error: Error) -> io::Result<()>;
/// Reports that the authentication has started for a specific method.
async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()>;
/// Reports that the authentication has finished successfully, consuming the authenticator
/// since no more challenges should be issued.
async fn finished(&mut self) -> io::Result<()>;
}
macro_rules! write_frame {
($transport:expr, $data:expr) => {{
let data = utils::serialize_to_vec(&$data)?;
@ -203,161 +165,20 @@ where
#[cfg(test)]
mod tests {
use distant_auth::tests::TestAuthHandler;
use test_log::test;
use tokio::sync::mpsc;
use super::*;
use crate::common::authentication::AuthMethodHandler;
#[async_trait]
trait TestAuthHandler {
async fn on_initialization(
&mut self,
_: Initialization,
) -> io::Result<InitializationResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_start_method(&mut self, _: StartMethod) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_finished(&mut self) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_challenge(&mut self, _: Challenge) -> io::Result<ChallengeResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_verification(&mut self, _: Verification) -> io::Result<VerificationResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_info(&mut self, _: Info) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_error(&mut self, _: Error) -> io::Result<()> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
}
#[async_trait]
impl<T: TestAuthHandler + Send> AuthHandler for T {
async fn on_initialization(
&mut self,
x: Initialization,
) -> io::Result<InitializationResponse> {
TestAuthHandler::on_initialization(self, x).await
}
async fn on_start_method(&mut self, x: StartMethod) -> io::Result<()> {
TestAuthHandler::on_start_method(self, x).await
}
async fn on_finished(&mut self) -> io::Result<()> {
TestAuthHandler::on_finished(self).await
}
}
#[async_trait]
impl<T: TestAuthHandler + Send> AuthMethodHandler for T {
async fn on_challenge(&mut self, x: Challenge) -> io::Result<ChallengeResponse> {
TestAuthHandler::on_challenge(self, x).await
}
async fn on_verification(&mut self, x: Verification) -> io::Result<VerificationResponse> {
TestAuthHandler::on_verification(self, x).await
}
async fn on_info(&mut self, x: Info) -> io::Result<()> {
TestAuthHandler::on_info(self, x).await
}
async fn on_error(&mut self, x: Error) -> io::Result<()> {
TestAuthHandler::on_error(self, x).await
}
}
macro_rules! auth_handler {
(@no_challenge @no_verification @tx($tx:ident, $ty:ty) $($methods:item)*) => {
auth_handler! {
@tx($tx, $ty)
async fn on_challenge(&mut self, _: Challenge) -> io::Result<ChallengeResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
async fn on_verification(
&mut self,
_: Verification,
) -> io::Result<VerificationResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
$($methods)*
}
};
(@no_challenge @tx($tx:ident, $ty:ty) $($methods:item)*) => {
auth_handler! {
@tx($tx, $ty)
async fn on_challenge(&mut self, _: Challenge) -> io::Result<ChallengeResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
$($methods)*
}
};
(@no_verification @tx($tx:ident, $ty:ty) $($methods:item)*) => {
auth_handler! {
@tx($tx, $ty)
async fn on_verification(
&mut self,
_: Verification,
) -> io::Result<VerificationResponse> {
Err(io::Error::from(io::ErrorKind::Unsupported))
}
$($methods)*
}
};
(@tx($tx:ident, $ty:ty) $($methods:item)*) => {{
#[allow(dead_code)]
struct __InlineAuthHandler {
tx: mpsc::Sender<$ty>,
}
#[async_trait]
impl TestAuthHandler for __InlineAuthHandler {
$($methods)*
}
__InlineAuthHandler { tx: $tx }
}};
}
#[test(tokio::test)]
async fn authenticator_initialization_should_be_able_to_successfully_complete_round_trip() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
let (tx, _) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, ())
async fn on_initialization(
&mut self,
initialization: Initialization,
) -> io::Result<InitializationResponse> {
Ok(InitializationResponse {
methods: initialization.methods,
})
}
t2.authenticate(TestAuthHandler {
on_initialization: Box::new(|x| Ok(InitializationResponse { methods: x.methods })),
..Default::default()
})
.await
.unwrap()
@ -386,29 +207,34 @@ mod tests {
#[test(tokio::test)]
async fn authenticator_challenge_should_be_able_to_successfully_complete_round_trip() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
let (tx, _) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_verification
@tx(tx, ())
async fn on_challenge(&mut self, challenge: Challenge) -> io::Result<ChallengeResponse> {
assert_eq!(challenge.questions, vec![Question {
label: "label".to_string(),
text: "text".to_string(),
options: vec![("question_key".to_string(), "question_value".to_string())]
t2.authenticate(TestAuthHandler {
on_challenge: Box::new(|challenge| {
assert_eq!(
challenge.questions,
vec![Question {
label: "label".to_string(),
text: "text".to_string(),
options: vec![(
"question_key".to_string(),
"question_value".to_string()
)]
.into_iter()
.collect(),
}]);
}]
);
assert_eq!(
challenge.options,
vec![("key".to_string(), "value".to_string())].into_iter().collect(),
vec![("key".to_string(), "value".to_string())]
.into_iter()
.collect(),
);
Ok(ChallengeResponse {
answers: vec!["some answer".to_string()].into_iter().collect(),
})
}
}),
..Default::default()
})
.await
.unwrap()
@ -446,23 +272,15 @@ mod tests {
#[test(tokio::test)]
async fn authenticator_verification_should_be_able_to_successfully_complete_round_trip() {
let (mut t1, mut t2) = FramedTransport::test_pair(100);
let (tx, _) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@tx(tx, ())
async fn on_verification(
&mut self,
verification: Verification,
) -> io::Result<VerificationResponse> {
t2.authenticate(TestAuthHandler {
on_verification: Box::new(|verification| {
assert_eq!(verification.kind, VerificationKind::Host);
assert_eq!(verification.text, "some text");
Ok(VerificationResponse {
valid: true,
})
}
Ok(VerificationResponse { valid: true })
}),
..Default::default()
})
.await
.unwrap()
@ -490,18 +308,12 @@ mod tests {
let (tx, mut rx) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, Info)
async fn on_info(
&mut self,
info: Info,
) -> io::Result<()> {
self.tx.send(info).await.unwrap();
t2.authenticate(TestAuthHandler {
on_info: Box::new(move |info| {
tx.try_send(info).unwrap();
Ok(())
}
}),
..Default::default()
})
.await
.unwrap()
@ -532,15 +344,12 @@ mod tests {
let (tx, mut rx) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, Error)
async fn on_error(&mut self, error: Error) -> io::Result<()> {
self.tx.send(error).await.unwrap();
t2.authenticate(TestAuthHandler {
on_error: Box::new(move |error| {
tx.try_send(error).unwrap();
Ok(())
}
}),
..Default::default()
})
.await
.unwrap()
@ -573,15 +382,12 @@ mod tests {
let (tx, mut rx) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, Error)
async fn on_error(&mut self, error: Error) -> io::Result<()> {
self.tx.send(error).await.unwrap();
t2.authenticate(TestAuthHandler {
on_error: Box::new(move |error| {
tx.try_send(error).unwrap();
Ok(())
}
}),
..Default::default()
})
.await
.unwrap()
@ -612,15 +418,12 @@ mod tests {
let (tx, mut rx) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, StartMethod)
async fn on_start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
self.tx.send(start_method).await.unwrap();
t2.authenticate(TestAuthHandler {
on_start_method: Box::new(move |start_method| {
tx.try_send(start_method).unwrap();
Ok(())
}
}),
..Default::default()
})
.await
.unwrap()
@ -651,15 +454,12 @@ mod tests {
let (tx, mut rx) = mpsc::channel(1);
let task = tokio::spawn(async move {
t2.authenticate(auth_handler! {
@no_challenge
@no_verification
@tx(tx, ())
async fn on_finished(&mut self) -> io::Result<()> {
self.tx.send(()).await.unwrap();
t2.authenticate(TestAuthHandler {
on_finished: Box::new(move || {
tx.try_send(()).unwrap();
Ok(())
}
}),
..Default::default()
})
.await
.unwrap()

@ -216,9 +216,7 @@ impl UntypedClient {
// If we have flagged that a reconnect is needed, attempt to do so
if needs_reconnect {
info!("Client encountered issue, attempting to reconnect");
if log::log_enabled!(log::Level::Debug) {
debug!("Using strategy {reconnect_strategy:?}");
}
debug!("Using strategy {reconnect_strategy:?}");
match reconnect_strategy.reconnect(&mut connection).await {
Ok(()) => {
info!("Client successfully reconnected!");
@ -236,7 +234,7 @@ impl UntypedClient {
macro_rules! silence_needs_reconnect {
() => {{
debug!(
info!(
"Client exceeded {}s without server activity, so attempting to reconnect",
silence_duration.as_secs_f32(),
);
@ -260,7 +258,7 @@ impl UntypedClient {
let ready = tokio::select! {
// NOTE: This should NEVER return None as we never allow the channel to close.
cb = shutdown_rx.recv() => {
debug!("Client got shutdown signal, so exiting event loop");
info!("Client got shutdown signal, so exiting event loop");
let cb = cb.expect("Impossible: shutdown channel closed!");
let _ = cb.send(Ok(()));
watcher_tx.send_replace(ConnectionState::Disconnected);
@ -335,7 +333,7 @@ impl UntypedClient {
}
Ok(None) => {
debug!("Connection closed");
info!("Connection closed");
needs_reconnect = true;
watcher_tx.send_replace(ConnectionState::Reconnecting);
continue;

@ -14,13 +14,13 @@ use std::time::Duration;
use std::{convert, io};
use async_trait::async_trait;
use distant_auth::AuthHandler;
#[cfg(windows)]
pub use windows::*;
use super::ClientConfig;
use crate::client::{Client, UntypedClient};
use crate::common::authentication::AuthHandler;
use crate::common::{Connection, Transport};
use crate::common::{Connection, Transport, Version};
/// Interface that performs the connection to produce a [`Transport`] for use by the [`Client`].
#[async_trait]
@ -46,6 +46,7 @@ pub struct ClientBuilder<H, C> {
connector: C,
config: ClientConfig,
connect_timeout: Option<Duration>,
version: Version,
}
impl<H, C> ClientBuilder<H, C> {
@ -56,6 +57,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -66,6 +68,7 @@ impl<H, C> ClientBuilder<H, C> {
config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -76,6 +79,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -86,6 +90,18 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: connect_timeout.into(),
version: self.version,
}
}
/// Configure the version of the client.
pub fn version(self, version: Version) -> Self {
Self {
auth_handler: self.auth_handler,
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version,
}
}
}
@ -97,6 +113,7 @@ impl ClientBuilder<(), ()> {
config: Default::default(),
connector: (),
connect_timeout: None,
version: Default::default(),
}
}
}
@ -119,6 +136,7 @@ where
let auth_handler = self.auth_handler;
let config = self.config;
let connect_timeout = self.connect_timeout;
let version = self.version;
let f = async move {
let transport = match connect_timeout {
@ -128,7 +146,7 @@ where
.and_then(convert::identity)?,
None => self.connector.connect().await?,
};
let connection = Connection::client(transport, auth_handler).await?;
let connection = Connection::client(transport, auth_handler, version).await?;
Ok(UntypedClient::spawn(connection, config))
};

@ -1,20 +1,25 @@
mod any;
pub mod authentication;
mod connection;
mod destination;
mod key;
mod keychain;
mod listener;
mod map;
mod packet;
mod port;
mod transport;
pub(crate) mod utils;
mod version;
pub use any::*;
pub(crate) use connection::Connection;
pub use connection::ConnectionId;
pub use destination::*;
pub use key::*;
pub use keychain::*;
pub use listener::*;
pub use map::*;
pub use packet::*;
pub use port::*;
pub use transport::*;
pub use version::*;

@ -1,10 +0,0 @@
mod authenticator;
mod handler;
mod keychain;
mod methods;
pub mod msg;
pub use authenticator::*;
pub use handler::*;
pub use keychain::*;
pub use methods::*;

@ -1,130 +0,0 @@
use std::io;
use async_trait::async_trait;
use super::{AuthenticationMethod, Authenticator, Challenge, Error, Question};
use crate::common::HeapSecretKey;
/// Authentication method for a static secret key
#[derive(Clone, Debug)]
pub struct StaticKeyAuthenticationMethod {
    // Secret that a client's challenge answer must match exactly
    key: HeapSecretKey,
}

impl StaticKeyAuthenticationMethod {
    /// Creates a new method that validates challenge answers against `key`.
    #[inline]
    pub fn new(key: impl Into<HeapSecretKey>) -> Self {
        Self { key: key.into() }
    }
}
#[async_trait]
impl AuthenticationMethod for StaticKeyAuthenticationMethod {
    /// Static identifier for this authentication method.
    fn id(&self) -> &'static str {
        "static_key"
    }

    /// Issues a single "key" challenge through `authenticator` and checks the
    /// first answer against the stored secret key, returning a
    /// permission-denied I/O error on any mismatch or missing answer.
    async fn authenticate(&self, authenticator: &mut dyn Authenticator) -> io::Result<()> {
        // Ask the client for its copy of the key via one challenge question
        let response = authenticator
            .challenge(Challenge {
                questions: vec![Question {
                    label: "key".to_string(),
                    text: "Provide a key: ".to_string(),
                    options: Default::default(),
                }],
                options: Default::default(),
            })
            .await?;

        // No answer at all counts as a failed (non-fatal) authentication
        if response.answers.is_empty() {
            return Err(Error::non_fatal("missing answer").into_io_permission_denied());
        }

        // Only the first answer is considered; it must parse into a key that
        // equals the stored secret exactly
        match response
            .answers
            .into_iter()
            .next()
            .unwrap()
            .parse::<HeapSecretKey>()
        {
            Ok(key) if key == self.key => Ok(()),
            _ => Err(Error::non_fatal("answer does not match key").into_io_permission_denied()),
        }
    }
}
// Unit tests for [`StaticKeyAuthenticationMethod`], driving `authenticate`
// over an in-memory framed transport pair and scripting the client side by
// pre-queuing frames on the peer transport.
#[cfg(test)]
mod tests {
    use test_log::test;

    use super::*;
    use crate::common::authentication::msg::{AuthenticationResponse, ChallengeResponse};
    use crate::common::FramedTransport;

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_key_challenge_fails() {
        let method = StaticKeyAuthenticationMethod::new(b"".to_vec());

        let (mut t1, mut t2) = FramedTransport::test_pair(100);

        // Queue up an invalid frame for our challenge to ensure it fails
        t2.write_frame(b"invalid initialization response")
            .await
            .unwrap();

        // A malformed response surfaces as an InvalidData error
        assert_eq!(
            method.authenticate(&mut t1).await.unwrap_err().kind(),
            io::ErrorKind::InvalidData
        );
    }

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_no_answer_included_in_challenge_response() {
        let method = StaticKeyAuthenticationMethod::new(b"".to_vec());

        let (mut t1, mut t2) = FramedTransport::test_pair(100);

        // Queue up a response to the initialization request
        t2.write_frame_for(&AuthenticationResponse::Challenge(ChallengeResponse {
            answers: Vec::new(),
        }))
        .await
        .unwrap();

        // An empty answer list is rejected as PermissionDenied
        assert_eq!(
            method.authenticate(&mut t1).await.unwrap_err().kind(),
            io::ErrorKind::PermissionDenied
        );
    }

    #[test(tokio::test)]
    async fn authenticate_should_fail_if_answer_does_not_match_key() {
        let method = StaticKeyAuthenticationMethod::new(b"answer".to_vec());

        let (mut t1, mut t2) = FramedTransport::test_pair(100);

        // Queue up a response to the initialization request
        t2.write_frame_for(&AuthenticationResponse::Challenge(ChallengeResponse {
            answers: vec![HeapSecretKey::from(b"some key".to_vec()).to_string()],
        }))
        .await
        .unwrap();

        // A wrong key is rejected as PermissionDenied
        assert_eq!(
            method.authenticate(&mut t1).await.unwrap_err().kind(),
            io::ErrorKind::PermissionDenied
        );
    }

    #[test(tokio::test)]
    async fn authenticate_should_succeed_if_answer_matches_key() {
        let method = StaticKeyAuthenticationMethod::new(b"answer".to_vec());

        let (mut t1, mut t2) = FramedTransport::test_pair(100);

        // Queue up a response to the initialization request
        t2.write_frame_for(&AuthenticationResponse::Challenge(ChallengeResponse {
            answers: vec![HeapSecretKey::from(b"answer".to_vec()).to_string()],
        }))
        .await
        .unwrap();

        method.authenticate(&mut t1).await.unwrap();
    }
}

@ -2,14 +2,17 @@ use std::io;
use std::ops::{Deref, DerefMut};
use async_trait::async_trait;
use distant_auth::{AuthHandler, Authenticate, Verifier};
use log::*;
use serde::{Deserialize, Serialize};
use tokio::sync::oneshot;
use super::authentication::{AuthHandler, Authenticate, Keychain, KeychainResult, Verifier};
#[cfg(test)]
use super::InmemoryTransport;
use super::{Backup, FramedTransport, HeapSecretKey, Reconnectable, Transport};
use crate::common::InmemoryTransport;
use crate::common::{
Backup, FramedTransport, HeapSecretKey, Keychain, KeychainResult, Reconnectable, Transport,
TransportExt, Version,
};
/// Id of the connection
pub type ConnectionId = u32;
@ -108,6 +111,19 @@ where
debug!("[Conn {id}] Re-establishing connection");
Reconnectable::reconnect(transport).await?;
// Wait for exactly version bytes (24 where 8 bytes for major, minor, patch)
// but with a reconnect we don't actually validate it because we did that
// the first time we connected
//
// NOTE: We do this with the raw transport and not the framed version!
debug!("[Conn {id}] Waiting for server version");
if transport.as_mut_inner().read_exact(&mut [0u8; 24]).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
transport.client_handshake().await?;
@ -188,13 +204,42 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the client-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 1. Performs a version check with the server
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn client<H: AuthHandler + Send>(transport: T, handler: H) -> io::Result<Self> {
pub async fn client<H: AuthHandler + Send>(
transport: T,
handler: H,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Wait for exactly version bytes (24 where 8 bytes for major, minor, patch)
debug!("[Conn {id}] Waiting for server version");
let mut version_bytes = [0u8; 24];
if transport.read_exact(&mut version_bytes).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Compare versions for compatibility and drop the connection if incompatible
let server_version = Version::from_be_bytes(version_bytes);
debug!(
"[Conn {id}] Checking compatibility between client {version} & server {server_version}"
);
if !version.is_compatible_with(&server_version) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Client version {version} is incompatible with server version {server_version}"
),
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -236,19 +281,25 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the server-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid by either using the
/// 1. Performs a version check with the client
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid by either using the
/// given `verifier` or, if working with an existing client connection, will validate an OTP
/// from our database
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn server(
transport: T,
verifier: &Verifier,
keychain: Keychain<oneshot::Receiver<Backup>>,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Write the version as bytes
debug!("[Conn {id}] Sending version {version}");
transport.write_all(&version.to_be_bytes()).await?;
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -455,13 +506,67 @@ impl<T: Transport> Connection<T> {
mod tests {
use std::sync::Arc;
use distant_auth::msg::Challenge;
use distant_auth::{Authenticator, DummyAuthHandler};
use test_log::test;
use super::*;
use crate::common::authentication::msg::Challenge;
use crate::common::authentication::{Authenticator, DummyAuthHandler};
use crate::common::Frame;
// Fixed version used by tests when the exact value does not matter
macro_rules! server_version {
    () => {
        Version::new(1, 2, 3)
    };
}
// Writes the given version (default: `server_version!()`) as raw big-endian
// bytes over the transport's inner, unframed channel — mimicking the server's
// initial version announcement before any handshake occurs
macro_rules! send_server_version {
    ($transport:expr, $version:expr) => {{
        ($transport)
            .as_mut_inner()
            .write_all(&$version.to_be_bytes())
            .await
            .unwrap();
    }};
    ($transport:expr) => {
        send_server_version!($transport, server_version!());
    };
}
// Reads exactly 24 raw bytes (8 each for major, minor, patch) from the
// transport's inner, unframed channel and decodes them as a big-endian Version,
// panicking if fewer bytes arrive
macro_rules! receive_version {
    ($transport:expr) => {{
        let mut bytes = [0u8; 24];
        assert_eq!(
            ($transport)
                .as_mut_inner()
                .read_exact(&mut bytes)
                .await
                .unwrap(),
            24,
            "Wrong version len received"
        );
        Version::from_be_bytes(bytes)
    }};
}
#[test(tokio::test)]
async fn client_should_fail_when_server_sends_incompatible_version() {
    let (mut t1, t2) = FramedTransport::pair(100);

    // Spawn a task to perform the client connection so we don't deadlock while simulating the
    // server actions on the other side
    let task = tokio::spawn(async move {
        Connection::client(t2.into_inner(), DummyAuthHandler, Version::new(1, 2, 3))
            .await
            .unwrap()
    });

    // Send invalid version to fail the handshake
    // NOTE(review): 2.0.0 vs client 1.2.3 — presumably incompatible due to the
    // differing major version; confirm against Version::is_compatible_with
    send_server_version!(t1, Version::new(2, 0, 0));

    // Client should fail
    task.await.unwrap_err();
}
#[test(tokio::test)]
async fn client_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -469,11 +574,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -488,11 +596,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -517,11 +628,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -557,11 +671,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -595,11 +712,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -627,6 +747,30 @@ mod tests {
assert_eq!(client.otp(), Some(&otp));
}
#[test(tokio::test)]
async fn server_should_fail_if_client_drops_due_to_version() {
    let (mut t1, t2) = FramedTransport::pair(100);
    let verifier = Verifier::none();
    let keychain = Keychain::new();

    // Spawn a task to perform the server connection so we don't deadlock while simulating the
    // client actions on the other side
    let task = tokio::spawn(async move {
        Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
            .await
            .unwrap()
    });

    // Receive the version from the server
    let _ = receive_version!(t1);

    // Drop client connection as a result of an "incompatible version"
    drop(t1);

    // Server should fail (its transport closed before the handshake completed)
    task.await.unwrap_err();
}
#[test(tokio::test)]
async fn server_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -636,11 +780,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -657,11 +804,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -681,11 +831,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -715,11 +868,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -748,11 +904,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -788,11 +947,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -826,11 +988,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -864,11 +1029,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -902,12 +1070,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -967,12 +1138,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -1027,13 +1201,13 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
let mut server = task.await.unwrap();
@ -1061,14 +1235,14 @@ mod tests {
let verifier = Arc::clone(&verifier);
let keychain = keychain.clone();
tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
})
};
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
@ -1091,6 +1265,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Send garbage to fail handshake from server-side
transport.write_frame(b"hello").await.unwrap();
@ -1106,6 +1283,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1124,6 +1304,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1160,6 +1343,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1203,6 +1389,9 @@ mod tests {
client
});
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1273,7 +1462,7 @@ mod tests {
// Spawn a task to perform the server reconnection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(transport, &verifier, keychain)
Connection::server(transport, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});

@ -69,11 +69,8 @@ fn parse_scheme(s: &str) -> PResult<&str> {
fn parse_username_password(s: &str) -> PResult<(Option<&str>, Option<&str>)> {
let (auth, remaining) = s.split_once('@').ok_or("Auth missing @")?;
let (auth, username) = maybe(parse_until(|c| !c.is_alphanumeric()))(auth)?;
let (auth, password) = maybe(prefixed(
parse_char(':'),
parse_until(|c| !c.is_alphanumeric()),
))(auth)?;
let (auth, username) = maybe(parse_until(|c| c == ':'))(auth)?;
let (auth, password) = maybe(prefixed(parse_char(':'), |s| Ok(("", s))))(auth)?;
if !auth.is_empty() {
return Err("Dangling characters after username/password");
@ -297,16 +294,6 @@ mod tests {
let _ = parse_username_password("username:password").unwrap_err();
}
#[test]
fn should_fail_if_username_not_alphanumeric() {
let _ = parse_username_password("us\x1bername:password@").unwrap_err();
}
#[test]
fn should_fail_if_password_not_alphanumeric() {
let _ = parse_username_password("username:pas\x1bsword@").unwrap_err();
}
#[test]
fn should_return_username_if_available() {
let (s, username_password) = parse_username_password("username@").unwrap();
@ -331,6 +318,57 @@ mod tests {
assert_eq!(username_password.1, Some("password"));
}
#[test]
fn should_return_username_with_hyphen_and_password() {
    // A hyphen inside the username must not terminate it
    let (s, username_password) =
        parse_username_password("some-user:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("some-user"));
    assert_eq!(username_password.1, Some("password"));
}

#[test]
fn should_return_username_password_if_username_starts_or_ends_with_hyphen() {
    // Leading/trailing hyphens are preserved as part of the username
    let (s, username_password) =
        parse_username_password("-some-user-:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("-some-user-"));
    assert_eq!(username_password.1, Some("password"));
}

#[test]
fn should_support_username_with_backslash() {
    // Windows-style `org\user` names parse whole, with or without a password
    let (s, username_password) = parse_username_password(r#"orgname\myname@"#).unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some(r#"orgname\myname"#));
    assert_eq!(username_password.1, None);

    let (s, username_password) =
        parse_username_password(r#"orgname\myname:password@"#).unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some(r#"orgname\myname"#));
    assert_eq!(username_password.1, Some("password"));
}

#[test]
fn should_support_username_and_password_with_arbitrary_characters() {
    // Non-delimiter bytes (including escape characters) are accepted verbatim
    let (s, username_password) =
        parse_username_password("name1!#$%^&*()[]{{}}\x1b:pass1!#$%^&*()[]{{}}\x1b@")
            .unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("name1!#$%^&*()[]{{}}\x1b"));
    assert_eq!(username_password.1, Some("pass1!#$%^&*()[]{{}}\x1b"));
}

#[test]
fn should_support_colons_in_password() {
    // Only the first ':' splits username from password; later colons belong to the password
    let (s, username_password) =
        parse_username_password("user:name:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("user"));
    assert_eq!(username_password.1, Some("name:password"));
}
#[test]
fn should_consume_up_to_the_ending_sequence() {
let (s, username_password) =
@ -338,6 +376,18 @@ mod tests {
assert_eq!(s, "example.com");
assert_eq!(username_password.0, Some("username"));
assert_eq!(username_password.1, Some("password"));
let (s, username_password) =
parse_username_password("user@name:password@").unwrap();
assert_eq!(s, "name:password@");
assert_eq!(username_password.0, Some("user"));
assert_eq!(username_password.1, None);
let (s, username_password) =
parse_username_password("username:pass@word@").unwrap();
assert_eq!(s, "word@");
assert_eq!(username_password.0, Some("username"));
assert_eq!(username_password.1, Some("pass"));
}
}
@ -653,6 +703,16 @@ mod tests {
assert_eq!(destination.port, Some(22));
}
#[test]
fn parse_should_succeed_if_given_username_has_hyphen() {
    // Hyphenated usernames are valid in destinations like `some-user@host:port`
    let destination = parse("some-user@example.com:22").unwrap();
    assert_eq!(destination.scheme, None);
    assert_eq!(destination.username.as_deref(), Some("some-user"));
    assert_eq!(destination.password, None);
    assert_eq!(destination.host, "example.com");
    assert_eq!(destination.port, Some(22));
}
#[test]
fn parse_should_succeed_if_given_password_host_and_port() {
let destination = parse(":password@example.com:22").unwrap();

@ -13,7 +13,6 @@ use crate::common::utils::{deserialize_from_str, serialize_to_str};
/// Contains map information for connections and other use cases
#[derive(Clone, Debug, From, IntoIterator, PartialEq, Eq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Map(HashMap<String, String>);
impl Map {
@ -77,13 +76,6 @@ impl Map {
}
}
#[cfg(feature = "schemars")]
impl Map {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Map)
}
}
impl Default for Map {
fn default() -> Self {
Self::new()

File diff suppressed because it is too large Load Diff

@ -0,0 +1,109 @@
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use std::{fmt, io};
use derive_more::IntoIterator;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::common::{utils, Value};
/// Generates a new [`Header`] of key/value pairs based on literals.
///
/// ```
/// use distant_net::header;
///
/// let _header = header!("key" -> "value", "key2" -> 123);
/// ```
#[macro_export]
macro_rules! header {
    ($($key:literal -> $value:expr),* $(,)?) => {{
        // Underscore-prefixed to avoid unused-variable warnings when the
        // macro is invoked with zero pairs
        let mut _header = $crate::common::Header::default();
        $(
            _header.insert($key, $value);
        )*
        _header
    }};
}
/// Represents a packet header comprised of arbitrary data tied to string keys.
// Serialized transparently as the inner map itself, with no wrapper layer
#[derive(Clone, Debug, Default, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Header(HashMap<String, Value>);
impl Header {
    /// Creates an empty [`Header`] newtype wrapper.
    pub fn new() -> Self {
        Self::default()
    }

    /// Exists purely to support serde serialization checks.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Inserts a key-value pair into the map.
    ///
    /// If the map did not have this key present, [`None`] is returned.
    ///
    /// If the map did have this key present, the value is updated, and the old value is returned.
    /// The key is not updated, though; this matters for types that can be `==` without being
    /// identical. See the [module-level documentation](std::collections#insert-and-complex-keys)
    /// for more.
    pub fn insert(&mut self, key: impl Into<String>, value: impl Into<Value>) -> Option<Value> {
        let (key, value) = (key.into(), value.into());
        self.0.insert(key, value)
    }

    /// Retrieves a value from the header, attempting to convert it to the specified type `T`
    /// by cloning the value and then converting it.
    pub fn get_as<T>(&self, key: impl AsRef<str>) -> Option<io::Result<T>>
    where
        T: DeserializeOwned,
    {
        // `?` exits early with None when the key is absent
        let value = self.0.get(key.as_ref())?;
        Some(value.clone().cast_as())
    }

    /// Serializes the header into bytes.
    pub fn to_vec(&self) -> io::Result<Vec<u8>> {
        utils::serialize_to_vec(self)
    }

    /// Deserializes the header from bytes.
    pub fn from_slice(slice: &[u8]) -> io::Result<Self> {
        utils::deserialize_from_slice(slice)
    }
}
impl Deref for Header {
    type Target = HashMap<String, Value>;

    // Exposes the full read-only HashMap API directly on Header
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Header {
    // Exposes the full mutable HashMap API directly on Header
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl fmt::Display for Header {
    /// Formats the header as `{"key" = value, "key2" = value2}` with each
    /// value JSON-encoded (falling back to `--` when encoding fails).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{{")?;

        for (i, (key, value)) in self.0.iter().enumerate() {
            let value = serde_json::to_string(value).unwrap_or_else(|_| String::from("--"));

            // Fix: separate entries so multi-key headers don't render as one
            // fused token like {"a" = 1"b" = 2}
            if i > 0 {
                write!(f, ", ")?;
            }

            write!(f, "\"{key}\" = {value}")?;
        }

        write!(f, "}}")?;
        Ok(())
    }
}

@ -5,13 +5,17 @@ use derive_more::{Display, Error};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use super::{parse_msg_pack_str, write_str_msg_pack, Id};
use super::{read_header_bytes, read_key_eq, read_str_bytes, Header, Id};
use crate::common::utils;
use crate::header;
/// Represents a request to send
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Request<T> {
/// Optional header data to include with request
#[serde(default, skip_serializing_if = "Header::is_empty")]
pub header: Header,
/// Unique id associated with the request
pub id: Id,
@ -20,9 +24,10 @@ pub struct Request<T> {
}
impl<T> Request<T> {
/// Creates a new request with a random, unique id
/// Creates a new request with a random, unique id and no header data
pub fn new(payload: T) -> Self {
Self {
header: header!(),
id: rand::random::<u64>().to_string(),
payload,
}
@ -46,6 +51,11 @@ where
/// Attempts to convert a typed request to an untyped request
pub fn to_untyped_request(&self) -> io::Result<UntypedRequest> {
Ok(UntypedRequest {
header: Cow::Owned(if !self.header.is_empty() {
utils::serialize_to_vec(&self.header)?
} else {
Vec::new()
}),
id: Cow::Borrowed(&self.id),
payload: Cow::Owned(self.to_payload_vec()?),
})
@ -62,13 +72,6 @@ where
}
}
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Request<T> {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Request<T>)
}
}
impl<T> From<T> for Request<T> {
fn from(payload: T) -> Self {
Self::new(payload)
@ -81,13 +84,34 @@ pub enum UntypedRequestParseError {
/// When the bytes do not represent a request
WrongType,
/// When a header should be present, but the key is wrong
InvalidHeaderKey,
/// When a header should be present, but the header bytes are wrong
InvalidHeader,
/// When the key for the id is wrong
InvalidIdKey,
/// When the id is not a valid UTF-8 string
InvalidId,
/// When the key for the payload is wrong
InvalidPayloadKey,
}
/// Serde helper: reports whether the serialized header bytes are absent,
/// allowing `skip_serializing_if` to omit the field entirely.
#[inline]
fn header_is_empty(header: &[u8]) -> bool {
    matches!(header, [])
}
/// Represents a request to send whose payload is bytes instead of a specific type
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct UntypedRequest<'a> {
/// Header data associated with the request as bytes
#[serde(default, skip_serializing_if = "header_is_empty")]
pub header: Cow<'a, [u8]>,
/// Unique id associated with the request
pub id: Cow<'a, str>,
@ -99,6 +123,11 @@ impl<'a> UntypedRequest<'a> {
/// Attempts to convert an untyped request to a typed request
pub fn to_typed_request<T: DeserializeOwned>(&self) -> io::Result<Request<T>> {
Ok(Request {
header: if header_is_empty(&self.header) {
header!()
} else {
utils::deserialize_from_slice(&self.header)?
},
id: self.id.to_string(),
payload: utils::deserialize_from_slice(&self.payload)?,
})
@ -107,6 +136,10 @@ impl<'a> UntypedRequest<'a> {
/// Convert into a borrowed version
pub fn as_borrowed(&self) -> UntypedRequest<'_> {
UntypedRequest {
header: match &self.header {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_slice()),
},
id: match &self.id {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_str()),
@ -121,6 +154,10 @@ impl<'a> UntypedRequest<'a> {
/// Convert into an owned version
pub fn into_owned(self) -> UntypedRequest<'static> {
UntypedRequest {
header: match self.header {
Cow::Borrowed(x) => Cow::Owned(x.to_vec()),
Cow::Owned(x) => Cow::Owned(x),
},
id: match self.id {
Cow::Borrowed(x) => Cow::Owned(x.to_string()),
Cow::Owned(x) => Cow::Owned(x),
@ -132,6 +169,11 @@ impl<'a> UntypedRequest<'a> {
}
}
/// Updates the header of the request to the given `header`.
pub fn set_header(&mut self, header: impl IntoIterator<Item = u8>) {
self.header = Cow::Owned(header.into_iter().collect());
}
/// Updates the id of the request to the given `id`.
pub fn set_id(&mut self, id: impl Into<String>) {
self.id = Cow::Owned(id.into());
@ -139,61 +181,80 @@ impl<'a> UntypedRequest<'a> {
/// Allocates a new collection of bytes representing the request.
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![0x82];
let mut bytes = vec![];
let has_header = !header_is_empty(&self.header);
if has_header {
rmp::encode::write_map_len(&mut bytes, 3).unwrap();
} else {
rmp::encode::write_map_len(&mut bytes, 2).unwrap();
}
write_str_msg_pack("id", &mut bytes);
write_str_msg_pack(&self.id, &mut bytes);
if has_header {
rmp::encode::write_str(&mut bytes, "header").unwrap();
bytes.extend_from_slice(&self.header);
}
rmp::encode::write_str(&mut bytes, "id").unwrap();
rmp::encode::write_str(&mut bytes, &self.id).unwrap();
write_str_msg_pack("payload", &mut bytes);
rmp::encode::write_str(&mut bytes, "payload").unwrap();
bytes.extend_from_slice(&self.payload);
bytes
}
/// Parses a collection of bytes, returning a partial request if it can be potentially
/// represented as a [`Request`] depending on the payload, or the original bytes if it does not
/// represent a [`Request`]
/// represented as a [`Request`] depending on the payload.
///
/// NOTE: This supports parsing an invalid request where the payload would not properly
/// deserialize, but the bytes themselves represent a complete request of some kind.
pub fn from_slice(input: &'a [u8]) -> Result<Self, UntypedRequestParseError> {
if input.len() < 2 {
if input.is_empty() {
return Err(UntypedRequestParseError::WrongType);
}
// MsgPack marks a fixmap using 0x80 - 0x8f to indicate the size (up to 15 elements).
//
// In the case of the request, there are only two elements: id and payload. So the first
// byte should ALWAYS be 0x82 (130).
if input[0] != 0x82 {
return Err(UntypedRequestParseError::WrongType);
}
let has_header = match rmp::Marker::from_u8(input[0]) {
rmp::Marker::FixMap(2) => false,
rmp::Marker::FixMap(3) => true,
_ => return Err(UntypedRequestParseError::WrongType),
};
// Skip the first byte representing the fixmap
// Advance position by marker
let input = &input[1..];
// Validate that first field is id
let (input, id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::WrongType)?;
if id_key != "id" {
return Err(UntypedRequestParseError::WrongType);
}
// Parse the header if we have one
let (header, input) = if has_header {
let (_, input) = read_key_eq(input, "header")
.map_err(|_| UntypedRequestParseError::InvalidHeaderKey)?;
let (header, input) =
read_header_bytes(input).map_err(|_| UntypedRequestParseError::InvalidHeader)?;
(header, input)
} else {
([0u8; 0].as_slice(), input)
};
// Validate that next field is id
let (_, input) =
read_key_eq(input, "id").map_err(|_| UntypedRequestParseError::InvalidIdKey)?;
// Get the id itself
let (input, id) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::InvalidId)?;
let (id, input) = read_str_bytes(input).map_err(|_| UntypedRequestParseError::InvalidId)?;
// Validate that second field is payload
let (input, payload_key) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::WrongType)?;
if payload_key != "payload" {
return Err(UntypedRequestParseError::WrongType);
}
// Validate that final field is payload
let (_, input) = read_key_eq(input, "payload")
.map_err(|_| UntypedRequestParseError::InvalidPayloadKey)?;
let header = Cow::Borrowed(header);
let id = Cow::Borrowed(id);
let payload = Cow::Borrowed(input);
Ok(Self { id, payload })
Ok(Self {
header,
id,
payload,
})
}
}
@ -206,18 +267,47 @@ mod tests {
const TRUE_BYTE: u8 = 0xc3;
const NEVER_USED_BYTE: u8 = 0xc1;
// fixstr of 6 bytes with str "header"
const HEADER_FIELD_BYTES: &[u8] = &[0xa6, b'h', b'e', b'a', b'd', b'e', b'r'];
// fixmap of 2 objects with
// 1. key fixstr "key" and value fixstr "value"
// 1. key fixstr "num" and value fixint 123
const HEADER_BYTES: &[u8] = &[
0x82, // valid map with 2 pair
0xa3, b'k', b'e', b'y', // key: "key"
0xa5, b'v', b'a', b'l', b'u', b'e', // value: "value"
0xa3, b'n', b'u', b'm', // key: "num"
0x7b, // value: 123
];
// fixstr of 2 bytes with str "id"
const ID_FIELD_BYTES: &[u8] = &[0xa2, 0x69, 0x64];
const ID_FIELD_BYTES: &[u8] = &[0xa2, b'i', b'd'];
// fixstr of 7 bytes with str "payload"
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64];
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, b'p', b'a', b'y', b'l', b'o', b'a', b'd'];
/// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, 0x74, 0x65, 0x73, 0x74];
// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, b't', b'e', b's', b't'];
#[test]
fn untyped_request_should_support_converting_to_bytes() {
let bytes = Request {
header: header!(),
id: "some id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
let untyped_request = UntypedRequest::from_slice(&bytes).unwrap();
assert_eq!(untyped_request.to_bytes(), bytes);
}
#[test]
fn untyped_request_should_support_converting_to_bytes_with_header() {
let bytes = Request {
header: header!("key" -> 123),
id: "some id".to_string(),
payload: true,
}
@ -228,9 +318,30 @@ mod tests {
assert_eq!(untyped_request.to_bytes(), bytes);
}
#[test]
fn untyped_request_should_support_parsing_from_request_bytes_with_header() {
let bytes = Request {
header: header!("key" -> 123),
id: "some id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
assert_eq!(
UntypedRequest::from_slice(&bytes),
Ok(UntypedRequest {
header: Cow::Owned(utils::serialize_to_vec(&header!("key" -> 123)).unwrap()),
id: Cow::Borrowed("some id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
})
);
}
#[test]
fn untyped_request_should_support_parsing_from_request_bytes_with_valid_payload() {
let bytes = Request {
header: header!(),
id: "some id".to_string(),
payload: true,
}
@ -240,6 +351,7 @@ mod tests {
assert_eq!(
UntypedRequest::from_slice(&bytes),
Ok(UntypedRequest {
header: Cow::Owned(vec![]),
id: Cow::Borrowed("some id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
})
@ -250,6 +362,7 @@ mod tests {
fn untyped_request_should_support_parsing_from_request_bytes_with_invalid_payload() {
// Request with id < 32 bytes
let mut bytes = Request {
header: header!(),
id: "".to_string(),
payload: true,
}
@ -263,12 +376,35 @@ mod tests {
assert_eq!(
UntypedRequest::from_slice(&bytes),
Ok(UntypedRequest {
header: Cow::Owned(vec![]),
id: Cow::Owned("".to_string()),
payload: Cow::Owned(vec![TRUE_BYTE, NEVER_USED_BYTE]),
})
);
}
#[test]
fn untyped_request_should_support_parsing_full_request() {
let input = [
&[0x83],
HEADER_FIELD_BYTES,
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat();
// Convert into typed so we can test
let untyped_request = UntypedRequest::from_slice(&input).unwrap();
let request: Request<bool> = untyped_request.to_typed_request().unwrap();
assert_eq!(request.header, header!("key" -> "value", "num" -> 123));
assert_eq!(request.id, "test");
assert!(request.payload);
}
#[test]
fn untyped_request_should_fail_to_parse_if_given_bytes_not_representing_a_request() {
// Empty byte slice
@ -289,10 +425,46 @@ mod tests {
Err(UntypedRequestParseError::WrongType)
);
// Invalid header key
assert_eq!(
UntypedRequest::from_slice(
[
&[0x83],
&[0xa0], // header key would be defined here, set to empty str
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedRequestParseError::InvalidHeaderKey)
);
// Invalid header bytes
assert_eq!(
UntypedRequest::from_slice(
[
&[0x83],
HEADER_FIELD_BYTES,
&[0xa0], // header would be defined here, set to empty str
ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedRequestParseError::InvalidHeader)
);
// Missing fields (corrupt data)
assert_eq!(
UntypedRequest::from_slice(&[0x82]),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidIdKey)
);
// Missing id field (has valid data itself)
@ -308,7 +480,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidIdKey)
);
// Non-str id field value
@ -356,7 +528,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidPayloadKey)
);
}
}

@ -5,13 +5,17 @@ use derive_more::{Display, Error};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use super::{parse_msg_pack_str, write_str_msg_pack, Id};
use super::{read_header_bytes, read_key_eq, read_str_bytes, Header, Id};
use crate::common::utils;
use crate::header;
/// Represents a response received related to some response
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Response<T> {
/// Optional header data to include with response
#[serde(default, skip_serializing_if = "Header::is_empty")]
pub header: Header,
/// Unique id associated with the response
pub id: Id,
@ -23,9 +27,10 @@ pub struct Response<T> {
}
impl<T> Response<T> {
/// Creates a new response with a random, unique id
/// Creates a new response with a random, unique id and no header data
pub fn new(origin_id: Id, payload: T) -> Self {
Self {
header: header!(),
id: rand::random::<u64>().to_string(),
origin_id,
payload,
@ -50,6 +55,11 @@ where
/// Attempts to convert a typed response to an untyped response
pub fn to_untyped_response(&self) -> io::Result<UntypedResponse> {
Ok(UntypedResponse {
header: Cow::Owned(if !self.header.is_empty() {
utils::serialize_to_vec(&self.header)?
} else {
Vec::new()
}),
id: Cow::Borrowed(&self.id),
origin_id: Cow::Borrowed(&self.origin_id),
payload: Cow::Owned(self.to_payload_vec()?),
@ -67,29 +77,46 @@ where
}
}
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Response<T> {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Response<T>)
}
}
/// Error encountered when attempting to parse bytes as an untyped response
#[derive(Copy, Clone, Debug, Display, Error, PartialEq, Eq, Hash)]
pub enum UntypedResponseParseError {
/// When the bytes do not represent a response
WrongType,
/// When a header should be present, but the key is wrong
InvalidHeaderKey,
/// When a header should be present, but the header bytes are wrong
InvalidHeader,
/// When the key for the id is wrong
InvalidIdKey,
/// When the id is not a valid UTF-8 string
InvalidId,
/// When the key for the origin id is wrong
InvalidOriginIdKey,
/// When the origin id is not a valid UTF-8 string
InvalidOriginId,
/// When the key for the payload is wrong
InvalidPayloadKey,
}
/// Serde helper used by `skip_serializing_if`: reports whether the
/// serialized header contains no bytes at all.
#[inline]
fn header_is_empty(header: &[u8]) -> bool {
    matches!(header, [])
}
/// Represents a response to send whose payload is bytes instead of a specific type
#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct UntypedResponse<'a> {
/// Header data associated with the response as bytes
#[serde(default, skip_serializing_if = "header_is_empty")]
pub header: Cow<'a, [u8]>,
/// Unique id associated with the response
pub id: Cow<'a, str>,
@ -101,9 +128,14 @@ pub struct UntypedResponse<'a> {
}
impl<'a> UntypedResponse<'a> {
/// Attempts to convert an untyped request to a typed request
/// Attempts to convert an untyped response to a typed response
pub fn to_typed_response<T: DeserializeOwned>(&self) -> io::Result<Response<T>> {
Ok(Response {
header: if header_is_empty(&self.header) {
header!()
} else {
utils::deserialize_from_slice(&self.header)?
},
id: self.id.to_string(),
origin_id: self.origin_id.to_string(),
payload: utils::deserialize_from_slice(&self.payload)?,
@ -113,6 +145,10 @@ impl<'a> UntypedResponse<'a> {
/// Convert into a borrowed version
pub fn as_borrowed(&self) -> UntypedResponse<'_> {
UntypedResponse {
header: match &self.header {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_slice()),
},
id: match &self.id {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_str()),
@ -131,6 +167,10 @@ impl<'a> UntypedResponse<'a> {
/// Convert into an owned version
pub fn into_owned(self) -> UntypedResponse<'static> {
UntypedResponse {
header: match self.header {
Cow::Borrowed(x) => Cow::Owned(x.to_vec()),
Cow::Owned(x) => Cow::Owned(x),
},
id: match self.id {
Cow::Borrowed(x) => Cow::Owned(x.to_string()),
Cow::Owned(x) => Cow::Owned(x),
@ -146,6 +186,11 @@ impl<'a> UntypedResponse<'a> {
}
}
/// Updates the header of the response to the given `header`.
pub fn set_header(&mut self, header: impl IntoIterator<Item = u8>) {
self.header = Cow::Owned(header.into_iter().collect());
}
/// Updates the id of the response to the given `id`.
pub fn set_id(&mut self, id: impl Into<String>) {
self.id = Cow::Owned(id.into());
@ -158,76 +203,90 @@ impl<'a> UntypedResponse<'a> {
/// Allocates a new collection of bytes representing the response.
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![0x83];
let mut bytes = vec![];
write_str_msg_pack("id", &mut bytes);
write_str_msg_pack(&self.id, &mut bytes);
let has_header = !header_is_empty(&self.header);
if has_header {
rmp::encode::write_map_len(&mut bytes, 4).unwrap();
} else {
rmp::encode::write_map_len(&mut bytes, 3).unwrap();
}
if has_header {
rmp::encode::write_str(&mut bytes, "header").unwrap();
bytes.extend_from_slice(&self.header);
}
write_str_msg_pack("origin_id", &mut bytes);
write_str_msg_pack(&self.origin_id, &mut bytes);
rmp::encode::write_str(&mut bytes, "id").unwrap();
rmp::encode::write_str(&mut bytes, &self.id).unwrap();
write_str_msg_pack("payload", &mut bytes);
rmp::encode::write_str(&mut bytes, "origin_id").unwrap();
rmp::encode::write_str(&mut bytes, &self.origin_id).unwrap();
rmp::encode::write_str(&mut bytes, "payload").unwrap();
bytes.extend_from_slice(&self.payload);
bytes
}
/// Parses a collection of bytes, returning an untyped response if it can be potentially
/// represented as a [`Response`] depending on the payload, or the original bytes if it does not
/// represent a [`Response`].
/// represented as a [`Response`] depending on the payload.
///
/// NOTE: This supports parsing an invalid response where the payload would not properly
/// deserialize, but the bytes themselves represent a complete response of some kind.
pub fn from_slice(input: &'a [u8]) -> Result<Self, UntypedResponseParseError> {
if input.len() < 2 {
if input.is_empty() {
return Err(UntypedResponseParseError::WrongType);
}
// MsgPack marks a fixmap using 0x80 - 0x8f to indicate the size (up to 15 elements).
//
// In the case of the request, there are only three elements: id, origin_id, and payload.
// So the first byte should ALWAYS be 0x83 (131).
if input[0] != 0x83 {
return Err(UntypedResponseParseError::WrongType);
}
let has_header = match rmp::Marker::from_u8(input[0]) {
rmp::Marker::FixMap(3) => false,
rmp::Marker::FixMap(4) => true,
_ => return Err(UntypedResponseParseError::WrongType),
};
// Skip the first byte representing the fixmap
// Advance position by marker
let input = &input[1..];
// Validate that first field is id
let (input, id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if id_key != "id" {
return Err(UntypedResponseParseError::WrongType);
}
// Parse the header if we have one
let (header, input) = if has_header {
let (_, input) = read_key_eq(input, "header")
.map_err(|_| UntypedResponseParseError::InvalidHeaderKey)?;
let (header, input) =
read_header_bytes(input).map_err(|_| UntypedResponseParseError::InvalidHeader)?;
(header, input)
} else {
([0u8; 0].as_slice(), input)
};
// Validate that next field is id
let (_, input) =
read_key_eq(input, "id").map_err(|_| UntypedResponseParseError::InvalidIdKey)?;
// Get the id itself
let (input, id) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::InvalidId)?;
let (id, input) =
read_str_bytes(input).map_err(|_| UntypedResponseParseError::InvalidId)?;
// Validate that second field is origin_id
let (input, origin_id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if origin_id_key != "origin_id" {
return Err(UntypedResponseParseError::WrongType);
}
// Validate that next field is origin_id
let (_, input) = read_key_eq(input, "origin_id")
.map_err(|_| UntypedResponseParseError::InvalidOriginIdKey)?;
// Get the origin_id itself
let (input, origin_id) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::InvalidOriginId)?;
let (origin_id, input) =
read_str_bytes(input).map_err(|_| UntypedResponseParseError::InvalidOriginId)?;
// Validate that second field is payload
let (input, payload_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if payload_key != "payload" {
return Err(UntypedResponseParseError::WrongType);
}
// Validate that final field is payload
let (_, input) = read_key_eq(input, "payload")
.map_err(|_| UntypedResponseParseError::InvalidPayloadKey)?;
let header = Cow::Borrowed(header);
let id = Cow::Borrowed(id);
let origin_id = Cow::Borrowed(origin_id);
let payload = Cow::Borrowed(input);
Ok(Self {
header,
id,
origin_id,
payload,
@ -244,22 +303,52 @@ mod tests {
const TRUE_BYTE: u8 = 0xc3;
const NEVER_USED_BYTE: u8 = 0xc1;
// fixstr of 6 bytes with str "header"
const HEADER_FIELD_BYTES: &[u8] = &[0xa6, b'h', b'e', b'a', b'd', b'e', b'r'];
// fixmap of 2 objects with
// 1. key fixstr "key" and value fixstr "value"
// 1. key fixstr "num" and value fixint 123
const HEADER_BYTES: &[u8] = &[
0x82, // valid map with 2 pair
0xa3, b'k', b'e', b'y', // key: "key"
0xa5, b'v', b'a', b'l', b'u', b'e', // value: "value"
0xa3, b'n', b'u', b'm', // key: "num"
0x7b, // value: 123
];
// fixstr of 2 bytes with str "id"
const ID_FIELD_BYTES: &[u8] = &[0xa2, 0x69, 0x64];
const ID_FIELD_BYTES: &[u8] = &[0xa2, b'i', b'd'];
// fixstr of 9 bytes with str "origin_id"
const ORIGIN_ID_FIELD_BYTES: &[u8] =
&[0xa9, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64];
// fixstr of 7 bytes with str "payload"
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64];
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, b'p', b'a', b'y', b'l', b'o', b'a', b'd'];
/// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, 0x74, 0x65, 0x73, 0x74];
const TEST_STR_BYTES: &[u8] = &[0xa4, b't', b'e', b's', b't'];
#[test]
fn untyped_response_should_support_converting_to_bytes() {
let bytes = Response {
header: header!(),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
let untyped_response = UntypedResponse::from_slice(&bytes).unwrap();
assert_eq!(untyped_response.to_bytes(), bytes);
}
#[test]
fn untyped_response_should_support_converting_to_bytes_with_header() {
let bytes = Response {
header: header!("key" -> 123),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
@ -271,9 +360,32 @@ mod tests {
assert_eq!(untyped_response.to_bytes(), bytes);
}
#[test]
fn untyped_response_should_support_parsing_from_response_bytes_with_header() {
let bytes = Response {
header: header!("key" -> 123),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(utils::serialize_to_vec(&header!("key" -> 123)).unwrap()),
id: Cow::Borrowed("some id"),
origin_id: Cow::Borrowed("some origin id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
})
);
}
#[test]
fn untyped_response_should_support_parsing_from_response_bytes_with_valid_payload() {
let bytes = Response {
header: header!(),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
@ -284,6 +396,7 @@ mod tests {
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(vec![]),
id: Cow::Borrowed("some id"),
origin_id: Cow::Borrowed("some origin id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
@ -295,6 +408,7 @@ mod tests {
fn untyped_response_should_support_parsing_from_response_bytes_with_invalid_payload() {
// Response with id < 32 bytes
let mut bytes = Response {
header: header!(),
id: "".to_string(),
origin_id: "".to_string(),
payload: true,
@ -309,6 +423,7 @@ mod tests {
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(vec![]),
id: Cow::Owned("".to_string()),
origin_id: Cow::Owned("".to_string()),
payload: Cow::Owned(vec![TRUE_BYTE, NEVER_USED_BYTE]),
@ -316,6 +431,31 @@ mod tests {
);
}
#[test]
fn untyped_response_should_support_parsing_full_request() {
let input = [
&[0x84],
HEADER_FIELD_BYTES,
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
ORIGIN_ID_FIELD_BYTES,
&[0xa2, b'o', b'g'],
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat();
// Convert into typed so we can test
let untyped_response = UntypedResponse::from_slice(&input).unwrap();
let response: Response<bool> = untyped_response.to_typed_response().unwrap();
assert_eq!(response.header, header!("key" -> "value", "num" -> 123));
assert_eq!(response.id, "test");
assert_eq!(response.origin_id, "og");
assert!(response.payload);
}
#[test]
fn untyped_response_should_fail_to_parse_if_given_bytes_not_representing_a_response() {
// Empty byte slice
@ -336,10 +476,50 @@ mod tests {
Err(UntypedResponseParseError::WrongType)
);
// Invalid header key
assert_eq!(
UntypedResponse::from_slice(
[
&[0x84],
&[0xa0], // header key would be defined here, set to empty str
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
ORIGIN_ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedResponseParseError::InvalidHeaderKey)
);
// Invalid header bytes
assert_eq!(
UntypedResponse::from_slice(
[
&[0x84],
HEADER_FIELD_BYTES,
&[0xa0], // header would be defined here, set to empty str
ID_FIELD_BYTES,
TEST_STR_BYTES,
ORIGIN_ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedResponseParseError::InvalidHeader)
);
// Missing fields (corrupt data)
assert_eq!(
UntypedResponse::from_slice(&[0x83]),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidIdKey)
);
// Missing id field (has valid data itself)
@ -357,7 +537,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidIdKey)
);
// Non-str id field value
@ -411,7 +591,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidOriginIdKey)
);
// Non-str origin_id field value
@ -465,7 +645,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidPayloadKey)
);
}
}

@ -0,0 +1,112 @@
use std::borrow::Cow;
use std::io;
use std::ops::{Deref, DerefMut};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::common::utils;
/// Generic value type for data passed through header.
///
/// Thin newtype over [`serde_json::Value`]; `#[serde(transparent)]` makes it
/// serialize/deserialize exactly like the wrapped JSON value.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Value(serde_json::Value);
impl Value {
    /// Creates a new [`Value`] from anything convertible into the underlying
    /// [`serde_json::Value`].
    pub fn new(value: impl Into<serde_json::Value>) -> Self {
        Self(value.into())
    }

    /// Serializes the value into bytes.
    ///
    /// # Errors
    ///
    /// Returns an error if serialization fails.
    pub fn to_vec(&self) -> io::Result<Vec<u8>> {
        utils::serialize_to_vec(self)
    }

    /// Deserializes the value from bytes.
    ///
    /// # Errors
    ///
    /// Returns an error if the bytes do not represent a valid value.
    pub fn from_slice(slice: &[u8]) -> io::Result<Self> {
        utils::deserialize_from_slice(slice)
    }

    /// Attempts to convert this generic value to a specific type `T`,
    /// consuming the value.
    ///
    /// # Errors
    ///
    /// Returns an [`io::ErrorKind::InvalidData`] error when the underlying
    /// JSON value cannot be deserialized as `T`.
    pub fn cast_as<T>(self) -> io::Result<T>
    where
        T: DeserializeOwned,
    {
        match serde_json::from_value(self.0) {
            Ok(value) => Ok(value),
            Err(err) => Err(io::Error::new(io::ErrorKind::InvalidData, err)),
        }
    }
}
impl Deref for Value {
    type Target = serde_json::Value;

    // Expose the wrapped serde_json::Value's read-only API directly on Value.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Value {
    // Allow in-place mutation of the wrapped serde_json::Value.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// Generates `From<$type> for Value` impls for each listed type, delegating
/// to the existing `From<$type> for serde_json::Value` conversions.
macro_rules! impl_from {
    ($($type:ty),+) => {
        $(
            impl From<$type> for Value {
                fn from(x: $type) -> Self {
                    Self(From::from(x))
                }
            }
        )+
    };
}
// Primitive and serde_json types that already convert into serde_json::Value.
impl_from!(
    (),
    i8, i16, i32, i64, isize,
    u8, u16, u32, u64, usize,
    f32, f64,
    bool, String, serde_json::Number,
    serde_json::Map<String, serde_json::Value>
);
impl<'a, T> From<&'a [T]> for Value
where
    T: Clone + Into<serde_json::Value>,
{
    /// Converts a slice into a JSON array value, cloning each element.
    fn from(x: &'a [T]) -> Self {
        Self(serde_json::Value::from(x))
    }
}
impl<'a> From<&'a str> for Value {
fn from(x: &'a str) -> Self {
Self(From::from(x))
}
}
impl<'a> From<Cow<'a, str>> for Value {
fn from(x: Cow<'a, str>) -> Self {
Self(From::from(x))
}
}
impl<T> From<Option<T>> for Value
where
    T: Into<serde_json::Value>,
{
    /// Converts an option into a JSON value (`None` becomes JSON null).
    fn from(x: Option<T>) -> Self {
        Self(serde_json::Value::from(x))
    }
}
impl<T> From<Vec<T>> for Value
where
    T: Into<serde_json::Value>,
{
    /// Converts a vector into a JSON array value, consuming the vector.
    fn from(x: Vec<T>) -> Self {
        Self(serde_json::Value::from(x))
    }
}

@ -9,7 +9,7 @@ use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use super::{InmemoryTransport, Interest, Ready, Reconnectable, Transport};
use crate::common::utils;
use crate::common::{utils, SecretKey32};
mod backup;
mod codec;

@ -3,9 +3,7 @@ use std::{fmt, io};
use derive_more::Display;
use super::{Codec, Frame};
mod key;
pub use key::*;
use crate::common::{SecretKey, SecretKey32};
/// Represents the type of encryption for a [`EncryptionCodec`]
#[derive(

@ -8,7 +8,7 @@ pub struct PlainCodec;
impl PlainCodec {
pub fn new() -> Self {
Self::default()
Self
}
}

@ -6,7 +6,7 @@ use p256::PublicKey;
use rand::rngs::OsRng;
use sha2::Sha256;
use super::SecretKey32;
use crate::common::SecretKey32;
mod pkb;
pub use pkb::PublicKeyBytes;

@ -0,0 +1,132 @@
use semver::{Comparator, Op, Prerelease, Version as SemVer};
use std::fmt;
/// Represents a version and compatibility rules.
#[derive(Clone, Debug)]
pub struct Version {
    // Concrete semantic version (major.minor.patch).
    inner: SemVer,
    // Inclusive lower bound: `>=major.minor.patch`.
    lower: Comparator,
    // Exclusive upper bound: the next breaking release (`<major+1`, or
    // `<0.(minor+1)` when major is 0).
    upper: Comparator,
}
impl Version {
    /// Creates a new version in the form `major.minor.patch` with a ruleset that is used to check
    /// other versions such that `>=0.1.2, <0.2.0` or `>=1.2.3, <2` depending on whether or not the
    /// major version is `0`.
    ///
    /// ```
    /// use distant_net::common::Version;
    ///
    /// // Matching versions are compatible
    /// let a = Version::new(1, 2, 3);
    /// let b = Version::new(1, 2, 3);
    /// assert!(a.is_compatible_with(&b));
    ///
    /// // Version 1.2.3 is compatible with 1.2.4, but not the other way
    /// let a = Version::new(1, 2, 3);
    /// let b = Version::new(1, 2, 4);
    /// assert!(a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    ///
    /// // Version 1.2.3 is compatible with 1.3.0, but not 2
    /// let a = Version::new(1, 2, 3);
    /// assert!(a.is_compatible_with(&Version::new(1, 3, 0)));
    /// assert!(!a.is_compatible_with(&Version::new(2, 0, 0)));
    ///
    /// // Version 0.1.2 is compatible with 0.1.3, but not the other way
    /// let a = Version::new(0, 1, 2);
    /// let b = Version::new(0, 1, 3);
    /// assert!(a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    ///
    /// // Version 0.1.2 is not compatible with 0.2
    /// let a = Version::new(0, 1, 2);
    /// let b = Version::new(0, 2, 0);
    /// assert!(!a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    /// ```
    pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
        Self {
            inner: SemVer::new(major, minor, patch),
            // Lower bound: this exact version or newer (`>=major.minor.patch`).
            lower: Comparator {
                op: Op::GreaterEq,
                major,
                minor: Some(minor),
                patch: Some(patch),
                pre: Prerelease::EMPTY,
            },
            // Upper bound: first breaking release. For `0.x` versions a minor
            // bump is breaking (`<0.(minor+1)`); otherwise `<(major+1)`.
            upper: Comparator {
                op: Op::Less,
                major: if major == 0 { 0 } else { major + 1 },
                minor: if major == 0 { Some(minor + 1) } else { None },
                patch: None,
                pre: Prerelease::EMPTY,
            },
        }
    }

    /// Returns true if this version is compatible with another version.
    ///
    /// `other` must satisfy BOTH of this version's bounds, i.e. fall within
    /// `[self, next breaking release of self)`.
    pub fn is_compatible_with(&self, other: &Self) -> bool {
        self.lower.matches(&other.inner) && self.upper.matches(&other.inner)
    }

    /// Converts from a collection of bytes into a version using the byte form major/minor/patch
    /// using big endian.
    ///
    /// Layout: bytes `[0..8]` = major, `[8..16]` = minor, `[16..24]` = patch.
    pub const fn from_be_bytes(bytes: [u8; 24]) -> Self {
        Self::new(
            u64::from_be_bytes([
                bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
            ]),
            u64::from_be_bytes([
                bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
                bytes[15],
            ]),
            u64::from_be_bytes([
                bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22],
                bytes[23],
            ]),
        )
    }

    /// Converts the version into a byte form of major/minor/patch using big endian.
    ///
    /// Inverse of [`Version::from_be_bytes`]; the array is spelled out
    /// element-by-element to keep the function `const`.
    pub const fn to_be_bytes(&self) -> [u8; 24] {
        let major = self.inner.major.to_be_bytes();
        let minor = self.inner.minor.to_be_bytes();
        let patch = self.inner.patch.to_be_bytes();
        [
            major[0], major[1], major[2], major[3], major[4], major[5], major[6], major[7],
            minor[0], minor[1], minor[2], minor[3], minor[4], minor[5], minor[6], minor[7],
            patch[0], patch[1], patch[2], patch[3], patch[4], patch[5], patch[6], patch[7],
        ]
    }
}
impl Default for Version {
    /// Default version is `0.0.0`.
    ///
    /// Per the `0.x` rules in [`Version::new`], the default is only
    /// compatible with `0.0.z` versions (`>=0.0.0, <0.1`).
    fn default() -> Self {
        Self::new(0, 0, 0)
    }
}
impl fmt::Display for Version {
    /// Formats as the inner semver version string (e.g. `1.2.3`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.inner, f)
    }
}
impl From<semver::Version> for Version {
    /// Creates a new [`Version`] using the major, minor, and patch information from
    /// [`semver::Version`].
    fn from(version: semver::Version) -> Self {
        // Derive the compatibility bounds from the numeric triple, then keep
        // the complete original version (including any pre-release metadata)
        // as the inner value.
        let Self { lower, upper, .. } = Self::new(version.major, version.minor, version.patch);
        Self {
            inner: version,
            lower,
            upper,
        }
    }
}
impl From<Version> for semver::Version {
    /// Discards the compatibility bounds and returns the inner semver version.
    fn from(version: Version) -> Self {
        version.inner
    }
}

@ -1,8 +1,17 @@
#![doc = include_str!("../README.md")]
#[doc = include_str!("../README.md")]
#[cfg(doctest)]
pub struct ReadmeDoctests;
mod authentication;
pub mod client;
pub mod common;
pub mod manager;
pub mod server;
pub use client::{Client, ReconnectStrategy};
/// Authentication functionality tied to network operations.
pub use distant_auth as auth;
pub use server::Server;
pub use {log, paste};

@ -5,3 +5,12 @@ mod server;
pub use client::*;
pub use data::*;
pub use server::*;
use crate::common::Version;
/// Represents the version associated with the manager's protocol.
///
/// Parsed at compile time from this crate's `CARGO_PKG_VERSION_*` values, so
/// the protocol version always tracks the published crate version.
pub const PROTOCOL_VERSION: Version = Version::new(
    const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64),
);

@ -1,13 +1,13 @@
use std::io;
use distant_auth::msg::{Authentication, AuthenticationResponse};
use distant_auth::AuthHandler;
use log::*;
use crate::client::Client;
use crate::common::authentication::msg::{Authentication, AuthenticationResponse};
use crate::common::authentication::AuthHandler;
use crate::common::{ConnectionId, Destination, Map, Request};
use crate::manager::data::{
ConnectionInfo, ConnectionList, ManagerCapabilities, ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerRequest, ManagerResponse, SemVer,
};
mod channel;
@ -231,12 +231,12 @@ impl ManagerClient {
RawChannel::spawn(connection_id, self).await
}
/// Retrieves a list of supported capabilities
pub async fn capabilities(&mut self) -> io::Result<ManagerCapabilities> {
trace!("capabilities()");
let res = self.send(ManagerRequest::Capabilities).await?;
/// Retrieves the version of the manager.
pub async fn version(&mut self) -> io::Result<SemVer> {
trace!("version()");
let res = self.send(ManagerRequest::Version).await?;
match res.payload {
ManagerResponse::Capabilities { supported } => Ok(supported),
ManagerResponse::Version { version } => Ok(version),
ManagerResponse::Error { description } => {
Err(io::Error::new(io::ErrorKind::Other, description))
}
@ -298,9 +298,10 @@ impl ManagerClient {
#[cfg(test)]
mod tests {
use distant_auth::DummyAuthHandler;
use super::*;
use crate::client::UntypedClient;
use crate::common::authentication::DummyAuthHandler;
use crate::common::{Connection, InmemoryTransport, Request, Response};
fn setup() -> (ManagerClient, Connection<InmemoryTransport>) {

@ -1,8 +1,6 @@
pub type ManagerChannelId = u32;
pub type ManagerAuthenticationId = u32;
mod capabilities;
pub use capabilities::*;
pub use semver::Version as SemVer;
mod info;
pub use info::*;

@ -1,212 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
use super::ManagerCapabilityKind;
/// Set of supported capabilities for a manager
///
/// Backed by a `HashSet`; `#[serde(transparent)]` serializes it as a bare set
/// rather than a wrapping object.
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(transparent)]
pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet<ManagerCapability>);
impl ManagerCapabilities {
    /// Return set of capabilities encompassing all possible capabilities
    pub fn all() -> Self {
        Self(
            ManagerCapabilityKind::iter()
                .map(ManagerCapability::from)
                .collect(),
        )
    }

    /// Return empty set of capabilities
    pub fn none() -> Self {
        Self(HashSet::new())
    }

    /// Returns true if the capability with described kind is included
    pub fn contains(&self, kind: impl AsRef<str>) -> bool {
        // Probe value built with an empty description.
        // NOTE(review): this only works if ManagerCapability's Eq/Hash are
        // based solely on `kind` — confirm against its definition.
        let cap = ManagerCapability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        };
        self.0.contains(&cap)
    }

    /// Adds the specified capability to the set of capabilities
    ///
    /// * If the set did not have this capability, returns `true`
    /// * If the set did have this capability, returns `false`
    pub fn insert(&mut self, cap: impl Into<ManagerCapability>) -> bool {
        self.0.insert(cap.into())
    }

    /// Removes the capability with the described kind, returning the capability
    pub fn take(&mut self, kind: impl AsRef<str>) -> Option<ManagerCapability> {
        // Same kind-only probe pattern as `contains` above.
        let cap = ManagerCapability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        };
        self.0.take(&cap)
    }

    /// Removes the capability with the described kind, returning true if it existed
    pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
        // Same kind-only probe pattern as `contains` above.
        let cap = ManagerCapability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        };
        self.0.remove(&cap)
    }

    /// Converts into vec of capabilities sorted by kind
    pub fn into_sorted_vec(self) -> Vec<ManagerCapability> {
        let mut this = self.0.into_iter().collect::<Vec<_>>();
        this.sort_unstable();
        this
    }
}
#[cfg(feature = "schemars")]
impl ManagerCapabilities {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapabilities)
}
}
impl BitAnd for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitand(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitand(&rhs.0))
}
}
impl BitOr for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitor(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitor(&rhs.0))
}
}
impl BitOr<ManagerCapability> for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitor(self, rhs: ManagerCapability) -> Self::Output {
let mut other = ManagerCapabilities::none();
other.0.insert(rhs);
self.bitor(&other)
}
}
impl BitXor for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitxor(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitxor(&rhs.0))
}
}
impl FromIterator<ManagerCapability> for ManagerCapabilities {
fn from_iter<I: IntoIterator<Item = ManagerCapability>>(iter: I) -> Self {
let mut this = ManagerCapabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not
/// description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct ManagerCapability {
/// Label describing the kind of capability
pub kind: String,
/// Information about the capability
pub description: String,
}
impl ManagerCapability {
/// Will convert the [`ManagerCapability`]'s `kind` into a known [`ManagerCapabilityKind`] if
/// possible, returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<ManagerCapabilityKind> {
ManagerCapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for ManagerCapability {
fn eq(&self, other: &Self) -> bool {
self.kind.eq_ignore_ascii_case(&other.kind)
}
}
impl Eq for ManagerCapability {}
impl PartialOrd for ManagerCapability {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ManagerCapability {
fn cmp(&self, other: &Self) -> Ordering {
self.kind
.to_ascii_lowercase()
.cmp(&other.kind.to_ascii_lowercase())
}
}
impl Hash for ManagerCapability {
fn hash<H: Hasher>(&self, state: &mut H) {
self.kind.to_ascii_lowercase().hash(state);
}
}
impl From<ManagerCapabilityKind> for ManagerCapability {
/// Creates a new capability using the kind's default message
fn from(kind: ManagerCapabilityKind) -> Self {
Self {
kind: kind.to_string(),
description: kind
.get_message()
.map(ToString::to_string)
.unwrap_or_default(),
}
}
}
#[cfg(feature = "schemars")]
impl ManagerCapability {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapability)
}
}
#[cfg(feature = "schemars")]
impl ManagerCapabilityKind {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapabilityKind)
}
}

@ -1,40 +1,17 @@
use derive_more::IsVariant;
use distant_auth::msg::AuthenticationResponse;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
use super::{ManagerAuthenticationId, ManagerChannelId};
use crate::common::authentication::msg::AuthenticationResponse;
use crate::common::{ConnectionId, Destination, Map, UntypedRequest};
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, EnumDiscriminants, Serialize, Deserialize)]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[cfg_attr(
feature = "schemars",
strum_discriminants(derive(schemars::JsonSchema))
)]
#[strum_discriminants(name(ManagerCapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum ManagerRequest {
/// Retrieve information about the server's capabilities
#[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
Capabilities,
/// Retrieve information about the manager's version.
Version,
/// Launch a server using the manager
#[strum_discriminants(strum(message = "Supports launching a server on remote machines"))]
Launch {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -44,7 +21,6 @@ pub enum ManagerRequest {
},
/// Initiate a connection through the manager
#[strum_discriminants(strum(message = "Supports connecting to remote servers"))]
Connect {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -54,7 +30,6 @@ pub enum ManagerRequest {
},
/// Submit some authentication message for the manager to use with an active connection
#[strum_discriminants(strum(message = "Supports authenticating with a remote server"))]
Authenticate {
/// Id of the authentication request that is being responded to
id: ManagerAuthenticationId,
@ -64,16 +39,12 @@ pub enum ManagerRequest {
},
/// Opens a channel for communication with an already-connected server
#[strum_discriminants(strum(message = "Supports opening a channel with a remote server"))]
OpenChannel {
/// Id of the connection
id: ConnectionId,
},
/// Sends data through channel
#[strum_discriminants(strum(
message = "Supports sending data through a channel with a remote server"
))]
Channel {
/// Id of the channel
id: ManagerChannelId,
@ -83,21 +54,17 @@ pub enum ManagerRequest {
},
/// Closes an open channel
#[strum_discriminants(strum(message = "Supports closing a channel with a remote server"))]
CloseChannel {
/// Id of the channel to close
id: ManagerChannelId,
},
/// Retrieve information about a specific connection
#[strum_discriminants(strum(message = "Supports retrieving connection-specific information"))]
Info { id: ConnectionId },
/// Kill a specific connection
#[strum_discriminants(strum(message = "Supports killing a remote connection"))]
Kill { id: ConnectionId },
/// Retrieve list of connections being managed
#[strum_discriminants(strum(message = "Supports retrieving a list of managed connections"))]
List,
}

@ -1,9 +1,7 @@
use distant_auth::msg::Authentication;
use serde::{Deserialize, Serialize};
use super::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
};
use crate::common::authentication::msg::Authentication;
use super::{ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, SemVer};
use crate::common::{ConnectionId, Destination, UntypedResponse};
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -15,8 +13,8 @@ pub enum ManagerResponse {
/// Indicates that some error occurred during a request
Error { description: String },
/// Response to retrieving information about the manager's capabilities
Capabilities { supported: ManagerCapabilities },
/// Information about the manager's version.
Version { version: SemVer },
/// Confirmation of a server being launched
Launched {

@ -3,16 +3,16 @@ use std::io;
use std::sync::Arc;
use async_trait::async_trait;
use distant_auth::msg::AuthenticationResponse;
use log::*;
use tokio::sync::{oneshot, RwLock};
use crate::common::authentication::msg::AuthenticationResponse;
use crate::common::{ConnectionId, Destination, Map};
use crate::manager::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, ManagerRequest,
ManagerResponse, SemVer,
};
use crate::server::{Server, ServerCtx, ServerHandler};
use crate::server::{RequestCtx, Server, ServerHandler};
mod authentication;
pub use authentication::*;
@ -31,6 +31,10 @@ pub struct ManagerServer {
/// Configuration settings for the server
config: Config,
/// Holds on to open channels feeding data back from a server to some connected client,
/// enabling us to cancel the tasks on demand
channels: RwLock<HashMap<ManagerChannelId, ManagerChannel>>,
/// Mapping of connection id -> connection
connections: RwLock<HashMap<ConnectionId, ManagerConnection>>,
@ -46,6 +50,7 @@ impl ManagerServer {
pub fn new(config: Config) -> Server<Self> {
Server::new().handler(Self {
config,
channels: RwLock::new(HashMap::new()),
connections: RwLock::new(HashMap::new()),
registry: Arc::new(RwLock::new(HashMap::new())),
})
@ -133,9 +138,11 @@ impl ManagerServer {
Ok(id)
}
/// Retrieves the list of supported capabilities for this manager
async fn capabilities(&self) -> io::Result<ManagerCapabilities> {
Ok(ManagerCapabilities::all())
/// Retrieves the manager's version.
async fn version(&self) -> io::Result<SemVer> {
env!("CARGO_PKG_VERSION")
.parse()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
}
/// Retrieves information about the connection to the server with the specified `id`
@ -168,7 +175,25 @@ impl ManagerServer {
/// Kills the connection to the server with the specified `id`
async fn kill(&self, id: ConnectionId) -> io::Result<()> {
match self.connections.write().await.remove(&id) {
Some(_) => Ok(()),
Some(connection) => {
// Close any open channels
if let Ok(ids) = connection.channel_ids().await {
let mut channels_lock = self.channels.write().await;
for id in ids {
if let Some(channel) = channels_lock.remove(&id) {
if let Err(x) = channel.close() {
error!("[Conn {id}] {x}");
}
}
}
}
// Make sure the connection is aborted so nothing new can happen
debug!("[Conn {id}] Aborting");
connection.abort();
Ok(())
}
None => Err(io::Error::new(
io::ErrorKind::NotConnected,
"No connection found",
@ -177,104 +202,120 @@ impl ManagerServer {
}
}
#[derive(Default)]
pub struct DistantManagerServerConnection {
/// Holds on to open channels feeding data back from a server to some connected client,
/// enabling us to cancel the tasks on demand
channels: RwLock<HashMap<ManagerChannelId, ManagerChannel>>,
}
#[async_trait]
impl ServerHandler for ManagerServer {
type LocalData = DistantManagerServerConnection;
type Request = ManagerRequest;
type Response = ManagerResponse;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
let ServerCtx {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
debug!("manager::on_request({ctx:?})");
let RequestCtx {
connection_id,
request,
reply,
local_data,
} = ctx;
let response = match request.payload {
ManagerRequest::Capabilities {} => match self.capabilities().await {
Ok(supported) => ManagerResponse::Capabilities { supported },
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Version {} => {
debug!("Looking up version");
match self.version().await {
Ok(version) => ManagerResponse::Version { version },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Launch {
destination,
options,
} => match self
.launch(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(destination) => ManagerResponse::Launched { destination },
Err(x) => ManagerResponse::from(x),
},
} => {
info!("Launching {destination} with {options}");
match self
.launch(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(destination) => ManagerResponse::Launched { destination },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Connect {
destination,
options,
} => match self
.connect(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(id) => ManagerResponse::Connected { id },
Err(x) => ManagerResponse::from(x),
},
} => {
info!("Connecting to {destination} with {options}");
match self
.connect(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(id) => ManagerResponse::Connected { id },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Authenticate { id, msg } => {
trace!("Retrieving authentication callback registry");
match self.registry.write().await.remove(&id) {
Some(cb) => match cb.send(msg) {
Ok(_) => return,
Err(_) => ManagerResponse::Error {
description: "Unable to forward authentication callback".to_string(),
},
},
Some(cb) => {
trace!("Sending {msg:?} through authentication callback");
match cb.send(msg) {
Ok(_) => return,
Err(_) => ManagerResponse::Error {
description: "Unable to forward authentication callback"
.to_string(),
},
}
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid authentication id",
)),
}
}
ManagerRequest::OpenChannel { id } => match self.connections.read().await.get(&id) {
Some(connection) => match connection.open_channel(reply.clone()) {
Ok(channel) => {
debug!("[Conn {id}] Channel {} has been opened", channel.id());
let id = channel.id();
local_data.channels.write().await.insert(id, channel);
ManagerResponse::ChannelOpened { id }
ManagerRequest::OpenChannel { id } => {
debug!("Attempting to retrieve connection {id}");
match self.connections.read().await.get(&id) {
Some(connection) => {
debug!("Opening channel through connection {id}");
match connection.open_channel(reply.clone()) {
Ok(channel) => {
info!("[Conn {id}] Channel {} has been opened", channel.id());
let id = channel.id();
self.channels.write().await.insert(id, channel);
ManagerResponse::ChannelOpened { id }
}
Err(x) => ManagerResponse::from(x),
}
}
Err(x) => ManagerResponse::from(x),
},
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Connection does not exist",
)),
},
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Connection does not exist",
)),
}
}
ManagerRequest::Channel { id, request } => {
match local_data.channels.read().await.get(&id) {
debug!("Attempting to retrieve channel {id}");
match self.channels.read().await.get(&id) {
// TODO: For now, we are NOT sending back a response to acknowledge
// a successful channel send. We could do this in order for
// the client to listen for a complete send, but is it worth it?
Some(channel) => match channel.send(request) {
Ok(_) => return,
Err(x) => ManagerResponse::from(x),
},
Some(channel) => {
debug!("Sending {request:?} through channel {id}");
match channel.send(request) {
Ok(_) => return,
Err(x) => ManagerResponse::from(x),
}
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Channel is not open or does not exist",
@ -282,35 +323,57 @@ impl ServerHandler for ManagerServer {
}
}
ManagerRequest::CloseChannel { id } => {
match local_data.channels.write().await.remove(&id) {
Some(channel) => match channel.close() {
Ok(_) => {
debug!("Channel {id} has been closed");
ManagerResponse::ChannelClosed { id }
debug!("Attempting to remove channel {id}");
match self.channels.write().await.remove(&id) {
Some(channel) => {
debug!("Removed channel {}", channel.id());
match channel.close() {
Ok(_) => {
info!("Channel {id} has been closed");
ManagerResponse::ChannelClosed { id }
}
Err(x) => ManagerResponse::from(x),
}
Err(x) => ManagerResponse::from(x),
},
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Channel is not open or does not exist",
)),
}
}
ManagerRequest::Info { id } => match self.info(id).await {
Ok(info) => ManagerResponse::Info(info),
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::List => match self.list().await {
Ok(list) => ManagerResponse::List(list),
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Kill { id } => match self.kill(id).await {
Ok(()) => ManagerResponse::Killed,
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Info { id } => {
debug!("Attempting to retrieve information for connection {id}");
match self.info(id).await {
Ok(info) => {
info!("Retrieved information for connection {id}");
ManagerResponse::Info(info)
}
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::List => {
debug!("Attempting to retrieve the list of connections");
match self.list().await {
Ok(list) => {
info!("Retrieved list of connections");
ManagerResponse::List(list)
}
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Kill { id } => {
debug!("Attempting to kill connection {id}");
match self.kill(id).await {
Ok(()) => {
info!("Killed connection {id}");
ManagerResponse::Killed
}
Err(x) => ManagerResponse::from(x),
}
}
};
if let Err(x) = reply.send(response).await {
if let Err(x) = reply.send(response) {
error!("[Conn {}] {}", connection_id, x);
}
}
@ -349,13 +412,14 @@ mod tests {
let authenticator = ManagerAuthenticator {
reply: ServerReply {
origin_id: format!("{}", rand::random::<u8>()),
tx: mpsc::channel(1).0,
tx: mpsc::unbounded_channel().0,
},
registry: Arc::clone(&registry),
};
let server = ManagerServer {
config,
channels: RwLock::new(HashMap::new()),
connections: RwLock::new(HashMap::new()),
registry,
};

@ -3,10 +3,10 @@ use std::io;
use std::sync::Arc;
use async_trait::async_trait;
use distant_auth::msg::*;
use distant_auth::Authenticator;
use tokio::sync::{oneshot, RwLock};
use crate::common::authentication::msg::*;
use crate::common::authentication::Authenticator;
use crate::manager::data::{ManagerAuthenticationId, ManagerResponse};
use crate::server::ServerReply;
@ -29,19 +29,15 @@ impl ManagerAuthenticator {
let id = rand::random();
self.registry.write().await.insert(id, tx);
self.reply
.send(ManagerResponse::Authenticate { id, msg })
.await?;
self.reply.send(ManagerResponse::Authenticate { id, msg })?;
rx.await
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
}
/// Sends an [`Authentication`] `msg` without expecting a reply. No callback is stored.
async fn fire(&self, msg: Authentication) -> io::Result<()> {
fn fire(&self, msg: Authentication) -> io::Result<()> {
let id = rand::random();
self.reply
.send(ManagerResponse::Authenticate { id, msg })
.await?;
self.reply.send(ManagerResponse::Authenticate { id, msg })?;
Ok(())
}
}
@ -89,18 +85,18 @@ impl Authenticator for ManagerAuthenticator {
}
async fn info(&mut self, info: Info) -> io::Result<()> {
self.fire(Authentication::Info(info)).await
self.fire(Authentication::Info(info))
}
async fn error(&mut self, error: Error) -> io::Result<()> {
self.fire(Authentication::Error(error)).await
self.fire(Authentication::Error(error))
}
async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
self.fire(Authentication::StartMethod(start_method)).await
self.fire(Authentication::StartMethod(start_method))
}
async fn finished(&mut self) -> io::Result<()> {
self.fire(Authentication::Finished).await
self.fire(Authentication::Finished)
}
}

@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::io;
use std::{fmt, io};
use log::*;
use tokio::sync::mpsc;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::client::{Mailbox, UntypedClient};
@ -62,11 +62,17 @@ impl ManagerConnection {
pub async fn spawn(
spawn: Destination,
options: Map,
client: UntypedClient,
mut client: UntypedClient,
) -> io::Result<Self> {
let connection_id = rand::random();
let (tx, rx) = mpsc::unbounded_channel();
// NOTE: Ensure that the connection is severed when the client is dropped; otherwise, when
// the connection is terminated via aborting it or the connection being dropped, the
// connection will persist which can cause problems such as lonely shutdown of the server
// never triggering!
client.shutdown_on_drop(true);
let (request_tx, request_rx) = mpsc::unbounded_channel();
let action_task = tokio::spawn(action_task(connection_id, rx, request_tx));
let response_task = tokio::spawn(response_task(
@ -105,16 +111,41 @@ impl ManagerConnection {
tx: self.tx.clone(),
})
}
}
impl Drop for ManagerConnection {
fn drop(&mut self) {
pub async fn channel_ids(&self) -> io::Result<Vec<ManagerChannelId>> {
let (tx, rx) = oneshot::channel();
self.tx
.send(Action::GetRegistered { cb: tx })
.map_err(|x| {
io::Error::new(
io::ErrorKind::BrokenPipe,
format!("channel_ids failed: {x}"),
)
})?;
let channel_ids = rx.await.map_err(|x| {
io::Error::new(
io::ErrorKind::BrokenPipe,
format!("channel_ids callback dropped: {x}"),
)
})?;
Ok(channel_ids)
}
/// Aborts the tasks used to engage with the connection.
pub fn abort(&self) {
self.action_task.abort();
self.request_task.abort();
self.response_task.abort();
}
}
impl Drop for ManagerConnection {
fn drop(&mut self) {
self.abort();
}
}
enum Action {
Register {
id: ManagerChannelId,
@ -125,6 +156,10 @@ enum Action {
id: ManagerChannelId,
},
GetRegistered {
cb: oneshot::Sender<Vec<ManagerChannelId>>,
},
Read {
res: UntypedResponse<'static>,
},
@ -135,6 +170,18 @@ enum Action {
},
}
impl fmt::Debug for Action {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Register { id, .. } => write!(f, "Action::Register {{ id: {id}, .. }}"),
Self::Unregister { id } => write!(f, "Action::Unregister {{ id: {id} }}"),
Self::GetRegistered { .. } => write!(f, "Action::GetRegistered {{ .. }}"),
Self::Read { .. } => write!(f, "Action::Read {{ .. }}"),
Self::Write { id, .. } => write!(f, "Action::Write {{ id: {id}, .. }}"),
}
}
}
/// Internal task to process outgoing [`UntypedRequest`]s.
async fn request_task(
id: ConnectionId,
@ -142,10 +189,13 @@ async fn request_task(
mut rx: mpsc::UnboundedReceiver<UntypedRequest<'static>>,
) {
while let Some(req) = rx.recv().await {
trace!("[Conn {id}] Firing off request {}", req.id);
if let Err(x) = client.fire(req).await {
error!("[Conn {id}] Failed to send request: {x}");
}
}
trace!("[Conn {id}] Manager request task closed");
}
/// Internal task to process incoming [`UntypedResponse`]s.
@ -155,10 +205,17 @@ async fn response_task(
tx: mpsc::UnboundedSender<Action>,
) {
while let Some(res) = mailbox.next().await {
trace!(
"[Conn {id}] Receiving response {} to request {}",
res.id,
res.origin_id
);
if let Err(x) = tx.send(Action::Read { res }) {
error!("[Conn {id}] Failed to forward received response: {x}");
}
}
trace!("[Conn {id}] Manager response task closed");
}
/// Internal task to process [`Action`] items.
@ -174,6 +231,8 @@ async fn action_task(
let mut registered = HashMap::new();
while let Some(action) = rx.recv().await {
trace!("[Conn {id}] {action:?}");
match action {
Action::Register { id, reply } => {
registered.insert(id, reply);
@ -181,6 +240,9 @@ async fn action_task(
Action::Unregister { id } => {
registered.remove(&id);
}
Action::GetRegistered { cb } => {
let _ = cb.send(registered.keys().copied().collect());
}
Action::Read { mut res } => {
// Split {channel id}_{request id} back into pieces and
// update the origin id to match the request id only
@ -201,7 +263,8 @@ async fn action_task(
id: channel_id,
response: res,
};
if let Err(x) = reply.send(response).await {
if let Err(x) = reply.send(response) {
error!("[Conn {id}] {x}");
}
}
@ -217,4 +280,6 @@ async fn action_task(
}
}
}
trace!("[Conn {id}] Manager action task closed");
}

@ -2,9 +2,9 @@ use std::future::Future;
use std::io;
use async_trait::async_trait;
use distant_auth::Authenticator;
use crate::client::UntypedClient;
use crate::common::authentication::Authenticator;
use crate::common::{Destination, Map};
pub type BoxedLaunchHandler = Box<dyn LaunchHandler>;
@ -67,19 +67,15 @@ macro_rules! boxed_launch_handler {
let x: $crate::manager::BoxedLaunchHandler = Box::new(
|$destination: &$crate::common::Destination,
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::common::authentication::Authenticator| async {
$body
},
$authenticator: &mut dyn $crate::auth::Authenticator| async { $body },
);
x
}};
(move |$destination:ident, $options:ident, $authenticator:ident| $(async)? $body:block) => {{
let x: $crate::manager::BoxedLaunchHandler = Box::new(
move |$destination: &$crate::common::Destination,
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::common::authentication::Authenticator| async move {
$body
},
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::auth::Authenticator| async move { $body },
);
x
}};
@ -141,19 +137,15 @@ macro_rules! boxed_connect_handler {
let x: $crate::manager::BoxedConnectHandler = Box::new(
|$destination: &$crate::common::Destination,
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::common::authentication::Authenticator| async {
$body
},
$authenticator: &mut dyn $crate::auth::Authenticator| async { $body },
);
x
}};
(move |$destination:ident, $options:ident, $authenticator:ident| $(async)? $body:block) => {{
let x: $crate::manager::BoxedConnectHandler = Box::new(
move |$destination: &$crate::common::Destination,
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::common::authentication::Authenticator| async move {
$body
},
$options: &$crate::common::Map,
$authenticator: &mut dyn $crate::auth::Authenticator| async move { $body },
);
x
}};

@ -3,13 +3,13 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use distant_auth::Verifier;
use log::*;
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::sync::{broadcast, RwLock};
use crate::common::authentication::Verifier;
use crate::common::{Listener, Response, Transport};
use crate::common::{ConnectionId, Listener, Response, Transport, Version};
mod builder;
pub use builder::*;
@ -45,6 +45,9 @@ pub struct Server<T> {
/// Performs authentication using various methods
verifier: Verifier,
/// Version associated with the server used by clients to verify compatibility
version: Version,
}
/// Interface for a handler that receives connections and requests
@ -56,23 +59,21 @@ pub trait ServerHandler: Send {
/// Type of data sent back by the server
type Response;
/// Type of data to store locally tied to the specific connection
type LocalData: Send;
/// Invoked upon a new connection becoming established.
///
/// ### Note
///
/// This can be useful in performing some additional initialization on the connection's local
/// data prior to it being used anywhere else.
#[allow(unused_variables)]
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked upon an existing connection getting dropped.
#[allow(unused_variables)]
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked upon receiving a request from a client. The server should process this
/// request, which can be found in `ctx`, and send one or more replies in response.
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>);
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>);
}
impl Server<()> {
@ -83,6 +84,7 @@ impl Server<()> {
config: Default::default(),
handler: (),
verifier: Verifier::empty(),
version: Default::default(),
}
}
@ -117,6 +119,7 @@ impl<T> Server<T> {
config,
handler: self.handler,
verifier: self.verifier,
version: self.version,
}
}
@ -126,6 +129,7 @@ impl<T> Server<T> {
config: self.config,
handler,
verifier: self.verifier,
version: self.version,
}
}
@ -135,6 +139,17 @@ impl<T> Server<T> {
config: self.config,
handler: self.handler,
verifier,
version: self.version,
}
}
/// Consumes the current server, replacing its version with `version` and returning it.
pub fn version(self, version: Version) -> Self {
Self {
config: self.config,
handler: self.handler,
verifier: self.verifier,
version,
}
}
}
@ -144,11 +159,10 @@ where
T: ServerHandler + Sync + 'static,
T::Request: DeserializeOwned + Send + Sync + 'static,
T::Response: Serialize + Send + 'static,
T::LocalData: Default + Send + Sync + 'static,
{
/// Consumes the server, starting a task to process connections from the `listener` and
/// returning a [`ServerRef`] that can be used to control the active server instance.
pub fn start<L>(self, listener: L) -> io::Result<Box<dyn ServerRef>>
pub fn start<L>(self, listener: L) -> io::Result<ServerRef>
where
L: Listener + 'static,
L::Output: Transport + 'static,
@ -157,7 +171,7 @@ where
let (tx, rx) = broadcast::channel(1);
let task = tokio::spawn(self.task(Arc::clone(&state), listener, tx.clone(), rx));
Ok(Box::new(GenericServerRef { shutdown: tx, task }))
Ok(ServerRef { shutdown: tx, task })
}
/// Internal task that is run to receive connections and spawn connection tasks
@ -175,6 +189,7 @@ where
config,
handler,
verifier,
version,
} = self;
let handler = Arc::new(handler);
@ -224,8 +239,12 @@ where
.sleep_duration(config.connection_sleep)
.heartbeat_duration(config.connection_heartbeat)
.verifier(Arc::downgrade(&verifier))
.version(version.clone())
.spawn(),
);
// Clean up current tasks being tracked
connection_tasks.retain(|task| !task.is_finished());
}
// Once we stop listening, we still want to wait until all connections have terminated
@ -246,30 +265,29 @@ mod tests {
use std::time::Duration;
use async_trait::async_trait;
use distant_auth::{AuthenticationMethod, DummyAuthHandler, NoneAuthenticationMethod};
use test_log::test;
use tokio::sync::mpsc;
use super::*;
use crate::common::authentication::{
AuthenticationMethod, DummyAuthHandler, NoneAuthenticationMethod,
};
use crate::common::{Connection, InmemoryTransport, MpscListener, Request, Response};
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
pub struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = u16;
type Response = String;
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
Ok(())
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Always send back "hello"
ctx.reply.send("hello".to_string()).await.unwrap();
ctx.reply.send("hello".to_string()).unwrap();
}
}
@ -282,6 +300,7 @@ mod tests {
config,
handler: TestServerHandler,
verifier: Verifier::new(methods),
version: server_version!(),
}
}
@ -311,7 +330,7 @@ mod tests {
.expect("Failed to start server");
// Perform handshake and authentication with the server before beginning to send data
let mut connection = Connection::client(transport, DummyAuthHandler)
let mut connection = Connection::client(transport, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect to server");

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save