Compare commits

...

27 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Chip Senkbeil | 3fe1fba339 | Correct wget usage for installation | 9 months ago |
| Chip Senkbeil | 48f7eb74ec | Update readme example to use --daemon instead of & for background manager | 10 months ago |
| Chip Senkbeil | 96abcefdc5 | Add extra debug logging when starting a manager | 11 months ago |
| Chip Senkbeil | 22f3c2dd76 | Fix bugs in set permissions for CLI and distant-local | 11 months ago |
| Chip Senkbeil | 0320e7fe24 | Bump to v0.20.0 | 11 months ago |
| Chip Senkbeil | 9e48300e83 | Fix zombies being leftover from distant launch manager://localhost when servers self-terminate | 11 months ago |
| Chip Senkbeil | e304e6a689 | Fix shutting down killed connections from a manager | 11 months ago |
| Chip Senkbeil | 8972013716 | Refactor capabilities to version for manager, integrate version checking for client/server/manager, and define protocol version (#219) | 11 months ago |
| Chip Senkbeil | 0efb5aee4c | Add --shell support to CLI (#218) | 11 months ago |
| Chip Senkbeil | 56b3b8f4f1 | Fix CLI commands with --format json not outputting errors in JSON | 11 months ago |
| Chip Senkbeil | eb23b4e1ad | Fix win service | 11 months ago |
| Chip Senkbeil | dc7e9b5309 | Bump to alpha.12 | 11 months ago |
| Chip Senkbeil | e0b8769087 | Fix return code of --help and --version on cli | 11 months ago |
| Chip Senkbeil | 9bc50886bb | Update latest tagging with custom code that uses a personal access token to trigger workflows | 11 months ago |
| Chip Senkbeil | bd3b068651 | Add workflow to tag latest | 11 months ago |
| Chip Senkbeil | c61393750a | Bump minimum version of Rust to 1.70.0 | 11 months ago |
| Chip Senkbeil | 2abaf0b814 | Use sparse checkout during publish | 11 months ago |
| Chip Senkbeil | 0e03fc3011 | Reintroduce checkout to publish step | 11 months ago |
| Chip Senkbeil | cb8ea0507f | Bump to 0.20.0-alpha.11 and restore ci tests | 11 months ago |
| Chip Senkbeil | 8a34fec1f7 | Update README | 11 months ago |
| Chip Senkbeil | 6feeb2d012 | Tweaking release config until it works | 11 months ago |
| Chip Senkbeil | fefbe19a3c | Switch to stripping using cargo and supporting a latest release tag | 11 months ago |
| Chip Senkbeil | be7a15caa0 | Refactor generation commands to use --output for files and printing to stdout by default | 11 months ago |
| Chip Senkbeil | 84ea28402d | Add support for distant spawn -c 'cmd str' | 11 months ago |
| Chip Senkbeil | b74cba28df | Bump to v0.20.0-alpha.10 | 11 months ago |
| Chip Senkbeil | f4180f6245 | Change search default to not use standard filters, and provide options to set filters manually | 11 months ago |
| Chip Senkbeil | c250acdfb4 | Fix search task exiting on failing to start a search with distant-local | 11 months ago |

@ -87,7 +87,7 @@ jobs:
- { rust: stable, os: windows-latest, target: x86_64-pc-windows-msvc }
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
- { rust: 1.68.0, os: ubuntu-latest }
- { rust: 1.70.0, os: ubuntu-latest }
steps:
- uses: actions/checkout@v3
- name: Install Rust ${{ matrix.rust }}

@ -0,0 +1,24 @@
name: 'Tag latest'
on:
push:
branches:
- master
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Tag latest and push
env:
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
run: |
git config user.name "${GITHUB_ACTOR}"
git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
origin_url="$(git config --get remote.origin.url)"
origin_url="${origin_url/#https:\/\//https:\/\/$GITHUB_TOKEN@}" # add token to URL
git tag latest --force
git push "$origin_url" --tags --force

@ -0,0 +1,28 @@
name: 'Lock Threads'
on:
schedule:
- cron: '0 3 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
concurrency:
group: lock
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v4
with:
issue-inactive-days: '30'
issue-comment: >
I'm going to lock this issue because it has been closed for _30 days_ ⏳.
This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new
issue and complete the issue template so we can capture all the details
necessary to investigate further.
process-only: 'issues'

@ -5,402 +5,312 @@ on:
tags:
- v[0-9]+.[0-9]+.[0-9]+
- v[0-9]+.[0-9]+.[0-9]+-**
- latest
# Status of Targets:
#
# ✅ x86_64-apple-darwin
# ✅ aarch64-apple-darwin
#
# ✅ x86_64-pc-windows-msvc
# ✅ aarch64-pc-windows-msvc
#
# ✅ x86_64-unknown-linux-gnu
# ✅ aarch64-unknown-linux-gnu
# ❌ aarch64-linux-android (fails due to termios)
# ✅ armv7-unknown-linux-gnueabihf
#
# ✅ x86_64-unknown-linux-musl
# ✅ aarch64-unknown-linux-musl
#
# ✅ x86_64-unknown-freebsd
# ❓ aarch64-unknown-freebsd (works manually, but cannot cross-compile via CI)
#
# ❌ x86_64-unknown-netbsd (fails due to termios)
# ❌ aarch64-unknown-netbsd (???)
#
# ❌ x86_64-unknown-openbsd (fails due to rustc internal error at end)
# ❌ aarch64-unknown-openbsd (fails due to openssl-src)
#
jobs:
macos:
name: "Build release on MacOS"
name: "Build release on MacOS (${{ matrix.target }})"
runs-on: macos-11.0
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: macos
X86_ARCH: x86_64-apple-darwin
ARM_ARCH: aarch64-apple-darwin
X86_DIR: target/x86_64-apple-darwin/release
ARM_DIR: target/aarch64-apple-darwin/release
BUILD_BIN: distant
UNIVERSAL_REL_BIN: distant-macos
strategy:
matrix:
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
steps:
- uses: actions/checkout@v3
- name: Install Rust (x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- name: Install Rust (ARM)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARM_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
- name: Build binary (aarch64)
run: |
cargo build --release --all-features --target ${{ env.ARM_ARCH }}
ls -l ./${{ env.ARM_DIR }}
strip ./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
- name: Unify binaries
run: |
lipo -create -output ${{ env.UNIVERSAL_REL_BIN }} \
./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} \
./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
chmod +x ./${{ env.UNIVERSAL_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.UNIVERSAL_REL_BIN }}
windows:
name: "Build release on Windows"
runs-on: windows-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: win64
X86_ARCH: x86_64-pc-windows-msvc
X86_DIR: target/x86_64-pc-windows-msvc/release
BUILD_BIN: distant.exe
X86_REL_BIN: distant-win64.exe
steps:
- uses: actions/checkout@v2
- name: Install Rust (MSVC)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_REL_BIN }}
chmod +x ./${{ env.X86_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_REL_BIN }}
linux_gnu_x86:
name: "Build release on Linux (GNU x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-x86
X86_GNU_ARCH: x86_64-unknown-linux-gnu
X86_GNU_DIR: target/x86_64-unknown-linux-gnu/release
BUILD_BIN: distant
X86_GNU_REL_BIN: distant-linux64-gnu-x86
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (GNU x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_GNU_ARCH }}
ls -l ./${{ env.X86_GNU_DIR }}
strip ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_GNU_REL_BIN }}
chmod +x ./${{ env.X86_GNU_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_GNU_REL_BIN }}
linux_gnu_aarch64:
name: "Build release on Linux (GNU aarch64)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-aarch64
AARCH64_GNU_ARCH: aarch64-unknown-linux-gnu
AARCH64_GNU_DIR: target/aarch64-unknown-linux-gnu/release
BUILD_BIN: distant
AARCH64_GNU_REL_BIN: distant-linux64-gnu-aarch64
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU aarch64)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_GNU_ARCH }}
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-aarch64-linux-gnu)
- name: Build binary (${{ matrix.target }})
run: |
sudo apt update
sudo apt install -y gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Build binary (GNU aarch64)
run: |
cargo build --release --all-features --target ${{ env.AARCH64_GNU_ARCH }}
ls -l ./${{ env.AARCH64_GNU_DIR }}
/usr/aarch64-linux-gnu/bin/strip ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_GNU_REL_BIN }}
chmod +x ./${{ env.AARCH64_GNU_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_GNU_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
linux_gnu_arm_v7:
name: "Build release on Linux (GNU arm-v7)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-arm-v7
ARMV7_GNU_ARCH: armv7-unknown-linux-gnueabihf
ARMV7_GNU_DIR: target/armv7-unknown-linux-gnueabihf/release
BUILD_BIN: distant
ARMV7_GNU_REL_BIN: distant-linux64-gnu-arm-v7
macos_unify:
name: "Build universal binary on MacOS"
needs: [macos]
runs-on: macos-11.0
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU arm-v7)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARMV7_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-arm-linux-gnueabihf)
run: |
sudo apt update
sudo apt install -y gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
- name: Build binary (GNU arm-v7)
- uses: actions/download-artifact@v2
- name: Unify binaries
run: |
cargo build --release --all-features --target ${{ env.ARMV7_GNU_ARCH }}
ls -l ./${{ env.ARMV7_GNU_DIR }}
/usr/arm-linux-gnueabihf/bin/strip ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.ARMV7_GNU_REL_BIN }}
chmod +x ./${{ env.ARMV7_GNU_REL_BIN }}
lipo -create -output distant-universal-apple-darwin \
./x86_64-apple-darwin/distant-x86_64-apple-darwin \
./aarch64-apple-darwin/distant-aarch64-apple-darwin
chmod +x ./distant-universal-apple-darwin
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.ARMV7_GNU_REL_BIN }}
name: universal-apple-darwin
path: ./distant-universal-apple-darwin
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_x86:
name: "Build release on Linux (musl x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-x86
X86_MUSL_ARCH: x86_64-unknown-linux-musl
X86_MUSL_DIR: target/x86_64-unknown-linux-musl/release
BUILD_BIN: distant
X86_MUSL_REL_BIN: distant-linux64-musl-x86
windows:
name: "Build release on Windows (${{ matrix.target }})"
runs-on: windows-latest
strategy:
matrix:
target:
- x86_64-pc-windows-msvc
- aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL x86)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
run: |
sudo apt update
sudo apt install -y musl-tools
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL x86_64)
- name: Build binary (${{ matrix.target }})
run: |
cargo build --release --no-default-features --features ssh2 --target ${{ env.X86_MUSL_ARCH }}
ls -l ./${{ env.X86_MUSL_DIR }}
strip ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_MUSL_REL_BIN }}
chmod +x ./${{ env.X86_MUSL_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant.exe ./distant-${{ matrix.target }}.exe
chmod +x ./distant-${{ matrix.target }}.exe
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}.exe
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_aarch64:
name: "Build release on Linux (musl aarch64)"
linux:
name: "Build release on Linux (${{ matrix.target }})"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-aarch64
AARCH64_MUSL_ARCH: aarch64-unknown-linux-musl
AARCH64_MUSL_DIR: target/aarch64-unknown-linux-musl/release
BUILD_BIN: distant
AARCH64_MUSL_REL_BIN: distant-linux64-musl-aarch64
strategy:
matrix:
include:
- target: x86_64-unknown-linux-gnu
build: --all-features
cargo: cargo
- target: aarch64-unknown-linux-gnu
build: --all-features
deps: gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cargo
- target: armv7-unknown-linux-gnueabihf
build: --all-features
deps: gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
cargo: cargo
- target: x86_64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools
cargo: cargo
- target: aarch64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
- target: x86_64-unknown-freebsd
build: --all-features
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL aarch64)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
if: ${{ matrix.deps }}
run: |
sudo apt update
sudo apt install -y musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Install cross
env:
LINK: https://github.com/cross-rs/cross/releases/download
CROSS_VERSION: 0.2.4
CROSS_FILE: cross-x86_64-unknown-linux-musl
run: |
curl -L "$LINK/v$CROSS_VERSION/$CROSS_FILE.tar.gz" |
tar xz -C $HOME/.cargo/bin
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL aarch64)
sudo apt install -y ${{ matrix.deps }}
- name: Preparing system
if: ${{ matrix.prepare }}
run: ${{ matrix.prepare }}
- name: Build binary (${{ matrix.target }})
run: |
cross build --release --no-default-features --features ssh2 --target ${{ env.AARCH64_MUSL_ARCH }}
ls -l ./${{ env.AARCH64_MUSL_DIR }}
aarch64-linux-gnu-strip ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_MUSL_REL_BIN }}
chmod +x ./${{ env.AARCH64_MUSL_REL_BIN }}
${{ matrix.cargo }} build --release ${{ matrix.build }} --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
# bsd:
# name: "Build release on ${{ matrix.os.name }} (${{ matrix.os.target }})"
# runs-on: ${{ matrix.os.host }}
# strategy:
# matrix:
# os:
# - name: freebsd
# architecture: x86-64
# version: '13.2'
# host: macos-12
# target: x86_64-unknown-freebsd
# build: --all-features
# prepare: sudo pkg install -y openssl gmake lang/rust devel/llvm-devel
# - name: netbsd
# architecture: x86-64
# version: '9.3'
# host: macos-12
# target: x86_64-unknown-netbsd
# build: --all-features
# prepare: |
# PATH="/usr/pkg/sbin:/usr/pkg/bin:$PATH"
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages"
# PKG_PATH="$PKG_PATH/NetBSD/x86_64/9.3/All/"
# export PATH PKG_PATH
# sudo -E pkg_add -I gmake rust
# cargo update --dry-run
# - name: openbsd
# architecture: x86-64
# version: '7.3'
# host: macos-12
# target: x86_64-unknown-openbsd
# build: --all-features
# prepare: |
# sudo pkg_add -I gmake rust llvm
# sed -i 's/lto = true/lto = false/' Cargo.toml
# steps:
# - uses: actions/checkout@v3
# - uses: Swatinem/rust-cache@v2
# - name: Build in VM
# uses: cross-platform-actions/action@v0.15.0
# env:
# CARGO_INCREMENTAL: 0
# with:
# environment_variables: CARGO_INCREMENTAL
# operating_system: ${{ matrix.os.name }}
# architecture: ${{ matrix.os.architecture }}
# version: ${{ matrix.os.version }}
# shell: bash
# run: |
# ${{ matrix.os.prepare }}
# cargo build --release ${{ matrix.os.build }} --target ${{ matrix.os.target }}
# mv ./target/${{ matrix.os.target }}/release/distant ./distant-${{ matrix.os.target }}
# chmod +x ./distant-${{ matrix.os.target }}
# - name: Upload
# uses: actions/upload-artifact@v2
# with:
# name: ${{ matrix.os.target }}
# path: ./distant-${{ matrix.os.target }}
# if-no-files-found: error
# retention-days: 5
publish:
needs: [macos, windows, linux_gnu_x86, linux_gnu_aarch64, linux_gnu_arm_v7, linux_musl_x86, linux_musl_aarch64]
needs: [macos, macos_unify, windows, linux]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
permissions:
contents: write
env:
MACOS: macos
MACOS_UNIVERSAL_BIN: distant-macos
WIN64: win64
WIN64_BIN: distant-win64.exe
LINUX64_GNU_X86: linux64-gnu-x86
LINUX64_GNU_X86_BIN: distant-linux64-gnu-x86
LINUX64_GNU_AARCH64: linux64-gnu-aarch64
LINUX64_GNU_AARCH64_BIN: distant-linux64-gnu-aarch64
LINUX64_GNU_ARMV7: linux64-gnu-arm-v7
LINUX64_GNU_ARMV7_BIN: distant-linux64-gnu-arm-v7
LINUX64_MUSL_X86: linux64-musl-x86
LINUX64_MUSL_X86_BIN: distant-linux64-musl-x86
LINUX64_MUSL_AARCH64: linux64-musl-aarch64
LINUX64_MUSL_AARCH64_BIN: distant-linux64-musl-aarch64
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
sparse-checkout: |
CHANGELOG.md
sparse-checkout-cone-mode: false
- uses: actions/download-artifact@v2
- name: Generate MacOS SHA256 checksums
run: |
cd ${{ env.MACOS }}
sha256sum ${{ env.MACOS_UNIVERSAL_BIN }} > ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum
echo "SHA_MACOS_BIN=$(cat ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Win64 SHA256 checksums
run: |
cd ${{ env.WIN64 }}
sha256sum ${{ env.WIN64_BIN }} > ${{ env.WIN64_BIN }}.sha256sum
echo "SHA_WIN64_BIN=$(cat ${{ env.WIN64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_X86 }}
sha256sum ${{ env.LINUX64_GNU_X86_BIN }} > ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_X86_BIN=$(cat ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu aarch64) SHA256 checksums
- name: Generate SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_AARCH64 }}
sha256sum ${{ env.LINUX64_GNU_AARCH64_BIN }} > ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_AARCH64_BIN=$(cat ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu arm-v7) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_ARMV7 }}
sha256sum ${{ env.LINUX64_GNU_ARMV7_BIN }} > ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_ARMV7_BIN=$(cat ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_X86 }}
sha256sum ${{ env.LINUX64_MUSL_X86_BIN }} > ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_X86_BIN=$(cat ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl aarch64) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_AARCH64 }}
sha256sum ${{ env.LINUX64_MUSL_AARCH64_BIN }} > ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_AARCH64_BIN=$(cat ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
for i in $(find . -name "distant-*" -type f); do
echo "Generating checksum for ${i}"
sha256sum "${i}" > "${i}.sha256sum"
done
- name: Determine git tag
if: github.event_name == 'push'
run: |
TAG_NAME=${{ github.ref }}
echo "TAG_NAME=${TAG_NAME#refs/tags/}" >> $GITHUB_ENV
echo "TAG_VERSION=${TAG_NAME#refs/tags/v}" >> $GITHUB_ENV
- name: Check git tag for pre-release
- name: Check git tag for pre-release or latest
id: check-tag
run: |
if [[ ${{ github.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-.*$ ]]; then
echo ::set-output name=match::true
echo "is_prerelease=true" >> $GITHUB_OUTPUT
elif [[ ${{ github.ref }} =~ ^refs/tags/latest$ ]]; then
echo "is_latest=true" >> $GITHUB_OUTPUT
fi
- name: Print pre-release status
run: |
echo "Is ${{ github.ref }} a pre-release: ${{ steps.check-tag.outputs.match }}"
echo "Is ${{ github.ref }} pre-release: ${{ steps.check-tag.outputs.is_prerelease }}"
echo "Is ${{ github.ref }} latest: ${{ steps.check-tag.outputs.is_latest }}"
- name: Get Changelog Entry
id: changelog
uses: mindsers/changelog-reader-action@v2
with:
version: ${{ env.TAG_VERSION }}
path: "./CHANGELOG.md"
- name: Publish
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
- name: Publish (latest)
if: ${{ steps.check-tag.outputs.is_latest == 'true' }}
uses: softprops/action-gh-release@v1
with:
name: Latest Build
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: true
files: |
**/distant-*
body: |
This is the latest commit (${{ github.sha }}) built for testing.
This is not guaranteed to pass all tests or even function properly.
- name: Publish (release)
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
uses: softprops/action-gh-release@v1
with:
name: distant ${{ env.TAG_NAME }}
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: ${{ steps.check-tag.outputs.match == 'true' }}
prerelease: ${{ steps.check-tag.outputs.is_prerelease == 'true' }}
files: |
${{ env.MACOS }}/${{ env.MACOS_UNIVERSAL_BIN }}
${{ env.WIN64 }}/${{ env.WIN64_BIN }}
${{ env.LINUX64_GNU_X86 }}/${{ env.LINUX64_GNU_X86_BIN }}
${{ env.LINUX64_GNU_AARCH64 }}/${{ env.LINUX64_GNU_AARCH64_BIN }}
${{ env.LINUX64_GNU_ARMV7 }}/${{ env.LINUX64_GNU_ARMV7_BIN }}
${{ env.LINUX64_MUSL_X86 }}/${{ env.LINUX64_MUSL_X86_BIN }}
${{ env.LINUX64_MUSL_AARCH64 }}/${{ env.LINUX64_MUSL_AARCH64_BIN }}
**/*.sha256sum
**/distant-*
body: |
## Release Notes
${{ steps.changelog.outputs.changes }}
## Binaries
Standalone binaries are built out for Windows (x86_64), MacOS (Intel & ARM), and Linux (x86_64, aarch64, armv7).
- **linux64-gnu-x86** is the x86-64 release on Linux using libc
- **linux64-gnu-aarch64** is the aarch64 release on Linux using libc
- **linux64-gnu-arm-v7** is the arm-v7 release on Linux using libc (for Raspberry PI)
- **linux64-musl-x86** is the x86-64 release on Linux using musl (static binary, no libc dependency)
- **linux64-musl-aarch64** is the aarch64 release on Linux using musl (static binary, no libc dependency)
- **macos** is a universal binary for Mac OS that supports x86-64 and aarch64 (ARM) platforms
- **win64** is the x86-64 release on Windows using MSVC
## SHA256 Checksums
```
${{ env.SHA_MACOS_BIN }}
${{ env.SHA_WIN64_BIN }}
${{ env.SHA_LINUX64_GNU_X86_BIN }}
${{ env.SHA_LINUX64_GNU_AARCH64_BIN }}
${{ env.SHA_LINUX64_GNU_ARMV7_BIN }}
${{ env.SHA_LINUX64_MUSL_X86_BIN }}
${{ env.SHA_LINUX64_MUSL_AARCH64_BIN }}
```

@ -7,6 +7,121 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Fixed
- Bug in `distant fs set-permissions` where partial permissions such as `go-w`
would result in clearing all permissions (see the sketch below)
- Bug in `distant-local` implementation of `SetPermissions` where read-only
status was being set/cleared prior to Unix permissions being applied,
resulting in applying an invalid change to the permissions
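A tiny hypothetical illustration of the first fix: a partial change such as `go-w` should only clear the group and other write bits rather than wiping the whole mode. The helper below is illustrative only, not the crate's code:

```rust
// Hypothetical helper: apply `go-w` to an existing Unix mode by clearing only the
// group-write and other-write bits, leaving every other bit untouched.
fn apply_go_minus_w(existing_mode: u32) -> u32 {
    existing_mode & !0o022
}

fn main() {
    assert_eq!(apply_go_minus_w(0o775), 0o755); // only group write removed
    assert_eq!(apply_go_minus_w(0o644), 0o644); // nothing to clear
    // The bug described above could instead leave the mode fully cleared.
}
```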
## [0.20.0]
All changes described in these alpha releases:
- [Alpha 13](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.13)
- [Alpha 12](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.12)
- [Alpha 11](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.11)
- [Alpha 10](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.10)
- [Alpha 9](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.9)
- [Alpha 8](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.8)
- [Alpha 7](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.7)
- [Alpha 6](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.6)
- [Alpha 5](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.5)
- [Alpha 4](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.4)
- [Alpha 3](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.3)
- [Alpha 2](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.2)
- [Alpha 1](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.1)
### Fixed
- When terminating a connection using `distant manager kill`, the connection is
now properly dropped, so servers waiting to terminate due to
`--shutdown lonely=N` now shut down accordingly
- Zombies from spawned servers via `distant launch manager://localhost` are now
properly terminated by checking the exit status of processes
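A rough standalone sketch of the zombie fix's idea, namely polling spawned children for an exit status so they get reaped. The helper and its name are assumptions for illustration, not the manager's actual code:

```rust
use std::process::Child;

// Hypothetical helper: drop handles for children that have exited, reaping them via
// `try_wait()` so they do not linger as zombie processes.
fn reap_exited(children: &mut Vec<Child>) {
    children.retain_mut(|child| match child.try_wait() {
        Ok(Some(status)) => {
            eprintln!("spawned server exited with {status}; dropping handle");
            false // remove the exited child from the list
        }
        _ => true, // still running (or status unavailable); keep it around
    });
}

fn main() {
    // e.g. handles collected from previously spawned servers
    let mut children: Vec<Child> = Vec::new();
    reap_exited(&mut children);
}
```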
## [0.20.0-alpha.13]
### Added
- Support for `--shell` with optional path to an explicit shell as an option
when executing `distant spawn` in order to run the command within a shell
rather than directly
- `semver` crate to be used for version information in protocol and manager
- `is_compatible_with` function to root of `distant-protocol` crate that checks
if a provided version is compatible with the protocol
### Changed
- `distant_protocol::PROTOCOL_VERSION` now uses the crate's major, minor, and
patch version at compile time (parsed via the `const-str` crate) to streamline
version handling between the crate and the protocol (see the sketch below)
- Protocol and manager now supply a version request instead of capabilities, and
the protocol's capabilities are now a `Vec<String>` containing a set of broader
capabilities instead of every possible request type
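A minimal sketch of the version-compatibility idea; the rule shown here (same major, or same minor while the major is 0) is an assumption for illustration and not necessarily the crate's exact policy:

```rust
use semver::Version;

// Assumed compatibility rule: two versions are compatible when they would satisfy
// the same caret requirement (same major, or same minor while the major is 0).
fn is_compatible_with(ours: &Version, theirs: &Version) -> bool {
    if ours.major == 0 {
        ours.major == theirs.major && ours.minor == theirs.minor
    } else {
        ours.major == theirs.major
    }
}

fn main() {
    // The real crate derives this from its own version at compile time via `const-str`;
    // a plain runtime parse stands in for that here.
    let protocol: Version = env!("CARGO_PKG_VERSION").parse().expect("valid crate version");
    println!(
        "compatible with 0.20.1? {}",
        is_compatible_with(&protocol, &"0.20.1".parse().unwrap())
    );
}
```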
### Fixed
- CLI commands like `distant manager select` will now output errors in a JSON
format when configured to communicate using JSON
- `distant-ssh2` no longer caches the remote family globally, but instead
caches it per `Ssh` instance
### Removed
- `Cmd::program` and `Cmd::arguments` functions as they were misleading (didn't
do what `distant-local` or `distant-ssh2` do)
- Removed `Capability` and `Capabilities` from protocol and manager
## [0.20.0-alpha.12]
### Changed
- Minimum Rust version is now `1.70.0` due to bump in `grep-cli` minimum
requirement. This technically applied to v0.20.0-alpha.11, but wasn't caught
until the dependency updated
### Fixed
- `distant --help` will now return exit code of 0
- `distant --version` will now return exit code of 0
## [0.20.0-alpha.11]
### Added
- CLI now supports `-c <STR>` and `--cmd <STR>` to use a given string as the
command as an alternative to `-- <CMD> <ARG> <ARG>`
- Add build for FreeBSD
### Changed
CLI no longer uses `-c` as shorthand for specifying a config file
- `--file` option for generating completion has been renamed to `--output`
- CLI command to generate config files now defaults to printing to stdout with
`--output` providing the option to write to a file
- Artifacts built now use format of `distant-<TRIPLE>`
## [0.20.0-alpha.10]
### Added
- `use_hidden`, `use_ignore_files`, `use_parent_ignore_files`,
`use_git_ignore`, `use_global_git_ignore`, and `use_git_exclude` as new
options for searching
### Changed
- Searching now disables all standard filters by default, re-introducing
the ability to set each filter through individual options (see the sketch below)
### Fixed
- Failing to start a search no longer causes the search task to exit when
using the local server, which previously meant no further searches could be
executed
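A standalone sketch of what these per-filter options map to in the `ignore` crate: the walk disables the standard filters up front and then re-enables only the filters that were requested. The path and the particular toggles chosen here are arbitrary examples:

```rust
use ignore::WalkBuilder;

fn main() {
    // Disable every default filter, then opt back in to individual ones, mirroring
    // the new search options described above.
    let walker = WalkBuilder::new(".")
        .standard_filters(false) // start with all default filters off
        .hidden(true)            // e.g. still skip hidden entries
        .ignore(false)           // do not honor .ignore files
        .parents(false)          // do not consult parent directories for ignore files
        .git_ignore(true)        // honor .gitignore
        .git_global(false)
        .git_exclude(false)
        .skip_stdout(true)
        .build();

    for entry in walker.flatten() {
        println!("{}", entry.path().display());
    }
}
```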
## [0.20.0-alpha.9]
### Added
@ -528,7 +643,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
pending upon full channel and no longer locks up
- stdout, stderr, and stdin of `RemoteProcess` no longer cause deadlock
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.9...HEAD
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.13...HEAD
[0.20.0-alpha.13]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.12...v0.20.0-alpha.13
[0.20.0-alpha.12]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.11...v0.20.0-alpha.12
[0.20.0-alpha.11]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.10...v0.20.0-alpha.11
[0.20.0-alpha.10]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.9...v0.20.0-alpha.10
[0.20.0-alpha.9]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.8...v0.20.0-alpha.9
[0.20.0-alpha.8]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.7...v0.20.0-alpha.8
[0.20.0-alpha.7]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.6...v0.20.0-alpha.7

Cargo.lock generated

@ -571,6 +571,12 @@ version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
[[package]]
name = "const-str"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6"
[[package]]
name = "convert_case"
version = "0.4.0"
@ -807,7 +813,7 @@ dependencies = [
[[package]]
name = "distant"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_cmd",
@ -844,6 +850,7 @@ dependencies = [
"test-log",
"tokio",
"toml_edit",
"typed-path",
"which",
"whoami",
"windows-service",
@ -852,7 +859,7 @@ dependencies = [
[[package]]
name = "distant-auth"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"async-trait",
"derive_more",
@ -865,7 +872,7 @@ dependencies = [
[[package]]
name = "distant-core"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"async-trait",
"bitflags 2.3.1",
@ -891,7 +898,7 @@ dependencies = [
[[package]]
name = "distant-local"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"assert_fs",
"async-trait",
@ -919,11 +926,12 @@ dependencies = [
[[package]]
name = "distant-net"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"async-trait",
"bytes",
"chacha20poly1305",
"const-str",
"derive_more",
"distant-auth",
"dyn-clone",
@ -937,6 +945,7 @@ dependencies = [
"rand",
"rmp",
"rmp-serde",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
@ -949,13 +958,15 @@ dependencies = [
[[package]]
name = "distant-protocol"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"bitflags 2.3.1",
"const-str",
"derive_more",
"regex",
"rmp",
"rmp-serde",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
@ -964,7 +975,7 @@ dependencies = [
[[package]]
name = "distant-ssh2"
version = "0.20.0-alpha.9"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_fs",
@ -2768,6 +2779,9 @@ name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
dependencies = [
"serde",
]
[[package]]
name = "semver-parser"

@ -3,7 +3,7 @@ name = "distant"
description = "Operate on a remote computer through file and process manipulation"
categories = ["command-line-utilities"]
keywords = ["cli"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -25,6 +25,7 @@ members = [
opt-level = 'z'
lto = true
codegen-units = 1
strip = true
[features]
default = ["libssh", "ssh2"]
@ -39,8 +40,8 @@ clap_complete = "4.3.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.4", default-features = false }
distant-core = { version = "=0.20.0-alpha.9", path = "distant-core" }
distant-local = { version = "=0.20.0-alpha.9", path = "distant-local" }
distant-core = { version = "=0.20.0", path = "distant-core" }
distant-local = { version = "=0.20.0", path = "distant-local" }
directories = "5.0.1"
file-mode = "0.1.2"
flexi_logger = "0.25.5"
@ -58,12 +59,13 @@ tokio = { version = "1.28.2", features = ["full"] }
toml_edit = { version = "0.19.10", features = ["serde"] }
terminal_size = "0.2.6"
termwiz = "0.20.0"
typed-path = "0.3.2"
which = "4.4.0"
winsplit = "0.1.0"
whoami = "1.4.0"
# Optional native SSH functionality
distant-ssh2 = { version = "=0.20.0-alpha.9", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
distant-ssh2 = { version = "=0.20.0", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
[target.'cfg(unix)'.dependencies]
fork = "0.1.21"

@ -1,6 +1,11 @@
# distant - remotely edit files and run programs
<h1 align="center">
<img src="https://distant.dev/assets/images/distant-with-logo-300x87.png" alt="Distant">
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.68+][distant_rustc_img]][distant_rustc_lnk]
<a href="https://distant.dev/">Documentation</a> |
<a href="https://github.com/chipsenkbeil/distant/discussions">Discussion</a>
</h1>
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.70+][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant.svg
[distant_crates_lnk]: https://crates.io/crates/distant
@ -8,164 +13,52 @@
[distant_doc_lnk]: https://docs.rs/distant
[distant_ci_img]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml/badge.svg
[distant_ci_lnk]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
🚧 **(Alpha stage software) This program is in rapid development and may break or change frequently!** 🚧
## Details
The `distant` binary supplies both a server and client component as well as
a command to start a server and configure the local client to be able to
talk to the server.
- Asynchronous in nature, powered by [`tokio`](https://tokio.rs/)
- Data is serialized to send across the wire via [`msgpack`](https://msgpack.org/)
- Encryption & authentication are handled via
[XChaCha20Poly1305](https://tools.ietf.org/html/rfc8439) for an authenticated
encryption scheme via
[RustCrypto/ChaCha20Poly1305](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305)
Additionally, the core of the distant client and server codebase can be pulled
in to be used with your own Rust crates via the `distant-core` crate. The
networking library, which is agnostic of `distant` protocols, can be used via
the `distant-net` crate.
## Installation
### Prebuilt Binaries
If you would like a pre-built binary, check out the
[releases section](https://github.com/chipsenkbeil/distant/releases).
### Building from Source
### Unix
If you have [`cargo`](https://github.com/rust-lang/cargo) installed, you can
directly download and build the source via:
```sh
# Need to include -L to follow redirects as this returns 301
curl -L https://sh.distant.dev | sh
```bash
cargo install distant
# Can also use wget to the same result
wget -q -O- https://sh.distant.dev | sh
```
Alternatively, you can clone this repository and build from source following
the [build guide](./BUILDING.md).
## Backend Feature Matrix
Distant supports multiple backends to facilitate remote communication with
another server. Today, these backends include:
* `distant` - a standalone server acting as the reference implementation
* `ssh` - a wrapper around an `ssh` client that translates the distant protocol
into ssh server requests
Not every backend supports every feature of distant. Below is a table outlining
the available features and which backend supports each feature:
| Feature | distant | ssh |
| --------------------- | --------| ----|
| Filesystem I/O | ✅ | ✅ |
| Filesystem Watching | ✅ | ✅ |
| Process Execution | ✅ | ✅ |
| Reconnect | ✅ | ❌ |
| Search | ✅ | ❌ |
| System Information | ✅ | ⚠ |
| Version | ✅ | ✅ |
* ✅ means full support
* ⚠ means partial support
* ❌ means no support
### Feature Details
* `Filesystem I/O` - able to read from and write to the filesystem
* `Filesystem Watching` - able to receive notifications when changes to the
filesystem occur
* `Process Execution` - able to execute processes
* `Reconnect` - able to reconnect after network outages
* `Search` - able to search the filesystem
* `System Information` - able to retrieve information about the system
* `Version` - able to report back version information
## Example
### Starting the manager
In order to facilitate communication between a client and server, you first
need to start the manager. This can be done in one of two ways:
1. Leverage the `service` functionality to spawn the manager using one of the
following supported service management platforms:
- [`sc.exe`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/cc754599(v=ws.11)) for use with [Window Service](https://en.wikipedia.org/wiki/Windows_service) (Windows)
- [Launchd](https://en.wikipedia.org/wiki/Launchd) (MacOS)
- [systemd](https://en.wikipedia.org/wiki/Systemd) (Linux)
- [OpenRC](https://en.wikipedia.org/wiki/OpenRC) (Linux)
- [rc.d](https://en.wikipedia.org/wiki/Init#Research_Unix-style/BSD-style) (FreeBSD)
2. Run the manager manually by using the `listen` subcommand
#### Service management
```bash
# If you want to install the manager as a service, you can use the service
# interface available directly from the CLI
#
# By default, this will install a system-level service, which means that you
# will need elevated permissions to both install AND communicate with the
# manager
distant manager service install
# If you want to maintain a user-level manager service, you can include the
# --user flag. Note that this is only supported on MacOS (via launchd) and
# Linux (via systemd)
distant manager service install --user
# ........
# Once you have installed the service, you will normally need to start it
# manually or restart your machine to trigger startup on boot
distant manager service start # --user if you are working with user-level
```
#### Manual start
See https://distant.dev/getting-started/installation/unix/ for more details.
```bash
# If you choose to run the manager without a service management platform, you
# can either run the manager in the foreground or provide --daemon to spawn and
# detach the manager
### Windows
# Run in the foreground
distant manager listen
# Detach the manager where it will not terminate even if the parent exits
distant manager listen --daemon
```powershell
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time
irm sh.distant.dev | iex
```
### Interacting with a remote machine
See https://distant.dev/getting-started/installation/windows/ for more details.
Once you have a manager listening for client requests, you can begin
interacting with the manager, spawn and/or connect to servers, and interact
with remote machines.
## Usage
```bash
# Connect to my.example.com on port 22 via SSH and start a distant server
distant client launch ssh://my.example.com
# After the connection is established, you can perform different operations
# on the remote machine via `distant client action {command} [args]`
distant client action copy path/to/file new/path/to/file
distant client action spawn -- echo 'Hello, this is from the other side'
```sh
# Start a manager in the background
distant manager listen --daemon
# Opening a shell to the remote machine is trivial
distant client shell
# SSH into a server, start distant, and connect to the distant server
distant launch ssh://example.com
# If you have more than one connection open, you can switch between active
# connections by using the `select` subcommand
distant client select '<ID>'
# Read the current working directory
distant fs read .
# For programmatic use, a REPL following the JSON API is available
distant client repl --format json
# Start a shell on the remote machine
distant shell
```
See https://distant.dev/getting-started/usage/ for more details.
## License
This project is licensed under either of

@ -3,7 +3,7 @@ name = "distant-auth"
description = "Authentication library for distant, providing various implementations"
categories = ["authentication"]
keywords = ["auth", "authentication", "async"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"

@ -1,13 +1,13 @@
# distant auth
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-auth.svg
[distant_crates_lnk]: https://crates.io/crates/distant-auth
[distant_doc_img]: https://docs.rs/distant-auth/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-auth
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -3,7 +3,7 @@ name = "distant-core"
description = "Core library for distant, enabling operation on a remote computer through file and process manipulation"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -16,8 +16,8 @@ async-trait = "0.1.68"
bitflags = "2.3.1"
bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.20.0-alpha.9", path = "../distant-net" }
distant-protocol = { version = "=0.20.0-alpha.9", path = "../distant-protocol" }
distant-net = { version = "=0.20.0", path = "../distant-net" }
distant-protocol = { version = "=0.20.0", path = "../distant-protocol" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant core
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-core.svg
[distant_crates_lnk]: https://crates.io/crates/distant-core
[distant_doc_img]: https://docs.rs/distant-core/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-core
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -44,8 +44,12 @@ pub trait DistantChannelExt {
/// Creates a remote directory, optionally creating all parent components if specified
fn create_dir(&mut self, path: impl Into<PathBuf>, all: bool) -> AsyncReturn<'_, ()>;
/// Checks whether the `path` exists on the remote machine
fn exists(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, bool>;
/// Checks whether this client is compatible with the remote server
fn is_compatible(&mut self) -> AsyncReturn<'_, bool>;
/// Retrieves metadata about a path on a remote machine
fn metadata(
&mut self,
@ -136,6 +140,9 @@ pub trait DistantChannelExt {
/// Retrieves server version information
fn version(&mut self) -> AsyncReturn<'_, Version>;
/// Returns version of protocol that the client uses
fn protocol_version(&self) -> protocol::semver::Version;
/// Writes a remote file with the data from a collection of bytes
fn write_file(
&mut self,
@ -232,6 +239,15 @@ impl DistantChannelExt
)
}
fn is_compatible(&mut self) -> AsyncReturn<'_, bool> {
make_body!(self, protocol::Request::Version {}, |data| match data {
protocol::Response::Version(version) =>
Ok(protocol::is_compatible_with(&version.protocol_version)),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn metadata(
&mut self,
path: impl Into<PathBuf>,
@ -453,6 +469,10 @@ impl DistantChannelExt
})
}
fn protocol_version(&self) -> protocol::semver::Version {
protocol::PROTOCOL_VERSION
}
fn write_file(
&mut self,
path: impl Into<PathBuf>,

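For orientation, a hypothetical snippet showing how the extension methods added above might be called, assuming an already connected `DistantClient` and that awaiting `AsyncReturn` yields an `io::Result`; the function name and wiring are illustrative only:

```rust
use distant_core::{DistantChannelExt, DistantClient};

// Hypothetical usage sketch of the new `protocol_version` and `is_compatible` methods.
async fn report_compatibility(client: &mut DistantClient) -> std::io::Result<()> {
    println!("client protocol version: {}", client.protocol_version());
    if client.is_compatible().await? {
        println!("server protocol is compatible");
    } else {
        eprintln!("server protocol is NOT compatible with this client");
    }
    Ok(())
}
```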
@ -7,8 +7,9 @@ use distant_core::{
};
use distant_net::auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener};
use distant_net::common::{InmemoryTransport, OneshotListener, Version};
use distant_net::server::{Server, ServerRef};
use distant_protocol::PROTOCOL_VERSION;
/// Stands up an inmemory client and server using the given api.
async fn setup(api: impl DistantApi + Send + Sync + 'static) -> (DistantClient, ServerRef) {
@ -17,12 +18,22 @@ async fn setup(api: impl DistantApi + Send + Sync + 'static) -> (DistantClient,
let server = Server::new()
.handler(DistantApiServerHandler::new(api))
.verifier(Verifier::none())
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.start(OneshotListener::from_value(t2))
.expect("Failed to start server");
let client: DistantClient = Client::build()
.auth_handler(DummyAuthHandler)
.connector(t1)
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.connect()
.await
.expect("Failed to connect to server");

@ -2,7 +2,7 @@
name = "distant-local"
description = "Library implementing distant API for local interactions"
categories = ["network-programming"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -21,7 +21,7 @@ macos-kqueue = ["notify/macos_kqueue"]
[dependencies]
async-trait = "0.1.68"
distant-core = { version = "=0.20.0-alpha.9", path = "../distant-core" }
distant-core = { version = "=0.20.0", path = "../distant-core" }
grep = "0.2.12"
ignore = "0.4.20"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant local
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-local.svg
[distant_crates_lnk]: https://crates.io/crates/distant-local
[distant_doc_img]: https://docs.rs/distant-local/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-local
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -3,10 +3,10 @@ use std::time::SystemTime;
use std::{env, io};
use async_trait::async_trait;
use distant_core::protocol::semver;
use distant_core::protocol::{
Capabilities, ChangeKind, ChangeKindSet, DirEntry, Environment, FileType, Metadata,
Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo,
Version, PROTOCOL_VERSION,
ChangeKind, ChangeKindSet, DirEntry, Environment, FileType, Metadata, Permissions, ProcessId,
PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version, PROTOCOL_VERSION,
};
use distant_core::{DistantApi, DistantCtx};
use ignore::{DirEntry as WalkDirEntry, WalkBuilder};
@ -451,9 +451,11 @@ impl DistantApi for Api {
})?
.permissions();
// Apply the readonly flag for all platforms
if let Some(readonly) = permissions.is_readonly() {
std_permissions.set_readonly(readonly);
// Apply the readonly flag for all platforms but unix
if !cfg!(unix) {
if let Some(readonly) = permissions.is_readonly() {
std_permissions.set_readonly(readonly);
}
}
// On Unix platforms, we can apply a bitset change
@ -462,7 +464,9 @@ impl DistantApi for Api {
use std::os::unix::prelude::*;
let mut current = Permissions::from(std_permissions.clone());
current.apply_from(permissions);
std_permissions.set_mode(current.to_unix_mode());
let mode = current.to_unix_mode();
std_permissions.set_mode(mode);
}
Ok(std_permissions)
@ -635,10 +639,32 @@ impl DistantApi for Api {
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
debug!("[Conn {}] Querying version", ctx.connection_id);
// Parse our server's version
let mut server_version: semver::Version = env!("CARGO_PKG_VERSION")
.parse()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
// Add the package name to the version information
if server_version.build.is_empty() {
server_version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME"))
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
} else {
let raw_build_str = format!(
"{}.{}",
server_version.build.as_str(),
env!("CARGO_PKG_NAME")
);
server_version.build = semver::BuildMetadata::new(&raw_build_str)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
}
Ok(Version {
server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
server_version,
protocol_version: PROTOCOL_VERSION,
capabilities: Capabilities::all(),
capabilities: Version::capabilities()
.iter()
.map(ToString::to_string)
.collect(),
})
}
}
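For reference, a small standalone sketch (not the crate's code) of what the build-metadata handling above produces with the `semver` crate:

```rust
use semver::{BuildMetadata, Version};

fn main() {
    // Start from a plain crate version and attach the package name as build metadata,
    // mirroring the `version()` handler above.
    let mut v: Version = "0.20.0".parse().unwrap();
    v.build = BuildMetadata::new("distant-local").unwrap();
    assert_eq!(v.to_string(), "0.20.0+distant-local");
}
```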

@ -85,6 +85,7 @@ impl ProcessInstance {
let args = cmd_and_args.split_off(1);
let cmd = cmd_and_args.into_iter().next().unwrap();
debug!("Spawning process: {cmd} {args:?}");
let mut child: Box<dyn Process> = match pty {
Some(size) => Box::new(PtyProcess::spawn(
cmd.clone(),

@ -137,7 +137,11 @@ async fn search_task(tx: mpsc::Sender<InnerSearchMsg>, mut rx: mpsc::Receiver<In
Ok(executor) => executor,
Err(x) => {
let _ = cb.send(Err(x));
return;
// NOTE: We do not want to exit our task! This processes all of our search
// requests, so if we exit, things have gone terrible. This is just a
// regular error, so we merely continue to wait for the next request.
continue;
}
};
@ -341,6 +345,13 @@ impl SearchQueryExecutor {
.build()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?,
)
.standard_filters(false)
.hidden(query.options.ignore_hidden)
.ignore(query.options.use_ignore_files)
.parents(query.options.use_parent_ignore_files)
.git_ignore(query.options.use_git_ignore_files)
.git_global(query.options.use_global_git_ignore_files)
.git_exclude(query.options.use_git_exclude_files)
.skip_stdout(true);
if query.options.upward {

@ -3,7 +3,7 @@ name = "distant-net"
description = "Network library for distant, providing implementations to support client/server architecture"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -15,8 +15,9 @@ license = "MIT OR Apache-2.0"
async-trait = "0.1.68"
bytes = "1.4.0"
chacha20poly1305 = "0.10.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-auth = { version = "=0.20.0-alpha.9", path = "../distant-auth" }
distant-auth = { version = "=0.20.0", path = "../distant-auth" }
dyn-clone = "1.0.11"
flate2 = "1.0.26"
hex = "0.4.3"
@ -28,6 +29,7 @@ rand = { version = "0.8.5", features = ["getrandom"] }
rmp = "0.8.11"
rmp-serde = "1.1.1"
sha2 = "0.10.6"
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.96"
@ -35,7 +37,7 @@ strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
distant-auth = { version = "=0.20.0-alpha.9", path = "../distant-auth", features = ["tests"] }
distant-auth = { version = "=0.20.0", path = "../distant-auth", features = ["tests"] }
env_logger = "0.10.0"
serde_json = "1.0.96"
tempfile = "3.5.0"

@ -1,13 +1,13 @@
# distant net
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-net.svg
[distant_crates_lnk]: https://crates.io/crates/distant-net
[distant_doc_img]: https://docs.rs/distant-net/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-net
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -20,7 +20,7 @@ pub use windows::*;
use super::ClientConfig;
use crate::client::{Client, UntypedClient};
use crate::common::{Connection, Transport};
use crate::common::{Connection, Transport, Version};
/// Interface that performs the connection to produce a [`Transport`] for use by the [`Client`].
#[async_trait]
@ -46,6 +46,7 @@ pub struct ClientBuilder<H, C> {
connector: C,
config: ClientConfig,
connect_timeout: Option<Duration>,
version: Version,
}
impl<H, C> ClientBuilder<H, C> {
@ -56,6 +57,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -66,6 +68,7 @@ impl<H, C> ClientBuilder<H, C> {
config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -76,6 +79,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -86,6 +90,18 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: connect_timeout.into(),
version: self.version,
}
}
/// Configure the version of the client.
pub fn version(self, version: Version) -> Self {
Self {
auth_handler: self.auth_handler,
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version,
}
}
}
@ -97,6 +113,7 @@ impl ClientBuilder<(), ()> {
config: Default::default(),
connector: (),
connect_timeout: None,
version: Default::default(),
}
}
}
@ -119,6 +136,7 @@ where
let auth_handler = self.auth_handler;
let config = self.config;
let connect_timeout = self.connect_timeout;
let version = self.version;
let f = async move {
let transport = match connect_timeout {
@ -128,7 +146,7 @@ where
.and_then(convert::identity)?,
None => self.connector.connect().await?,
};
let connection = Connection::client(transport, auth_handler).await?;
let connection = Connection::client(transport, auth_handler, version).await?;
Ok(UntypedClient::spawn(connection, config))
};

@ -9,6 +9,7 @@ mod packet;
mod port;
mod transport;
pub(crate) mod utils;
mod version;
pub use any::*;
pub(crate) use connection::Connection;
@ -21,3 +22,4 @@ pub use map::*;
pub use packet::*;
pub use port::*;
pub use transport::*;
pub use version::*;

@ -11,6 +11,7 @@ use tokio::sync::oneshot;
use crate::common::InmemoryTransport;
use crate::common::{
Backup, FramedTransport, HeapSecretKey, Keychain, KeychainResult, Reconnectable, Transport,
TransportExt, Version,
};
/// Id of the connection
@ -110,6 +111,19 @@ where
debug!("[Conn {id}] Re-establishing connection");
Reconnectable::reconnect(transport).await?;
// Wait for exactly 24 version bytes (8 bytes each for major, minor, and patch),
// but on a reconnect we don't actually validate them because we already did that
// the first time we connected
//
// NOTE: We do this with the raw transport and not the framed version!
debug!("[Conn {id}] Waiting for server version");
if transport.as_mut_inner().read_exact(&mut [0u8; 24]).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
transport.client_handshake().await?;
@ -190,13 +204,42 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the client-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 1. Performs a version check with the server
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn client<H: AuthHandler + Send>(transport: T, handler: H) -> io::Result<Self> {
pub async fn client<H: AuthHandler + Send>(
transport: T,
handler: H,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Wait for exactly 24 version bytes (8 bytes each for major, minor, and patch)
debug!("[Conn {id}] Waiting for server version");
let mut version_bytes = [0u8; 24];
if transport.read_exact(&mut version_bytes).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Compare versions for compatibility and drop the connection if incompatible
let server_version = Version::from_be_bytes(version_bytes);
debug!(
"[Conn {id}] Checking compatibility between client {version} & server {server_version}"
);
if !version.is_compatible_with(&server_version) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Client version {version} is incompatible with server version {server_version}"
),
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -238,19 +281,25 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the server-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid by either using the
/// 1. Performs a version check with the client
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid by either using the
/// given `verifier` or, if working with an existing client connection, will validate an OTP
/// from our database
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn server(
transport: T,
verifier: &Verifier,
keychain: Keychain<oneshot::Receiver<Backup>>,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Write the version as bytes
debug!("[Conn {id}] Sending version {version}");
transport.write_all(&version.to_be_bytes()).await?;
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -464,6 +513,60 @@ mod tests {
use super::*;
use crate::common::Frame;
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
macro_rules! send_server_version {
($transport:expr, $version:expr) => {{
($transport)
.as_mut_inner()
.write_all(&$version.to_be_bytes())
.await
.unwrap();
}};
($transport:expr) => {
send_server_version!($transport, server_version!());
};
}
macro_rules! receive_version {
($transport:expr) => {{
let mut bytes = [0u8; 24];
assert_eq!(
($transport)
.as_mut_inner()
.read_exact(&mut bytes)
.await
.unwrap(),
24,
"Wrong version len received"
);
Version::from_be_bytes(bytes)
}};
}
#[test(tokio::test)]
async fn client_should_fail_when_server_sends_incompatible_version() {
let (mut t1, t2) = FramedTransport::pair(100);
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler, Version::new(1, 2, 3))
.await
.unwrap()
});
// Send invalid version to fail the handshake
send_server_version!(t1, Version::new(2, 0, 0));
// Client should fail
task.await.unwrap_err();
}
#[test(tokio::test)]
async fn client_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -471,11 +574,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -490,11 +596,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -519,11 +628,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -559,11 +671,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -597,11 +712,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -629,6 +747,30 @@ mod tests {
assert_eq!(client.otp(), Some(&otp));
}
#[test(tokio::test)]
async fn server_should_fail_if_client_drops_due_to_version() {
let (mut t1, t2) = FramedTransport::pair(100);
let verifier = Verifier::none();
let keychain = Keychain::new();
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Drop client connection as a result of an "incompatible version"
drop(t1);
// Server should fail
task.await.unwrap_err();
}
#[test(tokio::test)]
async fn server_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -638,11 +780,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -659,11 +804,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -683,11 +831,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -717,11 +868,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -750,11 +904,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -790,11 +947,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -828,11 +988,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -866,11 +1029,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -904,12 +1070,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -969,12 +1138,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -1029,13 +1201,13 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
let mut server = task.await.unwrap();
@ -1063,14 +1235,14 @@ mod tests {
let verifier = Arc::clone(&verifier);
let keychain = keychain.clone();
tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
})
};
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
@ -1093,6 +1265,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Send garbage to fail handshake from server-side
transport.write_frame(b"hello").await.unwrap();
@ -1108,6 +1283,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1126,6 +1304,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1162,6 +1343,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1205,6 +1389,9 @@ mod tests {
client
});
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1275,7 +1462,7 @@ mod tests {
// Spawn a task to perform the server reconnection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(transport, &verifier, keychain)
Connection::server(transport, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});

@ -8,7 +8,7 @@ pub struct PlainCodec;
impl PlainCodec {
pub fn new() -> Self {
Self::default()
Self
}
}

@ -0,0 +1,132 @@
use semver::{Comparator, Op, Prerelease, Version as SemVer};
use std::fmt;
/// Represents a version and compatibility rules.
#[derive(Clone, Debug)]
pub struct Version {
inner: SemVer,
lower: Comparator,
upper: Comparator,
}
impl Version {
/// Creates a new version in the form `major.minor.patch` with a compatibility ruleset used to
/// check other versions: `>=0.1.2, <0.2.0` when the major version is `0`, or `>=1.2.3, <2`
/// otherwise.
///
/// ```
/// use distant_net::common::Version;
///
/// // Matching versions are compatible
/// let a = Version::new(1, 2, 3);
/// let b = Version::new(1, 2, 3);
/// assert!(a.is_compatible_with(&b));
///
/// // Version 1.2.3 is compatible with 1.2.4, but not the other way
/// let a = Version::new(1, 2, 3);
/// let b = Version::new(1, 2, 4);
/// assert!(a.is_compatible_with(&b));
/// assert!(!b.is_compatible_with(&a));
///
/// // Version 1.2.3 is compatible with 1.3.0, but not 2
/// let a = Version::new(1, 2, 3);
/// assert!(a.is_compatible_with(&Version::new(1, 3, 0)));
/// assert!(!a.is_compatible_with(&Version::new(2, 0, 0)));
///
/// // Version 0.1.2 is compatible with 0.1.3, but not the other way
/// let a = Version::new(0, 1, 2);
/// let b = Version::new(0, 1, 3);
/// assert!(a.is_compatible_with(&b));
/// assert!(!b.is_compatible_with(&a));
///
/// // Version 0.1.2 is not compatible with 0.2
/// let a = Version::new(0, 1, 2);
/// let b = Version::new(0, 2, 0);
/// assert!(!a.is_compatible_with(&b));
/// assert!(!b.is_compatible_with(&a));
/// ```
pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
Self {
inner: SemVer::new(major, minor, patch),
lower: Comparator {
op: Op::GreaterEq,
major,
minor: Some(minor),
patch: Some(patch),
pre: Prerelease::EMPTY,
},
upper: Comparator {
op: Op::Less,
major: if major == 0 { 0 } else { major + 1 },
minor: if major == 0 { Some(minor + 1) } else { None },
patch: None,
pre: Prerelease::EMPTY,
},
}
}
/// Returns true if this version is compatible with another version.
pub fn is_compatible_with(&self, other: &Self) -> bool {
self.lower.matches(&other.inner) && self.upper.matches(&other.inner)
}
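The comparator pair built in `new` amounts to a cargo-style range; a small illustrative check consistent with the doc examples above:
// `Version::new(1, 2, 3)` accepts versions in ">=1.2.3, <2";
// `Version::new(0, 1, 2)` accepts versions in ">=0.1.2, <0.2".
assert!(Version::new(1, 2, 3).is_compatible_with(&Version::new(1, 9, 9)));
assert!(!Version::new(0, 1, 2).is_compatible_with(&Version::new(0, 2, 0)));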
/// Converts a 24-byte array into a version, interpreting the bytes as the big-endian
/// major/minor/patch values.
pub const fn from_be_bytes(bytes: [u8; 24]) -> Self {
Self::new(
u64::from_be_bytes([
bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], bytes[6], bytes[7],
]),
u64::from_be_bytes([
bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], bytes[14],
bytes[15],
]),
u64::from_be_bytes([
bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], bytes[22],
bytes[23],
]),
)
}
/// Converts the version into its 24-byte big-endian major/minor/patch form.
pub const fn to_be_bytes(&self) -> [u8; 24] {
let major = self.inner.major.to_be_bytes();
let minor = self.inner.minor.to_be_bytes();
let patch = self.inner.patch.to_be_bytes();
[
major[0], major[1], major[2], major[3], major[4], major[5], major[6], major[7],
minor[0], minor[1], minor[2], minor[3], minor[4], minor[5], minor[6], minor[7],
patch[0], patch[1], patch[2], patch[3], patch[4], patch[5], patch[6], patch[7],
]
}
}
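The 24-byte form above is the preamble written by `Connection::server` and read by `Connection::client`; a brief round-trip sketch of the layout:
let v = Version::new(0, 20, 0);
let bytes = v.to_be_bytes();
// Three consecutive big-endian u64 values: major, minor, patch.
assert_eq!(&bytes[8..16], &20u64.to_be_bytes()[..]);
let roundtrip = Version::from_be_bytes(bytes);
assert!(v.is_compatible_with(&roundtrip) && roundtrip.is_compatible_with(&v));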
impl Default for Version {
/// Default version is `0.0.0`.
fn default() -> Self {
Self::new(0, 0, 0)
}
}
impl fmt::Display for Version {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.inner)
}
}
impl From<semver::Version> for Version {
/// Creates a new [`Version`] using the major, minor, and patch information from
/// [`semver::Version`].
fn from(version: semver::Version) -> Self {
let mut this = Self::new(version.major, version.minor, version.patch);
this.inner = version;
this
}
}
impl From<Version> for semver::Version {
fn from(version: Version) -> Self {
version.inner
}
}

@ -5,3 +5,12 @@ mod server;
pub use client::*;
pub use data::*;
pub use server::*;
use crate::common::Version;
/// Represents the version associated with the manager's protocol.
pub const PROTOCOL_VERSION: Version = Version::new(
const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64),
const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64),
);
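A hypothetical wiring sketch showing the constant feeding the connection-level version check; the handler is a placeholder, and the zero-state constructor is assumed to be `Server::new()` (the `handler`, `verifier`, and `version` builder methods come from elsewhere in this diff):
let server = Server::new()
    .handler(my_manager_handler)    // placeholder ServerHandler implementation
    .verifier(Verifier::none())
    .version(PROTOCOL_VERSION);     // written as the 24-byte preamble before the handshake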

@ -7,7 +7,7 @@ use log::*;
use crate::client::Client;
use crate::common::{ConnectionId, Destination, Map, Request};
use crate::manager::data::{
ConnectionInfo, ConnectionList, ManagerCapabilities, ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerRequest, ManagerResponse, SemVer,
};
mod channel;
@ -231,12 +231,12 @@ impl ManagerClient {
RawChannel::spawn(connection_id, self).await
}
/// Retrieves a list of supported capabilities
pub async fn capabilities(&mut self) -> io::Result<ManagerCapabilities> {
trace!("capabilities()");
let res = self.send(ManagerRequest::Capabilities).await?;
/// Retrieves the version of the manager.
pub async fn version(&mut self) -> io::Result<SemVer> {
trace!("version()");
let res = self.send(ManagerRequest::Version).await?;
match res.payload {
ManagerResponse::Capabilities { supported } => Ok(supported),
ManagerResponse::Version { version } => Ok(version),
ManagerResponse::Error { description } => {
Err(io::Error::new(io::ErrorKind::Other, description))
}
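A hedged call-site sketch, assuming `client` is an already-connected `ManagerClient`:
// Ask the manager which version it is running and surface it to the user.
let manager_version = client.version().await?;
println!("manager version: {manager_version}");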

@ -1,8 +1,6 @@
pub type ManagerChannelId = u32;
pub type ManagerAuthenticationId = u32;
mod capabilities;
pub use capabilities::*;
pub use semver::Version as SemVer;
mod info;
pub use info::*;

@ -1,189 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
use super::ManagerCapabilityKind;
/// Set of supported capabilities for a manager
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet<ManagerCapability>);
impl ManagerCapabilities {
/// Return set of capabilities encompassing all possible capabilities
pub fn all() -> Self {
Self(
ManagerCapabilityKind::iter()
.map(ManagerCapability::from)
.collect(),
)
}
/// Return empty set of capabilities
pub fn none() -> Self {
Self(HashSet::new())
}
/// Returns true if the capability with described kind is included
pub fn contains(&self, kind: impl AsRef<str>) -> bool {
let cap = ManagerCapability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.contains(&cap)
}
/// Adds the specified capability to the set of capabilities
///
/// * If the set did not have this capability, returns `true`
/// * If the set did have this capability, returns `false`
pub fn insert(&mut self, cap: impl Into<ManagerCapability>) -> bool {
self.0.insert(cap.into())
}
/// Removes the capability with the described kind, returning the capability
pub fn take(&mut self, kind: impl AsRef<str>) -> Option<ManagerCapability> {
let cap = ManagerCapability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.take(&cap)
}
/// Removes the capability with the described kind, returning true if it existed
pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
let cap = ManagerCapability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.remove(&cap)
}
/// Converts into vec of capabilities sorted by kind
pub fn into_sorted_vec(self) -> Vec<ManagerCapability> {
let mut this = self.0.into_iter().collect::<Vec<_>>();
this.sort_unstable();
this
}
}
impl BitAnd for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitand(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitand(&rhs.0))
}
}
impl BitOr for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitor(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitor(&rhs.0))
}
}
impl BitOr<ManagerCapability> for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitor(self, rhs: ManagerCapability) -> Self::Output {
let mut other = ManagerCapabilities::none();
other.0.insert(rhs);
self.bitor(&other)
}
}
impl BitXor for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitxor(self, rhs: Self) -> Self::Output {
ManagerCapabilities(self.0.bitxor(&rhs.0))
}
}
impl FromIterator<ManagerCapability> for ManagerCapabilities {
fn from_iter<I: IntoIterator<Item = ManagerCapability>>(iter: I) -> Self {
let mut this = ManagerCapabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not
/// description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct ManagerCapability {
/// Label describing the kind of capability
pub kind: String,
/// Information about the capability
pub description: String,
}
impl ManagerCapability {
/// Will convert the [`ManagerCapability`]'s `kind` into a known [`ManagerCapabilityKind`] if
/// possible, returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<ManagerCapabilityKind> {
ManagerCapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for ManagerCapability {
fn eq(&self, other: &Self) -> bool {
self.kind.eq_ignore_ascii_case(&other.kind)
}
}
impl Eq for ManagerCapability {}
impl PartialOrd for ManagerCapability {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ManagerCapability {
fn cmp(&self, other: &Self) -> Ordering {
self.kind
.to_ascii_lowercase()
.cmp(&other.kind.to_ascii_lowercase())
}
}
impl Hash for ManagerCapability {
fn hash<H: Hasher>(&self, state: &mut H) {
self.kind.to_ascii_lowercase().hash(state);
}
}
impl From<ManagerCapabilityKind> for ManagerCapability {
/// Creates a new capability using the kind's default message
fn from(kind: ManagerCapabilityKind) -> Self {
Self {
kind: kind.to_string(),
description: kind
.get_message()
.map(ToString::to_string)
.unwrap_or_default(),
}
}
}

@ -1,36 +1,17 @@
use derive_more::IsVariant;
use distant_auth::msg::AuthenticationResponse;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
use super::{ManagerAuthenticationId, ManagerChannelId};
use crate::common::{ConnectionId, Destination, Map, UntypedRequest};
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, EnumDiscriminants, Serialize, Deserialize)]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[strum_discriminants(name(ManagerCapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum ManagerRequest {
/// Retrieve information about the server's capabilities
#[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
Capabilities,
/// Retrieve information about the manager's version.
Version,
/// Launch a server using the manager
#[strum_discriminants(strum(message = "Supports launching a server on remote machines"))]
Launch {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -40,7 +21,6 @@ pub enum ManagerRequest {
},
/// Initiate a connection through the manager
#[strum_discriminants(strum(message = "Supports connecting to remote servers"))]
Connect {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -50,7 +30,6 @@ pub enum ManagerRequest {
},
/// Submit some authentication message for the manager to use with an active connection
#[strum_discriminants(strum(message = "Supports authenticating with a remote server"))]
Authenticate {
/// Id of the authentication request that is being responded to
id: ManagerAuthenticationId,
@ -60,16 +39,12 @@ pub enum ManagerRequest {
},
/// Opens a channel for communication with an already-connected server
#[strum_discriminants(strum(message = "Supports opening a channel with a remote server"))]
OpenChannel {
/// Id of the connection
id: ConnectionId,
},
/// Sends data through channel
#[strum_discriminants(strum(
message = "Supports sending data through a channel with a remote server"
))]
Channel {
/// Id of the channel
id: ManagerChannelId,
@ -79,21 +54,17 @@ pub enum ManagerRequest {
},
/// Closes an open channel
#[strum_discriminants(strum(message = "Supports closing a channel with a remote server"))]
CloseChannel {
/// Id of the channel to close
id: ManagerChannelId,
},
/// Retrieve information about a specific connection
#[strum_discriminants(strum(message = "Supports retrieving connection-specific information"))]
Info { id: ConnectionId },
/// Kill a specific connection
#[strum_discriminants(strum(message = "Supports killing a remote connection"))]
Kill { id: ConnectionId },
/// Retrieve list of connections being managed
#[strum_discriminants(strum(message = "Supports retrieving a list of managed connections"))]
List,
}

@ -1,9 +1,7 @@
use distant_auth::msg::Authentication;
use serde::{Deserialize, Serialize};
use super::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
};
use super::{ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, SemVer};
use crate::common::{ConnectionId, Destination, UntypedResponse};
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -15,8 +13,8 @@ pub enum ManagerResponse {
/// Indicates that some error occurred during a request
Error { description: String },
/// Response to retrieving information about the manager's capabilities
Capabilities { supported: ManagerCapabilities },
/// Information about the manager's version.
Version { version: SemVer },
/// Confirmation of a server being launched
Launched {

@ -9,8 +9,8 @@ use tokio::sync::{oneshot, RwLock};
use crate::common::{ConnectionId, Destination, Map};
use crate::manager::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, ManagerRequest,
ManagerResponse, SemVer,
};
use crate::server::{RequestCtx, Server, ServerHandler};
@ -138,9 +138,11 @@ impl ManagerServer {
Ok(id)
}
/// Retrieves the list of supported capabilities for this manager
async fn capabilities(&self) -> io::Result<ManagerCapabilities> {
Ok(ManagerCapabilities::all())
/// Retrieves the manager's version.
async fn version(&self) -> io::Result<SemVer> {
env!("CARGO_PKG_VERSION")
.parse()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
}
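For reference, a small illustration of what that parse yields (the crate version is taken from the Cargo.toml change earlier in this diff):
// "0.20.0" parses into a semver::Version with major 0, minor 20, patch 0.
let v: SemVer = "0.20.0".parse().expect("valid semver");
assert_eq!((v.major, v.minor, v.patch), (0, 20, 0));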
/// Retrieves information about the connection to the server with the specified `id`
@ -173,7 +175,25 @@ impl ManagerServer {
/// Kills the connection to the server with the specified `id`
async fn kill(&self, id: ConnectionId) -> io::Result<()> {
match self.connections.write().await.remove(&id) {
Some(_) => Ok(()),
Some(connection) => {
// Close any open channels
if let Ok(ids) = connection.channel_ids().await {
let mut channels_lock = self.channels.write().await;
for id in ids {
if let Some(channel) = channels_lock.remove(&id) {
if let Err(x) = channel.close() {
error!("[Conn {id}] {x}");
}
}
}
}
// Make sure the connection is aborted so nothing new can happen
debug!("[Conn {id}] Aborting");
connection.abort();
Ok(())
}
None => Err(io::Error::new(
io::ErrorKind::NotConnected,
"No connection found",
@ -196,10 +216,10 @@ impl ServerHandler for ManagerServer {
} = ctx;
let response = match request.payload {
ManagerRequest::Capabilities {} => {
debug!("Looking up capabilities");
match self.capabilities().await {
Ok(supported) => ManagerResponse::Capabilities { supported },
ManagerRequest::Version {} => {
debug!("Looking up version");
match self.version().await {
Ok(version) => ManagerResponse::Version { version },
Err(x) => ManagerResponse::from(x),
}
}

@ -2,7 +2,7 @@ use std::collections::HashMap;
use std::{fmt, io};
use log::*;
use tokio::sync::mpsc;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::client::{Mailbox, UntypedClient};
@ -62,11 +62,17 @@ impl ManagerConnection {
pub async fn spawn(
spawn: Destination,
options: Map,
client: UntypedClient,
mut client: UntypedClient,
) -> io::Result<Self> {
let connection_id = rand::random();
let (tx, rx) = mpsc::unbounded_channel();
// NOTE: Ensure that the connection is severed when the client is dropped; otherwise, when
// the connection is terminated by aborting it or being dropped, the underlying connection
// would persist, which can cause problems such as the server's lonely shutdown never
// triggering!
client.shutdown_on_drop(true);
let (request_tx, request_rx) = mpsc::unbounded_channel();
let action_task = tokio::spawn(action_task(connection_id, rx, request_tx));
let response_task = tokio::spawn(response_task(
@ -105,16 +111,41 @@ impl ManagerConnection {
tx: self.tx.clone(),
})
}
}
impl Drop for ManagerConnection {
fn drop(&mut self) {
pub async fn channel_ids(&self) -> io::Result<Vec<ManagerChannelId>> {
let (tx, rx) = oneshot::channel();
self.tx
.send(Action::GetRegistered { cb: tx })
.map_err(|x| {
io::Error::new(
io::ErrorKind::BrokenPipe,
format!("channel_ids failed: {x}"),
)
})?;
let channel_ids = rx.await.map_err(|x| {
io::Error::new(
io::ErrorKind::BrokenPipe,
format!("channel_ids callback dropped: {x}"),
)
})?;
Ok(channel_ids)
}
/// Aborts the tasks used to engage with the connection.
pub fn abort(&self) {
self.action_task.abort();
self.request_task.abort();
self.response_task.abort();
}
}
impl Drop for ManagerConnection {
fn drop(&mut self) {
self.abort();
}
}
enum Action {
Register {
id: ManagerChannelId,
@ -125,6 +156,10 @@ enum Action {
id: ManagerChannelId,
},
GetRegistered {
cb: oneshot::Sender<Vec<ManagerChannelId>>,
},
Read {
res: UntypedResponse<'static>,
},
@ -140,6 +175,7 @@ impl fmt::Debug for Action {
match self {
Self::Register { id, .. } => write!(f, "Action::Register {{ id: {id}, .. }}"),
Self::Unregister { id } => write!(f, "Action::Unregister {{ id: {id} }}"),
Self::GetRegistered { .. } => write!(f, "Action::GetRegistered {{ .. }}"),
Self::Read { .. } => write!(f, "Action::Read {{ .. }}"),
Self::Write { id, .. } => write!(f, "Action::Write {{ id: {id}, .. }}"),
}
@ -204,6 +240,9 @@ async fn action_task(
Action::Unregister { id } => {
registered.remove(&id);
}
Action::GetRegistered { cb } => {
let _ = cb.send(registered.keys().copied().collect());
}
Action::Read { mut res } => {
// Split {channel id}_{request id} back into pieces and
// update the origin id to match the request id only

@ -9,7 +9,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::sync::{broadcast, RwLock};
use crate::common::{ConnectionId, Listener, Response, Transport};
use crate::common::{ConnectionId, Listener, Response, Transport, Version};
mod builder;
pub use builder::*;
@ -45,6 +45,9 @@ pub struct Server<T> {
/// Performs authentication using various methods
verifier: Verifier,
/// Version associated with the server, used by clients to verify compatibility
version: Version,
}
/// Interface for a handler that receives connections and requests
@ -81,6 +84,7 @@ impl Server<()> {
config: Default::default(),
handler: (),
verifier: Verifier::empty(),
version: Default::default(),
}
}
@ -115,6 +119,7 @@ impl<T> Server<T> {
config,
handler: self.handler,
verifier: self.verifier,
version: self.version,
}
}
@ -124,6 +129,7 @@ impl<T> Server<T> {
config: self.config,
handler,
verifier: self.verifier,
version: self.version,
}
}
@ -133,6 +139,17 @@ impl<T> Server<T> {
config: self.config,
handler: self.handler,
verifier,
version: self.version,
}
}
/// Consumes the current server, replacing its version with `version` and returning it.
pub fn version(self, version: Version) -> Self {
Self {
config: self.config,
handler: self.handler,
verifier: self.verifier,
version,
}
}
}
@ -172,6 +189,7 @@ where
config,
handler,
verifier,
version,
} = self;
let handler = Arc::new(handler);
@ -221,6 +239,7 @@ where
.sleep_duration(config.connection_sleep)
.heartbeat_duration(config.connection_heartbeat)
.verifier(Arc::downgrade(&verifier))
.version(version.clone())
.spawn(),
);
@ -253,6 +272,12 @@ mod tests {
use super::*;
use crate::common::{Connection, InmemoryTransport, MpscListener, Request, Response};
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
pub struct TestServerHandler;
#[async_trait]
@ -275,6 +300,7 @@ mod tests {
config,
handler: TestServerHandler,
verifier: Verifier::new(methods),
version: server_version!(),
}
}
@ -304,7 +330,7 @@ mod tests {
.expect("Failed to start server");
// Perform handshake and authentication with the server before beginning to send data
let mut connection = Connection::client(transport, DummyAuthHandler)
let mut connection = Connection::client(transport, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect to server");

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::{PortRange, TcpListener};
use crate::common::{PortRange, TcpListener, Version};
use crate::server::{Server, ServerConfig, ServerHandler, TcpServerRef};
pub struct TcpServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> TcpServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> TcpServerBuilder<T>

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::UnixSocketListener;
use crate::common::{UnixSocketListener, Version};
use crate::server::{Server, ServerConfig, ServerHandler, UnixSocketServerRef};
pub struct UnixSocketServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> UnixSocketServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> UnixSocketServerBuilder<T>

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::WindowsPipeListener;
use crate::common::{Version, WindowsPipeListener};
use crate::server::{Server, ServerConfig, ServerHandler, WindowsPipeServerRef};
pub struct WindowsPipeServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> WindowsPipeServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> WindowsPipeServerBuilder<T>

@ -14,7 +14,7 @@ use tokio::task::JoinHandle;
use super::{ConnectionState, RequestCtx, ServerHandler, ServerReply, ServerState, ShutdownTimer};
use crate::common::{
Backup, Connection, Frame, Interest, Keychain, Response, Transport, UntypedRequest,
Backup, Connection, Frame, Interest, Keychain, Response, Transport, UntypedRequest, Version,
};
pub type ServerKeychain = Keychain<oneshot::Receiver<Backup>>;
@ -65,6 +65,7 @@ pub(super) struct ConnectionTaskBuilder<H, S, T> {
sleep_duration: Duration,
heartbeat_duration: Duration,
verifier: Weak<Verifier>,
version: Version,
}
impl ConnectionTaskBuilder<(), (), ()> {
@ -80,6 +81,7 @@ impl ConnectionTaskBuilder<(), (), ()> {
sleep_duration: SLEEP_DURATION,
heartbeat_duration: MINIMUM_HEARTBEAT_DURATION,
verifier: Weak::new(),
version: Version::default(),
}
}
}
@ -96,6 +98,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -110,6 +113,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -124,6 +128,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -138,6 +143,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -152,6 +158,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -169,6 +176,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -183,6 +191,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -200,6 +209,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -214,6 +224,22 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier,
version: self.version,
}
}
pub fn version(self, version: Version) -> ConnectionTaskBuilder<H, S, T> {
ConnectionTaskBuilder {
handler: self.handler,
state: self.state,
keychain: self.keychain,
transport: self.transport,
shutdown: self.shutdown,
shutdown_timer: self.shutdown_timer,
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version,
}
}
}
@ -240,6 +266,7 @@ where
sleep_duration,
heartbeat_duration,
verifier,
version,
} = self;
// NOTE: This exists purely to make the compiler happy for macro_rules declaration order.
@ -408,7 +435,8 @@ where
match await_or_shutdown!(Box::pin(Connection::server(
transport,
verifier.as_ref(),
keychain
keychain,
version
))) {
Ok(connection) => connection,
Err(x) => {
@ -627,6 +655,12 @@ mod tests {
}};
}
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
#[test(tokio::test)]
async fn should_terminate_if_fails_access_verifier() {
let handler = Arc::new(TestServerHandler);
@ -671,11 +705,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -704,11 +739,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -754,12 +790,13 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, and then closes to
// trigger the server-side to close
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -828,12 +865,13 @@ mod tests {
})
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, set ready to fail
// for the server-side after client connection completes, and wait a bit
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -872,12 +910,13 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, and then closes to
// trigger the server-side to close
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -902,11 +941,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
let task = tokio::spawn(async move {
let mut client = Connection::client(t2, DummyAuthHandler)
let mut client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -939,11 +979,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
let task = tokio::spawn(async move {
let mut client = Connection::client(t2, DummyAuthHandler)
let mut client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -1047,10 +1088,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle the client-side establishment of a full connection
let _client_task = tokio::spawn(Connection::client(t2, DummyAuthHandler));
let _client_task =
tokio::spawn(Connection::client(t2, DummyAuthHandler, server_version!()));
// Shutdown server connection task while it is accepting the connection, verifying that we
// do not get an error in return
@ -1099,10 +1142,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle the client-side establishment of a full connection
let _client_task = tokio::spawn(Connection::client(t2, DummyAuthHandler));
let _client_task =
tokio::spawn(Connection::client(t2, DummyAuthHandler, server_version!()));
// Wait to ensure we complete the accept call first
let _ = rx.recv().await;

@ -3,7 +3,7 @@ name = "distant-protocol"
description = "Protocol library for distant, providing data structures used between the client and server"
categories = ["data-structures"]
keywords = ["protocol"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -17,8 +17,10 @@ tests = []
[dependencies]
bitflags = "2.3.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant"] }
regex = "1.8.3"
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
strum = { version = "0.24.1", features = ["derive"] }

@ -1,13 +1,13 @@
# distant protocol
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-protocol.svg
[distant_crates_lnk]: https://crates.io/crates/distant-protocol
[distant_doc_img]: https://docs.rs/distant-protocol/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-protocol
[distant_rustc_img]: https://img.shields.io/badge/distant_protocol-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_protocol-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -1,4 +1,3 @@
mod capabilities;
mod change;
mod cmd;
mod error;
@ -10,7 +9,6 @@ mod search;
mod system;
mod version;
pub use capabilities::*;
pub use change::*;
pub use cmd::*;
pub use error::*;
@ -24,6 +22,3 @@ pub use version::*;
/// Id for a remote process
pub type ProcessId = u32;
/// Version indicated by the tuple of (major, minor, patch).
pub type SemVer = (u8, u8, u8);

@ -1,380 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
/// Represents the kinds of capabilities available.
pub use crate::request::RequestKind as CapabilityKind;
/// Set of supported capabilities for a server
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Capabilities(#[into_iterator(owned, ref)] HashSet<Capability>);
impl Capabilities {
/// Return set of capabilities encompassing all possible capabilities
pub fn all() -> Self {
Self(CapabilityKind::iter().map(Capability::from).collect())
}
/// Return empty set of capabilities
pub fn none() -> Self {
Self(HashSet::new())
}
/// Returns true if the capability with described kind is included
pub fn contains(&self, kind: impl AsRef<str>) -> bool {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.contains(&cap)
}
/// Adds the specified capability to the set of capabilities
///
/// * If the set did not have this capability, returns `true`
/// * If the set did have this capability, returns `false`
pub fn insert(&mut self, cap: impl Into<Capability>) -> bool {
self.0.insert(cap.into())
}
/// Removes the capability with the described kind, returning the capability
pub fn take(&mut self, kind: impl AsRef<str>) -> Option<Capability> {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.take(&cap)
}
/// Removes the capability with the described kind, returning true if it existed
pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.remove(&cap)
}
/// Converts into vec of capabilities sorted by kind
pub fn into_sorted_vec(self) -> Vec<Capability> {
let mut this = self.0.into_iter().collect::<Vec<_>>();
this.sort_unstable();
this
}
}
impl AsRef<HashSet<Capability>> for Capabilities {
fn as_ref(&self) -> &HashSet<Capability> {
&self.0
}
}
impl AsMut<HashSet<Capability>> for Capabilities {
fn as_mut(&mut self) -> &mut HashSet<Capability> {
&mut self.0
}
}
impl Deref for Capabilities {
type Target = HashSet<Capability>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Capabilities {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl BitAnd for &Capabilities {
type Output = Capabilities;
fn bitand(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitand(&rhs.0))
}
}
impl BitOr for &Capabilities {
type Output = Capabilities;
fn bitor(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitor(&rhs.0))
}
}
impl BitOr<Capability> for &Capabilities {
type Output = Capabilities;
fn bitor(self, rhs: Capability) -> Self::Output {
let mut other = Capabilities::none();
other.0.insert(rhs);
self.bitor(&other)
}
}
impl BitXor for &Capabilities {
type Output = Capabilities;
fn bitxor(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitxor(&rhs.0))
}
}
impl FromIterator<Capability> for Capabilities {
fn from_iter<I: IntoIterator<Item = Capability>>(iter: I) -> Self {
let mut this = Capabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// Capability tied to a server. A capability is equivalent based on its kind and not description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Capability {
/// Label describing the kind of capability
pub kind: String,
/// Information about the capability
pub description: String,
}
impl Capability {
/// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible,
/// returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<CapabilityKind> {
CapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for Capability {
fn eq(&self, other: &Self) -> bool {
self.kind.eq_ignore_ascii_case(&other.kind)
}
}
impl Eq for Capability {}
impl PartialOrd for Capability {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Capability {
fn cmp(&self, other: &Self) -> Ordering {
self.kind
.to_ascii_lowercase()
.cmp(&other.kind.to_ascii_lowercase())
}
}
impl Hash for Capability {
fn hash<H: Hasher>(&self, state: &mut H) {
self.kind.to_ascii_lowercase().hash(state);
}
}
impl From<CapabilityKind> for Capability {
/// Creates a new capability using the kind's default message
fn from(kind: CapabilityKind) -> Self {
Self {
kind: kind.to_string(),
description: kind
.get_message()
.map(ToString::to_string)
.unwrap_or_default(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
mod capabilities {
use super::*;
#[test]
fn should_be_able_to_serialize_to_json() {
let capabilities: Capabilities = [Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect();
let value = serde_json::to_value(capabilities).unwrap();
assert_eq!(
value,
serde_json::json!([
{
"kind": "some kind",
"description": "some description",
}
])
);
}
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!([
{
"kind": "some kind",
"description": "some description",
}
]);
let capabilities: Capabilities = serde_json::from_value(value).unwrap();
assert_eq!(
capabilities,
[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect()
);
}
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let capabilities: Capabilities = [Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect();
// NOTE: We don't actually check the output here because it's an implementation detail
// and could change as we change how serialization is done. This is merely to verify
// that we can serialize since there are times when serde fails to serialize at
// runtime.
let _ = rmp_serde::encode::to_vec_named(&capabilities).unwrap();
}
#[test]
fn should_be_able_to_deserialize_from_msgpack() {
// NOTE: It may seem odd that we are serializing just to deserialize, but this is to
// verify that we are not corrupting or causing issues when serializing on a
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(
&[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect::<Capabilities>(),
)
.unwrap();
let capabilities: Capabilities = rmp_serde::decode::from_slice(&buf).unwrap();
assert_eq!(
capabilities,
[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect()
);
}
}
mod capability {
use super::*;
#[test]
fn should_be_able_to_serialize_to_json() {
let capability = Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
};
let value = serde_json::to_value(capability).unwrap();
assert_eq!(
value,
serde_json::json!({
"kind": "some kind",
"description": "some description",
})
);
}
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"kind": "some kind",
"description": "some description",
});
let capability: Capability = serde_json::from_value(value).unwrap();
assert_eq!(
capability,
Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}
);
}
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let capability = Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
};
// NOTE: We don't actually check the output here because it's an implementation detail
// and could change as we change how serialization is done. This is merely to verify
// that we can serialize since there are times when serde fails to serialize at
// runtime.
let _ = rmp_serde::encode::to_vec_named(&capability).unwrap();
}
#[test]
fn should_be_able_to_deserialize_from_msgpack() {
// NOTE: It may seem odd that we are serializing just to deserialize, but this is to
// verify that we are not corrupting or causing issues when serializing on a
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
})
.unwrap();
let capability: Capability = rmp_serde::decode::from_slice(&buf).unwrap();
assert_eq!(
capability,
Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}
);
}
}
}
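// Illustrative usage sketch (not part of the upstream diff): shows how the bitwise set
// operations implemented above compose. Only the `Capability` and `Capabilities` items
// defined in this file are assumed.
#[cfg(test)]
mod capabilities_usage_sketch {
    use super::*;
    fn cap(kind: &str) -> Capability {
        Capability {
            kind: kind.to_string(),
            description: String::new(),
        }
    }
    #[test]
    fn set_operations_compose_like_hash_set_operations() {
        let client: Capabilities = [cap("exec"), cap("fs_io")].into_iter().collect();
        let server: Capabilities = [cap("exec"), cap("fs_search")].into_iter().collect();
        // Union: capabilities available on either side
        let either: Capabilities = &client | &server;
        assert_eq!(either.len(), 3);
        // Intersection: capabilities supported by both sides (equality ignores description)
        let both: Capabilities = &client & &server;
        let expected: Capabilities = [cap("exec")].into_iter().collect();
        assert_eq!(both, expected);
        // Symmetric difference: capabilities supported by exactly one side
        let only_one: Capabilities = &client ^ &server;
        assert_eq!(only_one.len(), 2);
    }
}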

@ -12,22 +12,6 @@ impl Cmd {
pub fn new(cmd: impl Into<String>) -> Self {
Self(cmd.into())
}
/// Returns reference to the program portion of the command
pub fn program(&self) -> &str {
match self.0.split_once(' ') {
Some((program, _)) => program.trim(),
None => self.0.trim(),
}
}
/// Returns reference to the arguments portion of the command
pub fn arguments(&self) -> &str {
match self.0.split_once(' ') {
Some((_, arguments)) => arguments.trim(),
None => "",
}
}
}
impl Deref for Cmd {

@ -296,7 +296,7 @@ impl Permissions {
/// Converts a Unix `mode` into the permission set.
pub fn from_unix_mode(mode: u32) -> Self {
let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
let flags = UnixFilePermissionFlags::from_bits_truncate(mode & 0o777);
Self {
owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)),
owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)),
@ -426,15 +426,15 @@ impl From<Permissions> for std::fs::Permissions {
bitflags! {
struct UnixFilePermissionFlags: u32 {
const OWNER_READ = 0o400;
const OWNER_WRITE = 0o200;
const OWNER_EXEC = 0o100;
const GROUP_READ = 0o40;
const GROUP_WRITE = 0o20;
const GROUP_EXEC = 0o10;
const OTHER_READ = 0o4;
const OTHER_WRITE = 0o2;
const OTHER_EXEC = 0o1;
const OWNER_READ = 0o400;
const OWNER_WRITE = 0o200;
const OWNER_EXEC = 0o100;
const GROUP_READ = 0o040;
const GROUP_WRITE = 0o020;
const GROUP_EXEC = 0o010;
const OTHER_READ = 0o004;
const OTHER_WRITE = 0o002;
const OTHER_EXEC = 0o001;
}
}
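// Illustrative sketch (not part of the upstream diff): a worked example of how the flag values
// above decompose a combined mode, and how the `& 0o777` mask in `from_unix_mode` keeps
// file-type bits (e.g. 0o100000 for a regular file) out of the parsed flags.
#[cfg(test)]
mod unix_mode_sketch {
    use super::*;
    #[test]
    fn full_st_mode_value_decomposes_into_permission_bits_only() {
        // 0o640 = 0o400 (OWNER_READ) | 0o200 (OWNER_WRITE) | 0o040 (GROUP_READ); the leading
        // 0o100000 marks a regular file and is stripped by the `& 0o777` mask before parsing
        let permissions = Permissions::from_unix_mode(0o100640);
        assert_eq!(permissions.owner_read, Some(true));
        assert_eq!(permissions.owner_write, Some(true));
        assert_eq!(permissions.owner_exec, Some(false));
        assert_eq!(permissions.group_read, Some(true));
        assert_eq!(permissions.group_write, Some(false));
        assert_eq!(permissions.other_read, Some(false));
    }
}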
@ -442,6 +442,364 @@ bitflags! {
mod tests {
use super::*;
#[test]
fn should_properly_parse_unix_mode_into_permissions() {
let permissions = Permissions::from_unix_mode(0o400);
assert_eq!(
permissions,
Permissions {
owner_read: Some(true),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o200);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(true),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o100);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(true),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o040);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(true),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o020);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(true),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o010);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(true),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o004);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(true),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o002);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(true),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o001);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(true),
}
);
let permissions = Permissions::from_unix_mode(0o000);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o777);
assert_eq!(
permissions,
Permissions {
owner_read: Some(true),
owner_write: Some(true),
owner_exec: Some(true),
group_read: Some(true),
group_write: Some(true),
group_exec: Some(true),
other_read: Some(true),
other_write: Some(true),
other_exec: Some(true),
}
);
}
#[test]
fn should_properly_convert_into_unix_mode() {
assert_eq!(
Permissions {
owner_read: Some(true),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o400
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(true),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o200
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(true),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o100
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(true),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o040
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(true),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o020
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(true),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o010
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(true),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o004
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(true),
other_exec: Some(false),
}
.to_unix_mode(),
0o002
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(true),
}
.to_unix_mode(),
0o001
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o000
);
assert_eq!(
Permissions {
owner_read: Some(true),
owner_write: Some(true),
owner_exec: Some(true),
group_read: Some(true),
group_write: Some(true),
group_exec: Some(true),
other_read: Some(true),
other_write: Some(true),
other_exec: Some(true),
}
.to_unix_mode(),
0o777
);
}
#[test]
fn should_be_able_to_serialize_minimal_permissions_to_json() {
let permissions = Permissions {

@ -230,6 +230,35 @@ pub struct SearchQueryOptions {
/// include the remaining results even if less than pagination request.
#[serde(skip_serializing_if = "Option::is_none")]
pub pagination: Option<u64>,
/// If true, will skip searching hidden files.
#[serde(skip_serializing_if = "utils::is_false")]
pub ignore_hidden: bool,
/// If true, will read `.ignore` files that are used by `ripgrep` and `The Silver Searcher`
/// to determine which files and directories to not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_ignore_files: bool,
/// If true, will read `.ignore` files from parent directories that are used by `ripgrep` and
/// `The Silver Searcher` to determine which files and directories to not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_parent_ignore_files: bool,
/// If true, will read `.gitignore` files to determine which files and directories to not
/// search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_git_ignore_files: bool,
/// If true, will read global `.gitignore` files to determine which files and directories to
/// not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_global_git_ignore_files: bool,
/// If true, will read `.git/info/exclude` files to determine which files and directories to
/// not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_git_exclude_files: bool,
}
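// Illustrative sketch (not part of the upstream diff): the `skip_serializing_if = "utils::is_false"`
// attributes above keep disabled filter flags off the wire entirely. Demonstrated with a
// self-contained stand-in struct (and a local `is_false` helper) so the example does not depend
// on the remaining `SearchQueryOptions` fields.
#[cfg(test)]
mod filter_flag_serialization_sketch {
    use serde::Serialize;
    fn is_false(value: &bool) -> bool {
        !*value
    }
    #[derive(Default, Serialize)]
    struct FilterFlags {
        #[serde(skip_serializing_if = "is_false")]
        ignore_hidden: bool,
        #[serde(skip_serializing_if = "is_false")]
        use_git_ignore_files: bool,
    }
    #[test]
    fn disabled_flags_are_omitted_from_json() {
        let flags = FilterFlags {
            use_git_ignore_files: true,
            ..Default::default()
        };
        assert_eq!(
            serde_json::to_value(flags).unwrap(),
            serde_json::json!({ "use_git_ignore_files": true })
        );
    }
}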
/// Represents a match for a search query
@ -929,6 +958,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
};
let value = serde_json::to_value(options).unwrap();
@ -950,6 +985,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
};
let value = serde_json::to_value(options).unwrap();
@ -970,6 +1011,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
})
);
}
@ -990,6 +1037,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
}
);
}
@ -1011,6 +1064,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
});
let options: SearchQueryOptions = serde_json::from_value(value).unwrap();
@ -1029,6 +1088,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
}
);
}
@ -1044,6 +1109,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -1068,6 +1139,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -1092,6 +1169,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
})
.unwrap();
@ -1107,6 +1190,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
}
);
}
@ -1130,6 +1219,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
})
.unwrap();
@ -1149,6 +1244,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
}
);
}

@ -1,48 +1,80 @@
use serde::{Deserialize, Serialize};
use crate::common::{Capabilities, SemVer};
use crate::semver;
/// Represents version information.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
/// General version of server (arbitrary format)
pub server_version: String,
/// Server version.
pub server_version: semver::Version,
/// Protocol version
pub protocol_version: SemVer,
/// Protocol version.
pub protocol_version: semver::Version,
/// Capabilities of the server
pub capabilities: Capabilities,
/// Additional features available.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<String>,
}
impl Version {
/// Supports executing processes.
pub const CAP_EXEC: &'static str = "exec";
/// Supports reading and writing via filesystem IO.
pub const CAP_FS_IO: &'static str = "fs_io";
/// Supports modifying permissions of filesystem.
pub const CAP_FS_PERM: &'static str = "fs_perm";
/// Supports searching filesystem.
pub const CAP_FS_SEARCH: &'static str = "fs_search";
/// Supports watching filesystem for changes.
pub const CAP_FS_WATCH: &'static str = "fs_watch";
/// Supports TCP tunneling.
// pub const CAP_TCP_TUNNEL: &'static str = "tcp_tunnel";
/// Supports TCP reverse tunneling.
// pub const CAP_TCP_REV_TUNNEL: &'static str = "tcp_rev_tunnel";
/// Supports retrieving system information.
pub const CAP_SYS_INFO: &'static str = "sys_info";
pub const fn capabilities() -> &'static [&'static str] {
&[
Self::CAP_EXEC,
Self::CAP_FS_IO,
Self::CAP_FS_PERM,
Self::CAP_FS_SEARCH,
Self::CAP_FS_WATCH,
/* Self::CAP_TCP_TUNNEL,
Self::CAP_TCP_REV_TUNNEL, */
Self::CAP_SYS_INFO,
]
}
}
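// Illustrative sketch (not part of the upstream diff): how a caller might check whether a server
// advertises one of the capability strings defined above. Only the `Version` struct and its
// `CAP_*` constants are assumed; the version numbers are arbitrary examples.
#[cfg(test)]
mod capability_check_sketch {
    use super::*;
    use semver::Version as SemVer;
    #[test]
    fn can_check_for_an_advertised_capability() {
        let version = Version {
            server_version: "0.20.0".parse().unwrap(),
            protocol_version: SemVer::new(0, 20, 0),
            capabilities: vec![Version::CAP_EXEC.to_string(), Version::CAP_FS_IO.to_string()],
        };
        let supports = |cap: &str| version.capabilities.iter().any(|c| c.as_str() == cap);
        assert!(supports(Version::CAP_EXEC));
        assert!(!supports(Version::CAP_FS_SEARCH));
    }
}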
#[cfg(test)]
mod tests {
use super::*;
use crate::common::Capability;
use semver::Version as SemVer;
#[test]
fn should_be_able_to_serialize_to_json() {
let version = Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
};
let value = serde_json::to_value(version).unwrap();
assert_eq!(
value,
serde_json::json!({
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}]
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"]
})
);
}
@ -50,26 +82,18 @@ mod tests {
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}]
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"]
});
let version: Version = serde_json::from_value(value).unwrap();
assert_eq!(
version,
Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}
);
}
@ -77,14 +101,9 @@ mod tests {
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let version = Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -101,14 +120,9 @@ mod tests {
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
.unwrap();
@ -116,14 +130,9 @@ mod tests {
assert_eq!(
version,
Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}
);
}

@ -14,10 +14,95 @@ pub use common::*;
pub use msg::*;
pub use request::*;
pub use response::*;
pub use semver;
/// Protocol version indicated by the tuple of (major, minor, patch).
/// Protocol version of major/minor/patch.
///
/// This is different from the crate version, which matches that of the complete suite of distant
/// crates. Rather, this version is used to provide stability indicators when the protocol itself
/// changes across crate versions.
pub const PROTOCOL_VERSION: SemVer = (0, 1, 0);
/// This should match the version of this crate so that any significant change to the crate
/// version is also reflected in this constant, which can be used to verify compatibility across
/// the wire.
pub const PROTOCOL_VERSION: semver::Version = semver::Version::new(
const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64),
const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64),
);
/// Comparators used to indicate the [lower, upper) bounds of supported protocol versions.
const PROTOCOL_VERSION_COMPAT: (semver::Comparator, semver::Comparator) = (
semver::Comparator {
op: semver::Op::GreaterEq,
major: const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
minor: Some(const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64)),
patch: Some(const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64)),
pre: semver::Prerelease::EMPTY,
},
semver::Comparator {
op: semver::Op::Less,
major: {
let major = const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64);
// If we have a version like 0.20, then the upper bound is 0.21,
// otherwise if we have a version like 1.2, then the upper bound is 2.0
//
// So only increment the major if it is greater than 0
if major > 0 {
major + 1
} else {
major
}
},
minor: {
let major = const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64);
let minor = const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64);
// If we have a version like 0.20, then the upper bound is 0.21,
// otherwise if we have a version like 1.2, then the upper bound is 2.0
//
// So only increment the minor if major is 0
if major > 0 {
None
} else {
Some(minor + 1)
}
},
patch: None,
pre: semver::Prerelease::EMPTY,
},
);
/// Returns true if the provided version is compatible with the protocol version.
///
/// ```
/// use distant_protocol::{is_compatible_with, PROTOCOL_VERSION};
/// use distant_protocol::semver::Version;
///
/// // The current protocol version tied to this crate is always compatible
/// assert!(is_compatible_with(&PROTOCOL_VERSION));
///
/// // Major bumps in distant's protocol version are always considered incompatible
/// assert!(!is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major + 1,
/// PROTOCOL_VERSION.minor,
/// PROTOCOL_VERSION.patch,
/// )));
///
/// // While distant's protocol is being stabilized, minor version bumps
/// // are also considered incompatible!
/// assert!(!is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major,
/// PROTOCOL_VERSION.minor + 1,
/// PROTOCOL_VERSION.patch,
/// )));
///
/// // Patch bumps in distant's protocol are always considered compatible
/// assert!(is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major,
/// PROTOCOL_VERSION.minor,
/// PROTOCOL_VERSION.patch + 1,
/// )));
/// ```
pub fn is_compatible_with(version: &semver::Version) -> bool {
let (lower, upper) = PROTOCOL_VERSION_COMPAT;
lower.matches(version) && upper.matches(version)
}
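// Illustrative sketch (not part of the upstream diff): a worked reading of the bounds computed
// above. For a 0.x crate version such as 0.20.0 the supported window is `>=0.20.0, <0.21`, while
// a hypothetical stable 1.2.3 release would widen it to `>=1.2.3, <2`, since a stable major
// version is expected to stay wire-compatible across minor releases.
#[cfg(test)]
mod protocol_compat_sketch {
    use super::*;
    #[test]
    fn current_protocol_version_sits_inside_its_own_compat_window() {
        let (lower, upper) = PROTOCOL_VERSION_COMPAT;
        assert!(lower.matches(&PROTOCOL_VERSION));
        assert!(upper.matches(&PROTOCOL_VERSION));
        assert!(is_compatible_with(&PROTOCOL_VERSION));
    }
}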

@ -3,7 +3,6 @@ use std::path::PathBuf;
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
use crate::common::{
ChangeKind, Cmd, Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions,
@ -14,26 +13,10 @@ use crate::utils;
pub type Environment = HashMap<String, String>;
/// Represents the payload of a request to be performed on the remote machine
#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[strum_discriminants(name(RequestKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[derive(Clone, Debug, PartialEq, Eq, IsVariant, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum Request {
/// Reads a file from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading binary file"))]
FileRead {
/// The path to the file on the remote machine
path: PathBuf,
@ -41,7 +24,6 @@ pub enum Request {
/// Reads a file from the specified path on the remote machine
/// and treats the contents as text
#[strum_discriminants(strum(message = "Supports reading text file"))]
FileReadText {
/// The path to the file on the remote machine
path: PathBuf,
@ -49,7 +31,6 @@ pub enum Request {
/// Writes a file, creating it if it does not exist, and overwriting any existing content
/// on the remote machine
#[strum_discriminants(strum(message = "Supports writing binary file"))]
FileWrite {
/// The path to the file on the remote machine
path: PathBuf,
@ -61,7 +42,6 @@ pub enum Request {
/// Writes a file using text instead of bytes, creating it if it does not exist,
/// and overwriting any existing content on the remote machine
#[strum_discriminants(strum(message = "Supports writing text file"))]
FileWriteText {
/// The path to the file on the remote machine
path: PathBuf,
@ -71,7 +51,6 @@ pub enum Request {
},
/// Appends to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to binary file"))]
FileAppend {
/// The path to the file on the remote machine
path: PathBuf,
@ -82,7 +61,6 @@ pub enum Request {
},
/// Appends text to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to text file"))]
FileAppendText {
/// The path to the file on the remote machine
path: PathBuf,
@ -92,7 +70,6 @@ pub enum Request {
},
/// Reads a directory from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading directory"))]
DirRead {
/// The path to the directory on the remote machine
path: PathBuf,
@ -126,7 +103,6 @@ pub enum Request {
},
/// Creates a directory on the remote machine
#[strum_discriminants(strum(message = "Supports creating directory"))]
DirCreate {
/// The path to the directory on the remote machine
path: PathBuf,
@ -137,7 +113,6 @@ pub enum Request {
},
/// Removes a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))]
Remove {
/// The path to the file or directory on the remote machine
path: PathBuf,
@ -149,7 +124,6 @@ pub enum Request {
},
/// Copies a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))]
Copy {
/// The path to the file or directory on the remote machine
src: PathBuf,
@ -159,7 +133,6 @@ pub enum Request {
},
/// Moves/renames a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))]
Rename {
/// The path to the file or directory on the remote machine
src: PathBuf,
@ -169,7 +142,6 @@ pub enum Request {
},
/// Watches a path for changes
#[strum_discriminants(strum(message = "Supports watching filesystem for changes"))]
Watch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -189,23 +161,18 @@ pub enum Request {
},
/// Unwatches a path for changes, meaning no additional changes will be reported
#[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))]
Unwatch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
},
/// Checks whether the given path exists
#[strum_discriminants(strum(message = "Supports checking if a path exists"))]
Exists {
/// The path to the file or directory on the remote machine
path: PathBuf,
},
/// Retrieves filesystem metadata for the specified path on the remote machine
#[strum_discriminants(strum(
message = "Supports retrieving metadata about a file, directory, or symlink"
))]
Metadata {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -222,9 +189,6 @@ pub enum Request {
},
/// Sets permissions on a file, directory, or symlink on the remote machine
#[strum_discriminants(strum(
message = "Supports setting permissions on a file, directory, or symlink"
))]
SetPermissions {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -238,23 +202,18 @@ pub enum Request {
},
/// Searches filesystem using the provided query
#[strum_discriminants(strum(message = "Supports searching filesystem using queries"))]
Search {
/// Query to perform against the filesystem
query: SearchQuery,
},
/// Cancels an active search being run against the filesystem
#[strum_discriminants(strum(
message = "Supports canceling an active search against the filesystem"
))]
CancelSearch {
/// Id of the search to cancel
id: SearchId,
},
/// Spawns a new process on the remote machine
#[strum_discriminants(strum(message = "Supports spawning a process"))]
ProcSpawn {
/// The full command to run including arguments
cmd: Cmd,
@ -273,14 +232,12 @@ pub enum Request {
},
/// Kills a process running on the remote machine
#[strum_discriminants(strum(message = "Supports killing a spawned process"))]
ProcKill {
/// Id of the actively-running process
id: ProcessId,
},
/// Sends additional data to stdin of running process
#[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))]
ProcStdin {
/// Id of the actively-running process to send stdin data
id: ProcessId,
@ -291,7 +248,6 @@ pub enum Request {
},
/// Resize pty of remote process
#[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))]
ProcResizePty {
/// Id of the actively-running process whose pty to resize
id: ProcessId,
@ -301,11 +257,9 @@ pub enum Request {
},
/// Retrieve information about the server and the system it is on
#[strum_discriminants(strum(message = "Supports retrieving system information"))]
SystemInfo {},
/// Retrieve information about the server's protocol version
#[strum_discriminants(strum(message = "Supports retrieving version"))]
Version {},
}
@ -2114,6 +2068,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
};
@ -2145,6 +2105,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
},
},
})
@ -2205,6 +2171,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
},
},
});
@ -2230,6 +2202,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
}
@ -2274,6 +2252,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
};
@ -2339,6 +2323,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
})
@ -2365,6 +2355,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
}

@ -2013,19 +2013,14 @@ mod tests {
mod version {
use super::*;
use crate::common::{Capabilities, Capability};
use crate::semver::Version as SemVer;
#[test]
fn should_be_able_to_serialize_to_json() {
let payload = Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
});
let value = serde_json::to_value(payload).unwrap();
@ -2033,12 +2028,9 @@ mod tests {
value,
serde_json::json!({
"type": "version",
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}],
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"],
})
);
}
@ -2047,18 +2039,18 @@ mod tests {
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"type": "version",
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": Capabilities::all(),
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"],
});
let payload: Response = serde_json::from_value(value).unwrap();
assert_eq!(
payload,
Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
);
}
@ -2066,9 +2058,9 @@ mod tests {
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let payload = Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
});
// NOTE: We don't actually check the output here because it's an implementation detail
@ -2085,9 +2077,9 @@ mod tests {
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}))
.unwrap();
@ -2095,9 +2087,9 @@ mod tests {
assert_eq!(
payload,
Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
);
}

@ -2,7 +2,7 @@
name = "distant-ssh2"
description = "Library to enable native ssh-2 protocol for use with distant sessions"
categories = ["network-programming"]
version = "0.20.0-alpha.9"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -20,7 +20,7 @@ async-compat = "0.2.1"
async-once-cell = "0.5.2"
async-trait = "0.1.68"
derive_more = { version = "0.99.17", default-features = false, features = ["display", "error"] }
distant-core = { version = "=0.20.0-alpha.9", path = "../distant-core" }
distant-core = { version = "=0.20.0", path = "../distant-core" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant ssh2
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-ssh2.svg
[distant_crates_lnk]: https://crates.io/crates/distant-ssh2
[distant_doc_img]: https://docs.rs/distant-ssh2/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-ssh2
[distant_rustc_img]: https://img.shields.io/badge/distant_ssh2-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_ssh2-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
Library provides native ssh integration into the
[`distant`](https://github.com/chipsenkbeil/distant) binary.

@ -7,9 +7,10 @@ use std::time::Duration;
use async_compat::CompatExt;
use async_once_cell::OnceCell;
use async_trait::async_trait;
use distant_core::protocol::semver;
use distant_core::protocol::{
Capabilities, CapabilityKind, DirEntry, Environment, FileType, Metadata, Permissions,
ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION,
DirEntry, Environment, FileType, Metadata, Permissions, ProcessId, PtySize,
SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION,
};
use distant_core::{DistantApi, DistantCtx};
use log::*;
@ -936,18 +937,33 @@ impl DistantApi for SshDistantApi {
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
debug!("[Conn {}] Querying capabilities", ctx.connection_id);
let mut capabilities = Capabilities::all();
// Searching is not supported by ssh implementation
// TODO: Could we have external search using ripgrep's JSON lines API?
capabilities.take(CapabilityKind::Search);
capabilities.take(CapabilityKind::CancelSearch);
// Broken via wezterm-ssh, so not supported right now
capabilities.take(CapabilityKind::SetPermissions);
let capabilities = vec![
Version::CAP_EXEC.to_string(),
Version::CAP_FS_IO.to_string(),
Version::CAP_SYS_INFO.to_string(),
];
// Parse our server's version
let mut server_version: semver::Version = env!("CARGO_PKG_VERSION")
.parse()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
// Add the package name to the version information
if server_version.build.is_empty() {
server_version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME"))
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
} else {
let raw_build_str = format!(
"{}.{}",
server_version.build.as_str(),
env!("CARGO_PKG_NAME")
);
server_version.build = semver::BuildMetadata::new(&raw_build_str)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
}
Ok(Version {
server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
server_version,
protocol_version: PROTOCOL_VERSION,
capabilities,
})

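// Illustrative sketch (not part of the upstream diff) of the build-metadata handling above: the
// crate name is appended to the semver build field, producing e.g. `0.20.0+distant-ssh2`, or
// `0.20.0+nightly.distant-ssh2` when build metadata is already present. The helper below is a
// hypothetical stand-in for the inline logic in `version()`.
#[cfg(test)]
mod server_version_build_sketch {
    use distant_core::protocol::semver;
    fn with_build_suffix(mut version: semver::Version, name: &str) -> semver::Version {
        version.build = if version.build.is_empty() {
            semver::BuildMetadata::new(name).unwrap()
        } else {
            semver::BuildMetadata::new(&format!("{}.{}", version.build.as_str(), name)).unwrap()
        };
        version
    }
    #[test]
    fn package_name_lands_in_build_metadata() {
        let plain: semver::Version = "0.20.0".parse().unwrap();
        assert_eq!(
            with_build_suffix(plain, "distant-ssh2").to_string(),
            "0.20.0+distant-ssh2"
        );
        let tagged: semver::Version = "0.20.0+nightly".parse().unwrap();
        assert_eq!(
            with_build_suffix(tagged, "distant-ssh2").to_string(),
            "0.20.0+nightly.distant-ssh2"
        );
    }
}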
@ -16,15 +16,16 @@ use std::str::FromStr;
use std::time::Duration;
use async_compat::CompatExt;
use async_once_cell::OnceCell;
use async_trait::async_trait;
use distant_core::net::auth::{AuthHandlerMap, DummyAuthHandler, Verifier};
use distant_core::net::client::{Client, ClientConfig};
use distant_core::net::common::{Host, InmemoryTransport, OneshotListener};
use distant_core::net::common::{Host, InmemoryTransport, OneshotListener, Version};
use distant_core::net::server::{Server, ServerRef};
use distant_core::protocol::PROTOCOL_VERSION;
use distant_core::{DistantApiServerHandler, DistantClient, DistantSingleKeyCredentials};
use log::*;
use smol::channel::Receiver as SmolReceiver;
use tokio::sync::Mutex;
use wezterm_ssh::{
ChildKiller, Config as WezConfig, MasterPty, PtySize, Session as WezSession,
SessionEvent as WezSessionEvent,
@ -325,17 +326,20 @@ impl SshAuthHandler for LocalSshAuthHandler {
}
}
/// Represents an ssh2 client
/// Represents an ssh2 client.
pub struct Ssh {
session: WezSession,
events: SmolReceiver<WezSessionEvent>,
host: String,
port: u16,
authenticated: bool,
/// Cached copy of the family representing the remote machine.
cached_family: Mutex<Option<SshFamily>>,
}
impl Ssh {
/// Connect to a remote TCP server using SSH
/// Connect to a remote TCP server using SSH.
pub fn connect(host: impl AsRef<str>, opts: SshOpts) -> io::Result<Self> {
debug!(
"Establishing ssh connection to {} using {:?}",
@ -416,15 +420,16 @@ impl Ssh {
host: host.as_ref().to_string(),
port,
authenticated: false,
cached_family: Mutex::new(None),
})
}
/// Host this client is connected to
/// Host this client is connected to.
pub fn host(&self) -> &str {
&self.host
}
/// Port this client is connected to on remote host
/// Port this client is connected to on remote host.
pub fn port(&self) -> u16 {
self.port
}
@ -434,7 +439,7 @@ impl Ssh {
self.authenticated
}
/// Authenticates the [`Ssh`] if not already authenticated
/// Authenticates the [`Ssh`] if not already authenticated.
pub async fn authenticate(&mut self, handler: impl SshAuthHandler) -> io::Result<()> {
// If already authenticated, exit
if self.authenticated {
@ -499,10 +504,10 @@ impl Ssh {
Ok(())
}
/// Detects the family of operating system on the remote machine
/// Detects the family of operating system on the remote machine.
///
/// Caches the result such that subsequent checks will return the same family.
pub async fn detect_family(&self) -> io::Result<SshFamily> {
static INSTANCE: OnceCell<SshFamily> = OnceCell::new();
// Exit early if not authenticated as this is a requirement
if !self.authenticated {
return Err(io::Error::new(
@ -511,18 +516,23 @@ impl Ssh {
));
}
INSTANCE
.get_or_try_init(async move {
let is_windows = utils::is_windows(&self.session).await?;
let mut family = self.cached_family.lock().await;
Ok(if is_windows {
SshFamily::Windows
} else {
SshFamily::Unix
})
})
.await
.copied()
// Family value is not present, so we retrieve it now and populate our cache
if family.is_none() {
// Check if we are windows, otherwise assume unix; any error encountered is returned
// early, which also drops our lock on the cache
let is_windows = utils::is_windows(&self.session).await?;
*family = Some(if is_windows {
SshFamily::Windows
} else {
SshFamily::Unix
});
}
// Cache should always be Some(...) by this point
Ok(family.unwrap())
}
/// Consume [`Ssh`] and produce a [`DistantClient`] that is connected to a remote
@ -579,6 +589,11 @@ impl Ssh {
match Client::tcp(addr)
.auth_handler(AuthHandlerMap::new().with_static_key(key.clone()))
.connect_timeout(timeout)
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.connect()
.await
{

@ -1,6 +1,6 @@
use std::ffi::OsString;
use crate::options::DistantSubcommand;
use crate::options::{DistantSubcommand, OptionsError};
use crate::{CliResult, Options};
mod commands;
@ -18,12 +18,12 @@ pub struct Cli {
impl Cli {
/// Creates a new CLI instance by parsing command-line arguments
pub fn initialize() -> anyhow::Result<Self> {
pub fn initialize() -> Result<Self, OptionsError> {
Self::initialize_from(std::env::args_os())
}
/// Creates a new CLI instance by parsing the provided arguments
pub fn initialize_from<I, T>(args: I) -> anyhow::Result<Self>
pub fn initialize_from<I, T>(args: I) -> Result<Self, OptionsError>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,

@ -7,10 +7,10 @@ use std::time::Duration;
use anyhow::Context;
use distant_core::net::common::{ConnectionId, Host, Map, Request, Response};
use distant_core::net::manager::ManagerClient;
use distant_core::protocol::semver;
use distant_core::protocol::{
self, Capabilities, ChangeKind, ChangeKindSet, FileType, Permissions, SearchQuery,
SearchQueryContentsMatch, SearchQueryMatch, SearchQueryPathMatch, SetPermissionsOptions,
SystemInfo,
self, ChangeKind, ChangeKindSet, FileType, Permissions, SearchQuery, SearchQueryContentsMatch,
SearchQueryMatch, SearchQueryPathMatch, SetPermissionsOptions, SystemInfo, Version,
};
use distant_core::{DistantChannel, DistantChannelExt, RemoteCommand, Searcher, Watcher};
use log::*;
@ -25,7 +25,10 @@ use crate::cli::common::{
Cache, Client, JsonAuthHandler, MsgReceiver, MsgSender, PromptAuthHandler,
};
use crate::constants::MAX_PIPE_CHUNK_SIZE;
use crate::options::{ClientFileSystemSubcommand, ClientSubcommand, Format, NetworkSettings};
use crate::options::{
ClientFileSystemSubcommand, ClientSubcommand, Format, NetworkSettings, ParseShellError,
Shell as ShellOption,
};
use crate::{CliError, CliResult};
mod lsp;
@ -364,10 +367,12 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
cache,
connection,
cmd,
cmd_str,
current_dir,
environment,
lsp,
pty,
shell,
network,
} => {
debug!("Connecting to manager");
@ -382,20 +387,55 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let channel = client
let mut channel: DistantChannel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?;
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?
.into_client()
.into_channel();
// Convert cmd into string
let cmd = cmd.join(" ");
let cmd = cmd_str.unwrap_or_else(|| cmd.join(" "));
// Check if we should attempt to run the command in a shell
let cmd = match shell {
None => cmd,
// Use default shell, which we need to figure out
Some(None) => {
let system_info = channel
.system_info()
.await
.context("Failed to detect remote operating system")?;
// If system reports a default shell, use it, otherwise pick a default based on the
// operating system being windows or non-windows
let shell: ShellOption = if !system_info.shell.is_empty() {
system_info.shell.parse()
} else if system_info.family.eq_ignore_ascii_case("windows") {
"cmd.exe".parse()
} else {
"/bin/sh".parse()
}
.map_err(|x: ParseShellError| anyhow::anyhow!(x))?;
shell
.make_cmd_string(&cmd)
.map_err(|x| anyhow::anyhow!(x))?
}
// Use explicit shell
Some(Some(shell)) => shell
.make_cmd_string(&cmd)
.map_err(|x| anyhow::anyhow!(x))?,
};
if let Some(scheme) = lsp {
debug!(
"Spawning LSP server (pty = {}, cwd = {:?}): {}",
pty, current_dir, cmd
);
Lsp::new(channel.into_client().into_channel())
Lsp::new(channel)
.spawn(cmd, current_dir, scheme, pty, MAX_PIPE_CHUNK_SIZE)
.await?;
} else if pty {
@ -403,7 +443,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
"Spawning pty process (environment = {:?}, cwd = {:?}): {}",
environment, current_dir, cmd
);
Shell::new(channel.into_client().into_channel())
Shell::new(channel)
.spawn(
cmd,
environment.into_map(),
@ -420,7 +460,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.environment(environment.into_map())
.current_dir(current_dir)
.pty(None)
.spawn(channel.into_client().into_channel(), &cmd)
.spawn(channel, &cmd)
.await
.with_context(|| format!("Failed to spawn {cmd}"))?;
@ -541,32 +581,51 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
match format {
Format::Shell => {
let (major, minor, patch) = distant_core::protocol::PROTOCOL_VERSION;
let mut client_version: semver::Version = env!("CARGO_PKG_VERSION")
.parse()
.context("Failed to parse client version")?;
// Add the package name to the version information
if client_version.build.is_empty() {
client_version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME"))
.context("Failed to define client build metadata")?;
} else {
let raw_build_str = format!(
"{}.{}",
client_version.build.as_str(),
env!("CARGO_PKG_NAME")
);
client_version.build = semver::BuildMetadata::new(&raw_build_str)
.context("Failed to define client build metadata")?;
}
println!(
"Client: {} {} (Protocol {major}.{minor}.{patch})",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION")
"Client: {client_version} (Protocol {})",
distant_core::protocol::PROTOCOL_VERSION
);
let (major, minor, patch) = version.protocol_version;
println!(
"Server: {} (Protocol {major}.{minor}.{patch})",
version.server_version
"Server: {} (Protocol {})",
version.server_version, version.protocol_version
);
// Build a complete set of capabilities to show which ones we support
let client_capabilities = Capabilities::all();
let server_capabilities = version.capabilities;
let mut capabilities: Vec<String> = client_capabilities
.union(server_capabilities.as_ref())
.map(|cap| {
let kind = &cap.kind;
if client_capabilities.contains(kind)
&& server_capabilities.contains(kind)
{
format!("+{kind}")
let mut capabilities: HashMap<String, u8> = Version::capabilities()
.iter()
.map(|cap| (cap.to_string(), 1))
.collect();
for cap in version.capabilities {
*capabilities.entry(cap).or_default() += 1;
}
let mut capabilities: Vec<String> = capabilities
.into_iter()
.map(|(cap, cnt)| {
if cnt > 1 {
format!("+{cap}")
} else {
format!("-{kind}")
format!("-{cap}")
}
})
.collect();
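// Worked example (illustrative, not part of the upstream diff): a capability present in both
// the client-side list from Version::capabilities() and the server's reported list ends up
// with a count of 2 and renders as "+cap"; one known to only a single side stays at 1 and
// renders as "-cap".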
@ -1128,6 +1187,25 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
mode,
path,
}) => {
debug!("Connecting to manager");
let mut client = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let mut channel: DistantChannel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?
.into_client()
.into_channel();
debug!("Parsing {mode:?} into a proper set of permissions");
let permissions = {
if mode.trim().eq_ignore_ascii_case("readonly") {
@ -1137,37 +1215,61 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
} else {
// Attempt to parse an octal number (chmod absolute), falling back to
// parsing the mode string similar to chmod's symbolic mode
let mode = match u32::from_str_radix(&mode, 8) {
Ok(absolute) => file_mode::Mode::from(absolute),
match u32::from_str_radix(&mode, 8) {
Ok(absolute) => {
Permissions::from_unix_mode(file_mode::Mode::from(absolute).mode())
}
Err(_) => {
let mut new_mode = file_mode::Mode::empty();
new_mode
// The way parsing works, we need to parse and apply to two different
// situations
//
// 1. A mode that is all 1s so we can see if the mask would remove
// permission to some of the bits
// 2. A mode that is all 0s so we can see if the mask would add
// permission to some of the bits
let mut removals = file_mode::Mode::from(0o777);
removals
.set_str(&mode)
.context("Failed to parse mode string")?;
new_mode
let removals_mask = !removals.mode();
let mut additions = file_mode::Mode::empty();
additions
.set_str(&mode)
.context("Failed to parse mode string")?;
let additions_mask = additions.mode();
macro_rules! get_mode {
($mask:expr) => {{
let is_false = removals_mask & $mask > 0;
let is_true = additions_mask & $mask > 0;
match (is_true, is_false) {
(true, false) => Some(true),
(false, true) => Some(false),
(false, false) => None,
(true, true) => {
unreachable!("Mask cannot be adding and removing")
}
}
}};
}
Permissions {
owner_read: get_mode!(0o400),
owner_write: get_mode!(0o200),
owner_exec: get_mode!(0o100),
group_read: get_mode!(0o040),
group_write: get_mode!(0o020),
group_exec: get_mode!(0o010),
other_read: get_mode!(0o004),
other_write: get_mode!(0o002),
other_exec: get_mode!(0o001),
}
}
};
Permissions::from_unix_mode(mode.mode())
}
}
};
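// Worked example (illustrative, not part of the upstream diff): for the symbolic mode "g+w",
// applying it to all-ones leaves 0o777 unchanged, so no permission bit is flagged for removal,
// while applying it to all-zeros yields 0o020, so only the group-write bit is flagged for
// addition. get_mode!(0o020) therefore returns Some(true) and every other position returns
// None, i.e. that permission is left untouched on the remote machine.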
debug!("Connecting to manager");
let mut client = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let channel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?;
let options = SetPermissionsOptions {
recursive,
follow_symlinks,
@ -1175,8 +1277,6 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
};
debug!("Setting permissions for {path:?} as (permissions = {permissions:?}, options = {options:?})");
channel
.into_client()
.into_channel()
.set_permissions(path.as_path(), permissions, options)
.await
.with_context(|| {

@ -14,15 +14,18 @@ pub fn run(cmd: GenerateSubcommand) -> CliResult {
async fn async_run(cmd: GenerateSubcommand) -> CliResult {
match cmd {
GenerateSubcommand::Config { file } => tokio::fs::write(file, Config::default_raw_str())
.await
.context("Failed to write default config to {file:?}")?,
GenerateSubcommand::Config { output } => match output {
Some(path) => tokio::fs::write(path, Config::default_raw_str())
.await
.context("Failed to write default config to {path:?}")?,
None => println!("{}", Config::default_raw_str()),
},
GenerateSubcommand::Completion { file, shell } => {
GenerateSubcommand::Completion { output, shell } => {
let name = "distant";
let mut cmd = Options::command();
if let Some(path) = file {
if let Some(path) = output {
clap_generate(
shell,
&mut cmd,

@ -228,41 +228,24 @@ async fn async_run(cmd: ManagerSubcommand) -> CliResult {
Ok(())
}
ManagerSubcommand::Capabilities { format, network } => {
ManagerSubcommand::Version { format, network } => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Getting list of capabilities");
let caps = client
.capabilities()
.await
.context("Failed to get list of capabilities")?;
debug!("Got capabilities: {caps:?}");
debug!("Getting version");
let version = client.version().await.context("Failed to get version")?;
debug!("Got version: {version}");
match format {
Format::Json => {
println!(
"{}",
serde_json::to_string(&caps)
.context("Failed to format capabilities as json")?
serde_json::to_string(&serde_json::json!({ "version": version }))
.context("Failed to format version as json")?
);
}
Format::Shell => {
#[derive(Tabled)]
struct CapabilityRow {
kind: String,
description: String,
}
println!(
"{}",
Table::new(caps.into_sorted_vec().into_iter().map(|cap| {
CapabilityRow {
kind: cap.kind,
description: cap.description,
}
}))
);
println!("{version}");
}
}

@ -11,12 +11,13 @@ use distant_core::net::auth::{
StaticKeyAuthMethodHandler,
};
use distant_core::net::client::{Client, ClientConfig, ReconnectStrategy, UntypedClient};
use distant_core::net::common::{Destination, Map, SecretKey32};
use distant_core::net::common::{Destination, Map, SecretKey32, Version};
use distant_core::net::manager::{ConnectHandler, LaunchHandler};
use distant_core::protocol::PROTOCOL_VERSION;
use log::*;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use tokio::process::Command;
use tokio::sync::{watch, Mutex};
use crate::options::{BindAddress, ClientLaunchConfig};
@ -32,15 +33,28 @@ fn invalid(label: &str) -> io::Error {
/// Supports launching locally through the manager as defined by `manager://...`
pub struct ManagerLaunchHandler {
servers: Mutex<Vec<Child>>,
shutdown: watch::Sender<bool>,
}
impl ManagerLaunchHandler {
pub fn new() -> Self {
Self {
servers: Mutex::new(Vec::new()),
shutdown: watch::channel(false).0,
}
}
/// Triggers shutdown of any tasks still checking that spawned servers have terminated.
pub fn shutdown(&self) {
let _ = self.shutdown.send(true);
}
}
impl Drop for ManagerLaunchHandler {
/// Stops waiting on any servers spawned by this handler, which in turn should
/// shut them down.
fn drop(&mut self) {
self.shutdown();
}
}
#[async_trait]
@ -137,9 +151,34 @@ impl LaunchHandler for ManagerLaunchHandler {
match stdout.read_line(&mut line).await {
Ok(n) if n > 0 => {
if let Ok(destination) = line[..n].trim().parse::<Destination>() {
// Store a reference to the server so we can terminate them
// when this handler is dropped
self.servers.lock().await.push(child);
let mut rx = self.shutdown.subscribe();
// Wait for the process to complete in a task. We have to do this
// to properly check the exit status, otherwise if the server
// self-terminates then we get a ZOMBIE process! Oh no!
//
// This also replaces the need to store the children within the
// handler itself and instead uses a watch update to kill the
// task in advance in the case where the child hasn't terminated.
tokio::spawn(async move {
// We don't actually care about the result, just that we're done
loop {
tokio::select! {
result = rx.changed() => {
if result.is_err() {
break;
}
if *rx.borrow_and_update() {
break;
}
}
_ = child.wait() => {
break;
}
}
}
});
break Ok(destination);
} else {
@ -247,6 +286,11 @@ impl DistantConnectHandler {
..Default::default()
})
.connect_timeout(Duration::from_secs(180))
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.connect_untyped()
.await
{

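The launch-handler change above boils down to one pattern: spawn the server, then reap it from a background task that can also be cancelled through a watch channel. A minimal, self-contained sketch of that pattern follows; the names (`spawn_and_reap`, the `sleep` command) are illustrative and not part of distant itself.

use tokio::process::Command;
use tokio::sync::watch;

// Spawn a child process and wait on it from a background task so that it is
// always reaped (no zombie), while a watch channel lets the owner cancel the
// wait early, e.g. when a handler is dropped. Must be called inside a Tokio
// runtime.
fn spawn_and_reap(shutdown: watch::Receiver<bool>) -> std::io::Result<()> {
    let mut child = Command::new("sleep").arg("60").spawn()?;
    let mut rx = shutdown;
    tokio::spawn(async move {
        loop {
            tokio::select! {
                changed = rx.changed() => {
                    // Sender dropped or shutdown flagged: stop waiting.
                    if changed.is_err() || *rx.borrow_and_update() {
                        break;
                    }
                }
                // Child exited on its own: its exit status has been collected.
                _ = child.wait() => break,
            }
        }
    });
    Ok(())
}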
@ -2,8 +2,9 @@ use std::io::{self, Read, Write};
use anyhow::Context;
use distant_core::net::auth::Verifier;
use distant_core::net::common::{Host, SecretKey32};
use distant_core::net::common::{Host, SecretKey32, Version};
use distant_core::net::server::{Server, ServerConfig as NetServerConfig};
use distant_core::protocol::PROTOCOL_VERSION;
use distant_core::DistantSingleKeyCredentials;
use distant_local::{Config as LocalConfig, WatchConfig as LocalWatchConfig};
use log::*;
@ -159,6 +160,11 @@ async fn async_run(cmd: ServerSubcommand, _is_forked: bool) -> CliResult {
})
.handler(handler)
.verifier(Verifier::static_key(key.clone()))
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.start(addr, port)
.await
.with_context(|| format!("Failed to start server @ {addr} with {port}"))?;

@ -7,7 +7,7 @@ use distant_core::net::auth::{
AuthHandler, AuthMethodHandler, PromptAuthMethodHandler, SingleAuthHandler,
};
use distant_core::net::client::{Client as NetClient, ClientConfig, ReconnectStrategy};
use distant_core::net::manager::ManagerClient;
use distant_core::net::manager::{ManagerClient, PROTOCOL_VERSION};
use log::*;
use crate::cli::common::{MsgReceiver, MsgSender};
@ -71,6 +71,7 @@ impl<T: AuthHandler + Clone> Client<T> {
},
..Default::default()
})
.version(PROTOCOL_VERSION)
.connect()
.await
{
@ -113,6 +114,7 @@ impl<T: AuthHandler + Clone> Client<T> {
},
..Default::default()
})
.version(PROTOCOL_VERSION)
.connect()
.await
{

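Both endpoints now advertise a Version derived from PROTOCOL_VERSION when connecting, presumably so the other side can reject incompatible peers. The concrete rule lives in distant-net and is not shown in this diff; purely as a hypothetical illustration, a semver-style check could look like the following (not distant's actual implementation).

// Hypothetical compatibility rule, NOT distant's actual implementation.
use semver::Version;

fn is_compatible(client: &Version, server: &Version) -> bool {
    if client.major == 0 || server.major == 0 {
        // Pre-1.0 releases: treat minor bumps as breaking.
        client.major == server.major && client.minor == server.minor
    } else {
        // Otherwise only the major version must match.
        client.major == server.major
    }
}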
@ -1,6 +1,6 @@
use anyhow::Context;
use distant_core::net::auth::Verifier;
use distant_core::net::manager::{Config as ManagerConfig, ManagerServer};
use distant_core::net::manager::{Config as ManagerConfig, ManagerServer, PROTOCOL_VERSION};
use distant_core::net::server::ServerRef;
use log::*;
@ -18,6 +18,9 @@ impl Manager {
pub async fn listen(self) -> anyhow::Result<ServerRef> {
let user = self.config.user;
// Version we'll use to report compatibility in talking to the manager
let version = PROTOCOL_VERSION;
#[cfg(unix)]
{
use distant_core::net::common::UnixSocketListener;
@ -28,6 +31,7 @@ impl Manager {
global_paths::UNIX_SOCKET_PATH.as_path()
}
});
debug!("Manager wants to use unix socket @ {:?}", socket_path);
// Ensure that the path to the socket exists
if let Some(parent) = socket_path.parent() {
@ -38,6 +42,7 @@ impl Manager {
let server = ManagerServer::new(self.config)
.verifier(Verifier::none())
.version(version)
.start(
UnixSocketListener::bind_with_permissions(socket_path, self.access.into_mode())
.await?,
@ -56,9 +61,11 @@ impl Manager {
} else {
global_paths::WINDOWS_PIPE_NAME.as_str()
});
debug!("Manager wants to use windows pipe @ {:?}", pipe_name);
let server = ManagerServer::new(self.config)
.verifier(Verifier::none())
.version(version)
.start(WindowsPipeListener::bind_local(pipe_name)?)
.with_context(|| format!("Failed to start manager at pipe {pipe_name:?}"))?;

@ -6,6 +6,7 @@ pub struct ReadmeDoctests;
use std::process::{ExitCode, Termination};
use clap::error::ErrorKind;
use derive_more::{Display, Error, From};
mod cli;
@ -16,30 +17,83 @@ mod options;
pub mod win_service;
pub use cli::Cli;
pub use options::Options;
pub use options::{Format, Options, OptionsError};
/// Wrapper around a [`CliResult`] that provides [`Termination`] support
pub struct MainResult(CliResult);
/// Wrapper around a [`CliResult`] that provides [`Termination`] support and [`Format`]ing.
pub struct MainResult {
inner: CliResult,
format: Format,
}
impl MainResult {
pub const OK: MainResult = MainResult(Ok(()));
pub const OK: MainResult = MainResult {
inner: Ok(()),
format: Format::Shell,
};
/// Creates a new result that performs general shell formatting.
pub fn new(inner: CliResult) -> Self {
Self {
inner,
format: Format::Shell,
}
}
/// Converts to shell formatting for errors.
pub fn shell(self) -> Self {
Self {
inner: self.inner,
format: Format::Shell,
}
}
/// Converts to a JSON formatting for errors.
pub fn json(self) -> Self {
Self {
inner: self.inner,
format: Format::Json,
}
}
}
impl From<CliResult> for MainResult {
fn from(res: CliResult) -> Self {
Self(res)
Self::new(res)
}
}
impl From<OptionsError> for MainResult {
fn from(x: OptionsError) -> Self {
Self::new(match x {
OptionsError::Config(x) => Err(CliError::Error(x)),
OptionsError::Options(x) => match x.kind() {
// --help and --version should not actually exit with an error and instead display
// their related information while succeeding
ErrorKind::DisplayHelp | ErrorKind::DisplayVersion => {
// NOTE: We're causing a side effect here in constructing the main result,
// but seems cleaner than returning an error with an exit code of 0
// and a message to try to print. Plus, we leverage automatic color
// handling in this approach.
let _ = x.print();
Ok(())
}
// Everything else is an actual error and should fail
_ => Err(CliError::Error(anyhow::anyhow!(x))),
},
})
}
}
impl From<anyhow::Error> for MainResult {
fn from(x: anyhow::Error) -> Self {
Self(Err(CliError::Error(x)))
Self::new(Err(CliError::Error(x)))
}
}
impl From<anyhow::Result<()>> for MainResult {
fn from(res: anyhow::Result<()>) -> Self {
Self(res.map_err(CliError::Error))
Self::new(res.map_err(CliError::Error))
}
}
@ -62,14 +116,33 @@ impl CliError {
impl Termination for MainResult {
fn report(self) -> ExitCode {
match self.0 {
match self.inner {
Ok(_) => ExitCode::SUCCESS,
Err(x) => match x {
CliError::Exit(code) => ExitCode::from(code),
CliError::Error(x) => {
eprintln!("{x:?}");
match self.format {
// For anyhow, we want to print with debug information, which includes the
// full stack of information that anyhow collects; otherwise, we would only
// include the top-level context.
Format::Shell => eprintln!("{x:?}"),
Format::Json => println!(
"{}",
serde_json::to_string(&serde_json::json!({
"type": "error",
"msg": format!("{x:?}"),
}),)
.expect("Failed to format error to JSON")
),
}
// For anyhow, we want to log with debug information, which includes the full
// stack of information that anyhow collects; otherwise, we would only include
// the top-level context.
::log::error!("{x:?}");
::log::logger().flush();
ExitCode::FAILURE
}
},

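Net effect: in JSON mode a failing command now prints a single machine-readable object such as `{"type": "error", "msg": "<debug-formatted error>"}` to stdout, while shell mode keeps the human-readable debug output on stderr; both still log the error, flush the logger, and exit with a failure code.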
@ -1,4 +1,4 @@
use distant::{Cli, MainResult};
use distant::{Cli, Format, MainResult};
#[cfg(unix)]
fn main() -> MainResult {
@ -8,7 +8,12 @@ fn main() -> MainResult {
};
let _logger = cli.init_logger();
MainResult::from(cli.run())
let format = cli.options.command.format();
let result = MainResult::from(cli.run());
match format {
Format::Shell => result.shell(),
Format::Json => result.json(),
}
}
#[cfg(windows)]
@ -18,6 +23,7 @@ fn main() -> MainResult {
Err(x) => return MainResult::from(x),
};
let _logger = cli.init_logger();
let format = cli.options.command.format();
// If we are trying to listen as a manager, try as a service first
if cli.is_manager_listen_command() {
@ -36,5 +42,9 @@ fn main() -> MainResult {
}
// Otherwise, execute as a non-service CLI
MainResult::from(cli.run())
let result = MainResult::from(cli.run());
match format {
Format::Shell => result.shell(),
Format::Json => result.json(),
}
}

@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};
use clap::builder::TypedValueParser as _;
use clap::{Args, Parser, Subcommand, ValueEnum, ValueHint};
use clap_complete::Shell as ClapCompleteShell;
use derive_more::IsVariant;
use derive_more::{Display, Error, From, IsVariant};
use distant_core::net::common::{ConnectionId, Destination, Map, PortRange};
use distant_core::net::server::Shutdown;
use distant_core::protocol::ChangeKind;
@ -28,26 +28,32 @@ pub struct Options {
#[clap(flatten)]
pub logging: LoggingSettings,
#[cfg(feature = "tracing")]
#[clap(long, global = true)]
pub tracing: bool,
/// Configuration file to load instead of the default paths
#[clap(short = 'c', long = "config", global = true, value_parser)]
#[clap(long = "config", global = true, value_parser)]
config_path: Option<PathBuf>,
#[clap(subcommand)]
pub command: DistantSubcommand,
}
/// Represents an error associated with parsing options.
#[derive(Debug, Display, From, Error)]
pub enum OptionsError {
// When configuration file fails to load
Config(#[error(not(source))] anyhow::Error),
// When parsing options fails (or is something like --version or --help)
Options(#[error(not(source))] clap::Error),
}
impl Options {
/// Creates a new CLI instance by parsing command-line arguments
pub fn load() -> anyhow::Result<Self> {
pub fn load() -> Result<Self, OptionsError> {
Self::load_from(std::env::args_os())
}
/// Creates a new CLI instance by parsing the provided arguments
pub fn load_from<I, T>(args: I) -> anyhow::Result<Self>
pub fn load_from<I, T>(args: I) -> Result<Self, OptionsError>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
@ -165,7 +171,7 @@ impl Options {
DistantSubcommand::Manager(cmd) => {
update_logging!(manager);
match cmd {
ManagerSubcommand::Capabilities { network, .. } => {
ManagerSubcommand::Version { network, .. } => {
network.merge(config.manager.network);
}
ManagerSubcommand::Info { network, .. } => {
@ -275,6 +281,19 @@ pub enum DistantSubcommand {
Generate(GenerateSubcommand),
}
impl DistantSubcommand {
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
match self {
Self::Client(x) => x.format(),
Self::Manager(x) => x.format(),
Self::Server(x) => x.format(),
Self::Generate(x) => x.format(),
}
}
}
/// Subcommands for `distant client`.
#[derive(Debug, PartialEq, Subcommand, IsVariant)]
pub enum ClientSubcommand {
@ -444,6 +463,11 @@ pub enum ClientSubcommand {
#[clap(long)]
pty: bool,
/// If specified, will spawn the process in the specified shell, defaulting to the
/// user-configured shell.
#[clap(long, name = "SHELL")]
shell: Option<Option<Shell>>,
/// Alternative current directory for the remote process
#[clap(long)]
current_dir: Option<PathBuf>,
@ -452,8 +476,17 @@ pub enum ClientSubcommand {
#[clap(long, default_value_t)]
environment: Map,
/// If present, commands are read from the provided string
#[clap(short = 'c', long = "cmd", conflicts_with = "CMD")]
cmd_str: Option<String>,
/// Command to run
#[clap(name = "CMD", num_args = 1.., last = true)]
#[clap(
name = "CMD",
num_args = 1..,
last = true,
conflicts_with = "cmd_str"
)]
cmd: Vec<String>,
},
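In practice this gives two mutually exclusive ways to supply the command: a quoted string via `-c`/`--cmd` (for example `distant spawn -c 'echo hello'`) or trailing arguments after `--` (for example `distant spawn -- echo hello`), with `--shell` optionally selecting the shell and falling back to the user-configured one when given without a value. These invocations are inferred from the clap definitions above rather than copied from the docs.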
@ -524,6 +557,21 @@ impl ClientSubcommand {
Self::Version { network, .. } => network,
}
}
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
match self {
Self::Api { .. } => Format::Json,
Self::Connect { format, .. } => *format,
Self::FileSystem(fs) => fs.format(),
Self::Launch { format, .. } => *format,
Self::Shell { .. } => Format::Shell,
Self::Spawn { .. } => Format::Shell,
Self::SystemInfo { .. } => Format::Shell,
Self::Version { format, .. } => *format,
}
}
}
/// Subcommands for `distant fs`.
@ -921,6 +969,12 @@ impl ClientFileSystemSubcommand {
Self::Write { network, .. } => network,
}
}
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
Format::Shell
}
}
/// Subcommands for `distant generate`.
@ -928,15 +982,16 @@ impl ClientFileSystemSubcommand {
pub enum GenerateSubcommand {
/// Generate configuration file with base settings
Config {
/// Path to where the configuration file should be created
file: PathBuf,
/// Write output to a file instead of stdout
#[clap(short, long, value_name = "FILE")]
output: Option<PathBuf>,
},
// Generate completion info for CLI
Completion {
/// If specified, will output to the file at the given path instead of stdout
#[clap(long)]
file: Option<PathBuf>,
/// Write output to a file instead of stdout
#[clap(long, value_name = "FILE")]
output: Option<PathBuf>,
/// Specific shell to target for the generated output
#[clap(value_enum, value_parser)]
@ -944,6 +999,14 @@ pub enum GenerateSubcommand {
},
}
impl GenerateSubcommand {
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
Format::Shell
}
}
/// Subcommands for `distant manager`.
#[derive(Debug, PartialEq, Eq, Subcommand, IsVariant)]
pub enum ManagerSubcommand {
@ -991,7 +1054,7 @@ pub enum ManagerSubcommand {
},
/// Retrieve version information from the manager
Capabilities {
Version {
#[clap(short, long, default_value_t, value_enum)]
format: Format,
@ -1040,6 +1103,22 @@ pub enum ManagerSubcommand {
},
}
impl ManagerSubcommand {
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
match self {
Self::Select { format, .. } => *format,
Self::Service(_) => Format::Shell,
Self::Listen { .. } => Format::Shell,
Self::Version { format, .. } => *format,
Self::Info { format, .. } => *format,
Self::List { format, .. } => *format,
Self::Kill { format, .. } => *format,
}
}
}
/// Subcommands for `distant manager service`.
#[derive(Debug, PartialEq, Eq, Subcommand, IsVariant)]
pub enum ManagerServiceSubcommand {
@ -1156,6 +1235,14 @@ pub enum ServerSubcommand {
},
}
impl ServerSubcommand {
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
Format::Shell
}
}
#[derive(Args, Debug, PartialEq)]
pub struct ServerListenWatchOptions {
/// If specified, will use the polling-based watcher for filesystem changes
@ -1856,7 +1943,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
};
@ -1894,7 +1983,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
}
@ -1919,7 +2010,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
};
@ -1957,7 +2050,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
}
@ -3331,7 +3426,7 @@ mod tests {
log_level: None,
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
};
@ -3355,7 +3450,7 @@ mod tests {
log_level: Some(LogLevel::Trace),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
}
@ -3371,7 +3466,7 @@ mod tests {
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
};
@ -3395,7 +3490,7 @@ mod tests {
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
}
@ -3410,7 +3505,7 @@ mod tests {
log_file: None,
log_level: None,
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: None,
@ -3442,7 +3537,7 @@ mod tests {
log_file: Some(PathBuf::from("config-log-file")),
log_level: Some(LogLevel::Trace),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("config-unix-socket")),
@ -3461,7 +3556,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("cli-unix-socket")),
@ -3493,7 +3588,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("cli-unix-socket")),

@ -3,6 +3,7 @@ mod cmd;
mod logging;
mod network;
mod search;
mod shell;
mod time;
mod value;
@ -11,5 +12,6 @@ pub use cmd::*;
pub use logging::*;
pub use network::*;
pub use search::*;
pub use shell::*;
pub use time::*;
pub use value::*;

@ -55,6 +55,35 @@ pub struct CliSearchQueryOptions {
/// include the remaining results even if less than pagination request
#[clap(long)]
pub pagination: Option<u64>,
/// If true, will skip searching hidden files.
#[clap(long)]
pub ignore_hidden: bool,
/// If true, will read `.ignore` files that are used by `ripgrep` and `The Silver Searcher`
/// to determine which files and directories to not search.
#[clap(long)]
pub use_ignore_files: bool,
/// If true, will read `.ignore` files from parent directories that are used by `ripgrep` and
/// `The Silver Searcher` to determine which files and directories to not search.
#[clap(long)]
pub use_parent_ignore_files: bool,
/// If true, will read `.gitignore` files to determine which files and directories to not
/// search.
#[clap(long)]
pub use_git_ignore_files: bool,
/// If true, will read global `.gitignore` files to determine which files and directories to
/// not search.
#[clap(long)]
pub use_global_git_ignore_files: bool,
/// If true, will read `.git/info/exclude` files to determine which files and directories to
/// not search.
#[clap(long)]
pub use_git_exclude_files: bool,
}
impl From<CliSearchQueryOptions> for SearchQueryOptions {
@ -68,6 +97,12 @@ impl From<CliSearchQueryOptions> for SearchQueryOptions {
limit: x.limit,
max_depth: x.max_depth,
pagination: x.pagination,
ignore_hidden: x.ignore_hidden,
use_ignore_files: x.use_ignore_files,
use_parent_ignore_files: x.use_parent_ignore_files,
use_git_ignore_files: x.use_git_ignore_files,
use_global_git_ignore_files: x.use_global_git_ignore_files,
use_git_exclude_files: x.use_git_exclude_files,
}
}
}

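Those fields default to clap's kebab-case flag names, so a search that opts back into the standard filtering might look roughly like `distant fs search --ignore-hidden --use-git-ignore-files <pattern>`; the exact subcommand path and pattern syntax are assumed here, only the flag names follow from the fields above.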
@ -0,0 +1,196 @@
use derive_more::{Display, Error};
use std::str::FromStr;
use typed_path::{Utf8UnixPath, Utf8WindowsPath};
/// Represents a shell to execute on the remote machine.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Shell {
/// Represents the path to the shell on the remote machine.
pub path: String,
/// Represents the kind of shell.
pub kind: ShellKind,
}
impl Shell {
#[inline]
pub fn is_posix(&self) -> bool {
self.kind.is_posix()
}
/// Wraps a `cmd` such that it is invoked by this shell.
///
/// * For `cmd.exe`, this wraps in double quotes such that it can be invoked by `cmd.exe /S /C "..."`.
/// * For `powershell.exe`, this wraps in single quotes and escapes single quotes by doubling
/// them such that it can be invoked by `powershell.exe -Command '...'`.
/// * For `rc` and `elvish`, this wraps in single quotes and escapes single quotes by doubling
///   them, such that it can be invoked by `shell -c '...'`.
/// * For **POSIX** shells, this wraps in single quotes and uses the `'\''` trick to escape
///   embedded single quotes.
/// * For `nu`, this wraps in single quotes or backticks where possible, but fails if the cmd
///   contains both single quotes and backticks.
///
pub fn make_cmd_string(&self, cmd: &str) -> Result<String, &'static str> {
let path = self.path.as_str();
match self.kind {
ShellKind::CmdExe => Ok(format!("{path} /S /C \"{cmd}\"")),
// NOTE: Powershell does not work directly because our splitting logic for arguments on
// distant-local does not handle single quotes. In fact, the splitting logic
// isn't designed for powershell at all. To get around that limitation, we are
// using cmd.exe to invoke powershell, which fits closer to our parsing rules.
// Crazy, I know! Eventually, we should switch to properly using powershell
// and escaping single quotes by doubling them.
ShellKind::PowerShell => Ok(format!(
"cmd.exe /S /C \"{path} -Command {}\"",
cmd.replace('"', "\"\""),
)),
ShellKind::Rc | ShellKind::Elvish => {
Ok(format!("{path} -c '{}'", cmd.replace('\'', "''")))
}
ShellKind::Nu => {
let has_single_quotes = cmd.contains('\'');
let has_backticks = cmd.contains('`');
match (has_single_quotes, has_backticks) {
// If we have both single quotes and backticks, fail
(true, true) => {
Err("unable to escape single quotes and backticks at the same time with nu")
}
// If we only have single quotes, use backticks
(true, false) => Ok(format!("{path} -c `{cmd}`")),
// Otherwise, we can safely use single quotes
_ => Ok(format!("{path} -c '{cmd}'")),
}
}
// We assume anything else not specially handled is POSIX
_ => Ok(format!("{path} -c '{}'", cmd.replace('\'', "'\\''"))),
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, Display, Error)]
pub struct ParseShellError(#[error(not(source))] String);
impl FromStr for Shell {
type Err = ParseShellError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.trim();
let kind = ShellKind::identify(s)
.ok_or_else(|| ParseShellError(format!("Unsupported shell: {s}")))?;
Ok(Self {
path: s.to_string(),
kind,
})
}
}
/// Supported types of shells.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum ShellKind {
Ash,
Bash,
CmdExe,
Csh,
Dash,
Elvish,
Fish,
Ksh,
Loksh,
Mksh,
Nu,
Pdksh,
PowerShell,
Rc,
Scsh,
Sh,
Tcsh,
Zsh,
}
impl ShellKind {
/// Returns true if shell represents a POSIX-compliant implementation.
pub fn is_posix(&self) -> bool {
matches!(
self,
Self::Ash
| Self::Bash
| Self::Csh
| Self::Dash
| Self::Fish
| Self::Ksh
| Self::Loksh
| Self::Mksh
| Self::Pdksh
| Self::Scsh
| Self::Sh
| Self::Tcsh
| Self::Zsh
)
}
/// Identifies the shell kind from the given string. This string could be a Windows path, Unix
/// path, or solo shell name.
///
/// The process is handled by these steps:
///
/// 1. Check if the string matches a shell name verbatim
/// 2. Parse the path as a Unix path and check the file name for a match
/// 3. Parse the path as a Windows path and check the file name for a match
///
pub fn identify(s: &str) -> Option<Self> {
Self::from_name(s)
.or_else(|| Utf8UnixPath::new(s).file_name().and_then(Self::from_name))
.or_else(|| {
Utf8WindowsPath::new(s)
.file_name()
.and_then(Self::from_name)
})
}
fn from_name(name: &str) -> Option<Self> {
macro_rules! map_str {
($($name:literal -> $value:expr),+ $(,)?) => {{
$(
if name.trim().eq_ignore_ascii_case($name) {
return Some($value);
}
)+
None
}};
}
map_str! {
"ash" -> Self::Ash,
"bash" -> Self::Bash,
"cmd" -> Self::CmdExe,
"cmd.exe" -> Self::CmdExe,
"csh" -> Self::Csh,
"dash" -> Self::Dash,
"elvish" -> Self::Elvish,
"fish" -> Self::Fish,
"ksh" -> Self::Ksh,
"loksh" -> Self::Loksh,
"mksh" -> Self::Mksh,
"nu" -> Self::Nu,
"pdksh" -> Self::Pdksh,
"powershell" -> Self::PowerShell,
"powershell.exe" -> Self::PowerShell,
"rc" -> Self::Rc,
"scsh" -> Self::Scsh,
"sh" -> Self::Sh,
"tcsh" -> Self::Tcsh,
"zsh" -> Self::Zsh,
}
}
}

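As a quick illustration of how the new type behaves (the surrounding module path is assumed to be in scope; the expected strings follow directly from the code above):

use std::str::FromStr;

// Assumes `Shell` and `ShellKind` from the module above are in scope.
fn demo() -> Result<(), &'static str> {
    // Identification accepts bare names, Unix paths, and Windows paths.
    let shell = Shell::from_str("/bin/bash").expect("bash should be recognized");
    assert!(shell.is_posix());

    // POSIX shells escape embedded single quotes with the '\'' trick.
    let cmd = shell.make_cmd_string("echo 'hi'")?;
    assert_eq!(cmd, r#"/bin/bash -c 'echo '\''hi'\'''"#);
    Ok(())
}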
@ -161,13 +161,13 @@ fn run_service() -> windows_service::Result<()> {
let handle = thread::spawn({
move || {
debug!("Loading CLI using args from disk for {SERVICE_NAME}");
let config = Config::load()?;
let config = Config::load().expect("Failed to load config");
debug!("Parsing CLI args from disk for {SERVICE_NAME}");
let cli = Cli::initialize_from(config.args)?;
let cli = Cli::initialize_from(config.args).expect("Failed to initialize CLI");
debug!("Running CLI for {SERVICE_NAME}");
cli.run()
cli.run().expect("CLI failed during execution")
}
});
@ -176,13 +176,7 @@ fn run_service() -> windows_service::Result<()> {
let success = loop {
if handle.is_finished() {
match handle.join() {
Ok(result) => match result {
Ok(_) => break true,
Err(x) => {
error!("{x:?}");
break false;
}
},
Ok(_) => break true,
Err(x) => {
error!("{x:?}");
break false;

@ -1,4 +1,5 @@
use distant_core::protocol::{Capabilities, Capability, SemVer, PROTOCOL_VERSION};
use distant_core::protocol::semver::Version as SemVer;
use distant_core::protocol::{Version, PROTOCOL_VERSION};
use rstest::*;
use serde_json::json;
use test_log::test;
@ -25,17 +26,17 @@ async fn should_support_json_capabilities(mut api_process: CtxCommand<ApiProcess
serde_json::from_value(res["payload"]["protocol_version"].clone()).unwrap();
assert_eq!(protocol_version, PROTOCOL_VERSION);
let capabilities: Capabilities = res["payload"]["capabilities"]
let capabilities: Vec<String> = res["payload"]["capabilities"]
.as_array()
.expect("Field 'supported' was not an array")
.expect("Field 'capabilities' was not an array")
.iter()
.map(|value| {
serde_json::from_value::<Capability>(value.clone())
.expect("Could not read array value as capability")
serde_json::from_value::<String>(value.clone())
.expect("Could not read array value as string")
})
.collect();
// NOTE: Our local server api should always support all capabilities since it is the reference
// implementation for our api
assert_eq!(capabilities, Capabilities::all());
assert_eq!(capabilities, Version::capabilities());
}

@ -1,3 +1,4 @@
use distant_core::protocol::semver;
use distant_core::protocol::PROTOCOL_VERSION;
use rstest::*;
@ -8,22 +9,40 @@ use crate::common::utils::TrimmedLinesMatchPredicate;
#[test_log::test]
fn should_output_capabilities(ctx: DistantManagerCtx) {
// Because all of our crates have the same version, we can expect it to match
let package_name = "distant-local";
let package_version = env!("CARGO_PKG_VERSION");
let (major, minor, patch) = PROTOCOL_VERSION;
let version: semver::Version = env!("CARGO_PKG_VERSION").parse().unwrap();
// Add the package name to the client version information
let client_version = if version.build.is_empty() {
let mut version = version.clone();
version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME")).unwrap();
version
} else {
let mut version = version.clone();
let raw_build_str = format!("{}.{}", version.build.as_str(), env!("CARGO_PKG_NAME"));
version.build = semver::BuildMetadata::new(&raw_build_str).unwrap();
version
};
// Add the distant-local to the server version information
let server_version = if version.build.is_empty() {
let mut version = version;
version.build = semver::BuildMetadata::new("distant-local").unwrap();
version
} else {
let raw_build_str = format!("{}.{}", version.build.as_str(), "distant-local");
let mut version = version;
version.build = semver::BuildMetadata::new(&raw_build_str).unwrap();
version
};
// Since our client and server are built the same, all capabilities should be listed with +
// and using 4 columns since we are not using a tty
let expected = indoc::formatdoc! {"
Client: distant {package_version} (Protocol {major}.{minor}.{patch})
Server: {package_name} {package_version} (Protocol {major}.{minor}.{patch})
Client: {client_version} (Protocol {PROTOCOL_VERSION})
Server: {server_version} (Protocol {PROTOCOL_VERSION})
Capabilities supported (+) or not (-):
+cancel_search +copy +dir_create +dir_read
+exists +file_append +file_append_text +file_read
+file_read_text +file_write +file_write_text +metadata
+proc_kill +proc_resize_pty +proc_spawn +proc_stdin
+remove +rename +search +set_permissions
+system_info +unwatch +version +watch
+exec +fs_io +fs_perm +fs_search
+fs_watch +sys_info
"};
ctx.cmd("version")

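In other words, with every crate at the same version the expected header lines take the shape `Client: <pkg version>+<pkg name> (Protocol <protocol version>)` and `Server: <pkg version>+distant-local (Protocol <protocol version>)`, with the package name and `distant-local` appended as semver build metadata (or dot-appended to any build metadata already present).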
@ -1,41 +0,0 @@
use indoc::indoc;
use rstest::*;
use crate::common::fixtures::*;
const EXPECTED_TABLE: &str = indoc! {"
+---------------+--------------------------------------------------------------+
| kind | description |
+---------------+--------------------------------------------------------------+
| authenticate | Supports authenticating with a remote server |
+---------------+--------------------------------------------------------------+
| capabilities | Supports retrieving capabilities |
+---------------+--------------------------------------------------------------+
| channel | Supports sending data through a channel with a remote server |
+---------------+--------------------------------------------------------------+
| close_channel | Supports closing a channel with a remote server |
+---------------+--------------------------------------------------------------+
| connect | Supports connecting to remote servers |
+---------------+--------------------------------------------------------------+
| info | Supports retrieving connection-specific information |
+---------------+--------------------------------------------------------------+
| kill | Supports killing a remote connection |
+---------------+--------------------------------------------------------------+
| launch | Supports launching a server on remote machines |
+---------------+--------------------------------------------------------------+
| list | Supports retrieving a list of managed connections |
+---------------+--------------------------------------------------------------+
| open_channel | Supports opening a channel with a remote server |
+---------------+--------------------------------------------------------------+
"};
#[rstest]
#[test_log::test]
fn should_output_capabilities(ctx: DistantManagerCtx) {
// distant action capabilities
ctx.new_assert_cmd(vec!["manager", "capabilities"])
.assert()
.success()
.stdout(EXPECTED_TABLE)
.stderr("");
}

@ -1 +1 @@
mod capabilities;
mod version;

@ -0,0 +1,13 @@
use rstest::*;
use crate::common::fixtures::*;
#[rstest]
#[test_log::test]
fn should_output_version(ctx: DistantManagerCtx) {
ctx.new_assert_cmd(vec!["manager", "version"])
.assert()
.success()
.stdout(format!("{}\n", env!("CARGO_PKG_VERSION")))
.stderr("");
}