Compare commits

...

38 Commits

Author SHA1 Message Date
Chip Senkbeil 3fe1fba339
Correct wget usage for installation 9 months ago
Chip Senkbeil 48f7eb74ec
Update readme example to use --daemon instead of & for background manager 10 months ago
Chip Senkbeil 96abcefdc5
Add extra debug logging when starting a manager 10 months ago
Chip Senkbeil 22f3c2dd76
Fix bugs in set permissions for CLI and distant-local 11 months ago
Chip Senkbeil 0320e7fe24
Bump to v0.20.0 11 months ago
Chip Senkbeil 9e48300e83
Fix zombies being leftover from distant launch manager://localhost when servers self-terminate 11 months ago
Chip Senkbeil e304e6a689
Fix shutting down killed connections from a manager 11 months ago
Chip Senkbeil 8972013716
Refactor capabilities to version for manager, integrate version checking for client/server/manager, and define protocol version (#219) 11 months ago
Chip Senkbeil 0efb5aee4c
Add --shell support to CLI (#218) 11 months ago
Chip Senkbeil 56b3b8f4f1
Fix CLI commands with --format json not outputting errors in JSON 11 months ago
Chip Senkbeil eb23b4e1ad
Fix win service 11 months ago
Chip Senkbeil dc7e9b5309
Bump to alpha.12 11 months ago
Chip Senkbeil e0b8769087
Fix return code of --help and --version on cli 11 months ago
Chip Senkbeil 9bc50886bb
Update latest tagging with custom code that uses a personal access token to trigger workflows 11 months ago
Chip Senkbeil bd3b068651
Add workflow to tag latest 11 months ago
Chip Senkbeil c61393750a
Bump minimum version of Rust to 1.70.0 11 months ago
Chip Senkbeil 2abaf0b814
Use sparse checkout during publish 11 months ago
Chip Senkbeil 0e03fc3011
Reintroduce checkout to publish step 11 months ago
Chip Senkbeil cb8ea0507f
Bump to 0.20.0-alpha.11 and restore ci tests 11 months ago
Chip Senkbeil 8a34fec1f7
Update README 11 months ago
Chip Senkbeil 6feeb2d012
Tweaking release config until it works 11 months ago
Chip Senkbeil fefbe19a3c
Switch to stripping using cargo and supporting a latest release tag 11 months ago
Chip Senkbeil be7a15caa0
Refactor generation commands to use --output for files and printing to stdout by default 11 months ago
Chip Senkbeil 84ea28402d
Add support for distant spawn -c 'cmd str' 11 months ago
Chip Senkbeil b74cba28df
Bump to v0.20.0-alpha.10 11 months ago
Chip Senkbeil f4180f6245
Change search default to not use standard filters, and provide options to set filters manually 11 months ago
Chip Senkbeil c250acdfb4
Fix search task exiting on failing to start a search with distant-local 11 months ago
Chip Senkbeil 1836f20a2a
Bump to 0.20.0-alpha.9 11 months ago
Chip Senkbeil 9096a7d81b
Fix destination username & password parsing to accept full character set 11 months ago
Chip Senkbeil 7c08495904
Switch to unbounded channels for `Reply` (#207) 11 months ago
Chip Senkbeil da75801639
Fix server hangup (#206) 12 months ago
Nagy Botond 8009cc9361
fix(parser): allow `-` (hyphen) to appear in usernames (#203) 12 months ago
Chip Senkbeil 4fb9045152
Support sequential batch processing (#201) 12 months ago
Chip Senkbeil efad345a0d
Add header support to request & response (#200) 12 months ago
Chip Senkbeil 6ba3ded188
Fix not serializing when only renamed set, reset field name to timestamp from ts 12 months ago
Chip Senkbeil c4c46f80a9
Remove Formatter code by inlining logic for search and watch 12 months ago
Chip Senkbeil 791a41c29e
Refactor Change to use single path & support renamed detail field (#196) 12 months ago
Chip Senkbeil a36263e7e1
Fix makefile 12 months ago

@ -4,11 +4,13 @@ on:
push:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
pull_request:
paths-ignore:
- '**.md'
- 'Makefile.toml'
branches:
- master
@ -85,7 +87,7 @@ jobs:
- { rust: stable, os: windows-latest, target: x86_64-pc-windows-msvc }
- { rust: stable, os: macos-latest }
- { rust: stable, os: ubuntu-latest }
- { rust: 1.68.0, os: ubuntu-latest }
- { rust: 1.70.0, os: ubuntu-latest }
steps:
- uses: actions/checkout@v3
- name: Install Rust ${{ matrix.rust }}

@ -0,0 +1,24 @@
name: 'Tag latest'
on:
push:
branches:
- master
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Tag latest and push
env:
GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
run: |
git config user.name "${GITHUB_ACTOR}"
git config user.email "${GITHUB_ACTOR}@users.noreply.github.com"
origin_url="$(git config --get remote.origin.url)"
origin_url="${origin_url/#https:\/\//https:\/\/$GITHUB_TOKEN@}" # add token to URL
git tag latest --force
git push "$origin_url" --tags --force

@ -0,0 +1,28 @@
name: 'Lock Threads'
on:
schedule:
- cron: '0 3 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
concurrency:
group: lock
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: dessant/lock-threads@v4
with:
issue-inactive-days: '30'
issue-comment: >
I'm going to lock this issue because it has been closed for _30 days_ ⏳.
This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new
issue and complete the issue template so we can capture all the details
necessary to investigate further.
process-only: 'issues'

@ -5,402 +5,312 @@ on:
tags:
- v[0-9]+.[0-9]+.[0-9]+
- v[0-9]+.[0-9]+.[0-9]+-**
- latest
# Status of Targets:
#
# ✅ x86_64-apple-darwin
# ✅ aarch64-apple-darwin
#
# ✅ x86_64-pc-windows-msvc
# ✅ aarch64-pc-windows-msvc
#
# ✅ x86_64-unknown-linux-gnu
# ✅ aarch64-unknown-linux-gnu
# ❌ aarch64-linux-android (fails due to termios)
# ✅ armv7-unknown-linux-gnueabihf
#
# ✅ x86_64-unknown-linux-musl
# ✅ aarch64-unknown-linux-musl
#
# ✅ x86_64-unknown-freebsd
# ❓ aarch64-unknown-freebsd (works manually, but cannot cross-compile via CI)
#
# ❌ x86_64-unknown-netbsd (fails due to termios)
# ❌ aarch64-unknown-netbsd (???)
#
# ❌ x86_64-unknown-openbsd (fails due to rustc internal error at end)
# ❌ aarch64-unknown-openbsd (fails due to openssl-src)
#
jobs:
macos:
name: "Build release on MacOS"
name: "Build release on MacOS (${{ matrix.target }})"
runs-on: macos-11.0
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: macos
X86_ARCH: x86_64-apple-darwin
ARM_ARCH: aarch64-apple-darwin
X86_DIR: target/x86_64-apple-darwin/release
ARM_DIR: target/aarch64-apple-darwin/release
BUILD_BIN: distant
UNIVERSAL_REL_BIN: distant-macos
strategy:
matrix:
target:
- x86_64-apple-darwin
- aarch64-apple-darwin
steps:
- uses: actions/checkout@v3
- name: Install Rust (x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- name: Install Rust (ARM)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARM_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
- name: Build binary (aarch64)
run: |
cargo build --release --all-features --target ${{ env.ARM_ARCH }}
ls -l ./${{ env.ARM_DIR }}
strip ./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
- name: Unify binaries
run: |
lipo -create -output ${{ env.UNIVERSAL_REL_BIN }} \
./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} \
./${{ env.ARM_DIR }}/${{ env.BUILD_BIN }}
chmod +x ./${{ env.UNIVERSAL_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.UNIVERSAL_REL_BIN }}
windows:
name: "Build release on Windows"
runs-on: windows-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: win64
X86_ARCH: x86_64-pc-windows-msvc
X86_DIR: target/x86_64-pc-windows-msvc/release
BUILD_BIN: distant.exe
X86_REL_BIN: distant-win64.exe
steps:
- uses: actions/checkout@v2
- name: Install Rust (MSVC)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_ARCH }}
ls -l ./${{ env.X86_DIR }}
strip ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_REL_BIN }}
chmod +x ./${{ env.X86_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_REL_BIN }}
linux_gnu_x86:
name: "Build release on Linux (GNU x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-x86
X86_GNU_ARCH: x86_64-unknown-linux-gnu
X86_GNU_DIR: target/x86_64-unknown-linux-gnu/release
BUILD_BIN: distant
X86_GNU_REL_BIN: distant-linux64-gnu-x86
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU x86)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Build binary (GNU x86_64)
run: |
cargo build --release --all-features --target ${{ env.X86_GNU_ARCH }}
ls -l ./${{ env.X86_GNU_DIR }}
strip ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_GNU_REL_BIN }}
chmod +x ./${{ env.X86_GNU_REL_BIN }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_GNU_REL_BIN }}
linux_gnu_aarch64:
name: "Build release on Linux (GNU aarch64)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-aarch64
AARCH64_GNU_ARCH: aarch64-unknown-linux-gnu
AARCH64_GNU_DIR: target/aarch64-unknown-linux-gnu/release
BUILD_BIN: distant
AARCH64_GNU_REL_BIN: distant-linux64-gnu-aarch64
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU aarch64)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_GNU_ARCH }}
target: ${{ matrix.target }}
override: true
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-aarch64-linux-gnu)
- name: Build binary (${{ matrix.target }})
run: |
sudo apt update
sudo apt install -y gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Build binary (GNU aarch64)
run: |
cargo build --release --all-features --target ${{ env.AARCH64_GNU_ARCH }}
ls -l ./${{ env.AARCH64_GNU_DIR }}
/usr/aarch64-linux-gnu/bin/strip ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_GNU_REL_BIN }}
chmod +x ./${{ env.AARCH64_GNU_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_GNU_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
linux_gnu_arm_v7:
name: "Build release on Linux (GNU arm-v7)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-gnu-arm-v7
ARMV7_GNU_ARCH: armv7-unknown-linux-gnueabihf
ARMV7_GNU_DIR: target/armv7-unknown-linux-gnueabihf/release
BUILD_BIN: distant
ARMV7_GNU_REL_BIN: distant-linux64-gnu-arm-v7
macos_unify:
name: "Build universal binary on MacOS"
needs: [macos]
runs-on: macos-11.0
steps:
- uses: actions/checkout@v2
- name: Install Rust (GNU arm-v7)
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.ARMV7_GNU_ARCH }}
- uses: Swatinem/rust-cache@v2
- name: Install linker & binutils (gcc-arm-linux-gnueabihf)
run: |
sudo apt update
sudo apt install -y gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
- name: Build binary (GNU arm-v7)
- uses: actions/download-artifact@v2
- name: Unify binaries
run: |
cargo build --release --all-features --target ${{ env.ARMV7_GNU_ARCH }}
ls -l ./${{ env.ARMV7_GNU_DIR }}
/usr/arm-linux-gnueabihf/bin/strip ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.ARMV7_GNU_DIR }}/${{ env.BUILD_BIN }} ./${{ env.ARMV7_GNU_REL_BIN }}
chmod +x ./${{ env.ARMV7_GNU_REL_BIN }}
lipo -create -output distant-universal-apple-darwin \
./x86_64-apple-darwin/distant-x86_64-apple-darwin \
./aarch64-apple-darwin/distant-aarch64-apple-darwin
chmod +x ./distant-universal-apple-darwin
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.ARMV7_GNU_REL_BIN }}
name: universal-apple-darwin
path: ./distant-universal-apple-darwin
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_x86:
name: "Build release on Linux (musl x86)"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-x86
X86_MUSL_ARCH: x86_64-unknown-linux-musl
X86_MUSL_DIR: target/x86_64-unknown-linux-musl/release
BUILD_BIN: distant
X86_MUSL_REL_BIN: distant-linux64-musl-x86
windows:
name: "Build release on Windows (${{ matrix.target }})"
runs-on: windows-latest
strategy:
matrix:
target:
- x86_64-pc-windows-msvc
- aarch64-pc-windows-msvc
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL x86)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.X86_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
run: |
sudo apt update
sudo apt install -y musl-tools
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL x86_64)
- name: Build binary (${{ matrix.target }})
run: |
cargo build --release --no-default-features --features ssh2 --target ${{ env.X86_MUSL_ARCH }}
ls -l ./${{ env.X86_MUSL_DIR }}
strip ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.X86_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.X86_MUSL_REL_BIN }}
chmod +x ./${{ env.X86_MUSL_REL_BIN }}
cargo build --release --all-features --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant.exe ./distant-${{ matrix.target }}.exe
chmod +x ./distant-${{ matrix.target }}.exe
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.X86_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}.exe
if-no-files-found: error
retention-days: 5
# NOTE: For musl, we only support ssh2 and not libssh for the time being due to some
# build issue with libssh-rs-sys not finding the symbol ENGINE_cleanup in libcrypto
linux_musl_aarch64:
name: "Build release on Linux (musl aarch64)"
linux:
name: "Build release on Linux (${{ matrix.target }})"
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
env:
UPLOAD_NAME: linux64-musl-aarch64
AARCH64_MUSL_ARCH: aarch64-unknown-linux-musl
AARCH64_MUSL_DIR: target/aarch64-unknown-linux-musl/release
BUILD_BIN: distant
AARCH64_MUSL_REL_BIN: distant-linux64-musl-aarch64
strategy:
matrix:
include:
- target: x86_64-unknown-linux-gnu
build: --all-features
cargo: cargo
- target: aarch64-unknown-linux-gnu
build: --all-features
deps: gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cargo
- target: armv7-unknown-linux-gnueabihf
build: --all-features
deps: gcc-arm-linux-gnueabihf binutils-arm-linux-gnueabihf
cargo: cargo
- target: x86_64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools
cargo: cargo
- target: aarch64-unknown-linux-musl
build: --no-default-features --features ssh2
deps: musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
- target: x86_64-unknown-freebsd
build: --all-features
cargo: cross
prepare: |
curl -L "https://github.com/cross-rs/cross/releases/download/v0.2.5/cross-x86_64-unknown-linux-musl.tar.gz" |
tar xz -C $HOME/.cargo/bin
steps:
- uses: actions/checkout@v2
- name: Install Rust (MUSL aarch64)
- name: Install Rust (${{ matrix.target }})
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
target: ${{ env.AARCH64_MUSL_ARCH }}
target: ${{ matrix.target }}
override: true
- name: Install musl tools
- uses: Swatinem/rust-cache@v2
- name: Install dependencies
if: ${{ matrix.deps }}
run: |
sudo apt update
sudo apt install -y musl-tools gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu
- name: Install cross
env:
LINK: https://github.com/cross-rs/cross/releases/download
CROSS_VERSION: 0.2.4
CROSS_FILE: cross-x86_64-unknown-linux-musl
run: |
curl -L "$LINK/v$CROSS_VERSION/$CROSS_FILE.tar.gz" |
tar xz -C $HOME/.cargo/bin
- uses: Swatinem/rust-cache@v2
- name: Build binary (MUSL aarch64)
sudo apt install -y ${{ matrix.deps }}
- name: Preparing system
if: ${{ matrix.prepare }}
run: ${{ matrix.prepare }}
- name: Build binary (${{ matrix.target }})
run: |
cross build --release --no-default-features --features ssh2 --target ${{ env.AARCH64_MUSL_ARCH }}
ls -l ./${{ env.AARCH64_MUSL_DIR }}
aarch64-linux-gnu-strip ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }}
mv ./${{ env.AARCH64_MUSL_DIR }}/${{ env.BUILD_BIN }} ./${{ env.AARCH64_MUSL_REL_BIN }}
chmod +x ./${{ env.AARCH64_MUSL_REL_BIN }}
${{ matrix.cargo }} build --release ${{ matrix.build }} --target ${{ matrix.target }}
mv ./target/${{ matrix.target }}/release/distant ./distant-${{ matrix.target }}
chmod +x ./distant-${{ matrix.target }}
- name: Upload
uses: actions/upload-artifact@v2
with:
name: ${{ env.UPLOAD_NAME }}
path: |
${{ env.AARCH64_MUSL_REL_BIN }}
name: ${{ matrix.target }}
path: ./distant-${{ matrix.target }}
if-no-files-found: error
retention-days: 5
# bsd:
# name: "Build release on ${{ matrix.os.name }} (${{ matrix.os.target }})"
# runs-on: ${{ matrix.os.host }}
# strategy:
# matrix:
# os:
# - name: freebsd
# architecture: x86-64
# version: '13.2'
# host: macos-12
# target: x86_64-unknown-freebsd
# build: --all-features
# prepare: sudo pkg install -y openssl gmake lang/rust devel/llvm-devel
# - name: netbsd
# architecture: x86-64
# version: '9.3'
# host: macos-12
# target: x86_64-unknown-netbsd
# build: --all-features
# prepare: |
# PATH="/usr/pkg/sbin:/usr/pkg/bin:$PATH"
# PKG_PATH="https://cdn.NetBSD.org/pub/pkgsrc/packages"
# PKG_PATH="$PKG_PATH/NetBSD/x86_64/9.3/All/"
# export PATH PKG_PATH
# sudo -E pkg_add -I gmake rust
# cargo update --dry-run
# - name: openbsd
# architecture: x86-64
# version: '7.3'
# host: macos-12
# target: x86_64-unknown-openbsd
# build: --all-features
# prepare: |
# sudo pkg_add -I gmake rust llvm
# sed -i 's/lto = true/lto = false/' Cargo.toml
# steps:
# - uses: actions/checkout@v3
# - uses: Swatinem/rust-cache@v2
# - name: Build in VM
# uses: cross-platform-actions/action@v0.15.0
# env:
# CARGO_INCREMENTAL: 0
# with:
# environment_variables: CARGO_INCREMENTAL
# operating_system: ${{ matrix.os.name }}
# architecture: ${{ matrix.os.architecture }}
# version: ${{ matrix.os.version }}
# shell: bash
# run: |
# ${{ matrix.os.prepare }}
# cargo build --release ${{ matrix.os.build }} --target ${{ matrix.os.target }}
# mv ./target/${{ matrix.os.target }}/release/distant ./distant-${{ matrix.os.target }}
# chmod +x ./distant-${{ matrix.os.target }}
# - name: Upload
# uses: actions/upload-artifact@v2
# with:
# name: ${{ matrix.os.target }}
# path: ./distant-${{ matrix.os.target }}
# if-no-files-found: error
# retention-days: 5
publish:
needs: [macos, windows, linux_gnu_x86, linux_gnu_aarch64, linux_gnu_arm_v7, linux_musl_x86, linux_musl_aarch64]
needs: [macos, macos_unify, windows, linux]
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/')
permissions:
contents: write
env:
MACOS: macos
MACOS_UNIVERSAL_BIN: distant-macos
WIN64: win64
WIN64_BIN: distant-win64.exe
LINUX64_GNU_X86: linux64-gnu-x86
LINUX64_GNU_X86_BIN: distant-linux64-gnu-x86
LINUX64_GNU_AARCH64: linux64-gnu-aarch64
LINUX64_GNU_AARCH64_BIN: distant-linux64-gnu-aarch64
LINUX64_GNU_ARMV7: linux64-gnu-arm-v7
LINUX64_GNU_ARMV7_BIN: distant-linux64-gnu-arm-v7
LINUX64_MUSL_X86: linux64-musl-x86
LINUX64_MUSL_X86_BIN: distant-linux64-musl-x86
LINUX64_MUSL_AARCH64: linux64-musl-aarch64
LINUX64_MUSL_AARCH64_BIN: distant-linux64-musl-aarch64
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
sparse-checkout: |
CHANGELOG.md
sparse-checkout-cone-mode: false
- uses: actions/download-artifact@v2
- name: Generate MacOS SHA256 checksums
run: |
cd ${{ env.MACOS }}
sha256sum ${{ env.MACOS_UNIVERSAL_BIN }} > ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum
echo "SHA_MACOS_BIN=$(cat ${{ env.MACOS_UNIVERSAL_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Win64 SHA256 checksums
run: |
cd ${{ env.WIN64 }}
sha256sum ${{ env.WIN64_BIN }} > ${{ env.WIN64_BIN }}.sha256sum
echo "SHA_WIN64_BIN=$(cat ${{ env.WIN64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_X86 }}
sha256sum ${{ env.LINUX64_GNU_X86_BIN }} > ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_X86_BIN=$(cat ${{ env.LINUX64_GNU_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu aarch64) SHA256 checksums
- name: Generate SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_AARCH64 }}
sha256sum ${{ env.LINUX64_GNU_AARCH64_BIN }} > ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_AARCH64_BIN=$(cat ${{ env.LINUX64_GNU_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (gnu arm-v7) SHA256 checksums
run: |
cd ${{ env.LINUX64_GNU_ARMV7 }}
sha256sum ${{ env.LINUX64_GNU_ARMV7_BIN }} > ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum
echo "SHA_LINUX64_GNU_ARMV7_BIN=$(cat ${{ env.LINUX64_GNU_ARMV7_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl x86) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_X86 }}
sha256sum ${{ env.LINUX64_MUSL_X86_BIN }} > ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_X86_BIN=$(cat ${{ env.LINUX64_MUSL_X86_BIN }}.sha256sum)" >> $GITHUB_ENV
- name: Generate Linux64 (musl aarch64) SHA256 checksums
run: |
cd ${{ env.LINUX64_MUSL_AARCH64 }}
sha256sum ${{ env.LINUX64_MUSL_AARCH64_BIN }} > ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum
echo "SHA_LINUX64_MUSL_AARCH64_BIN=$(cat ${{ env.LINUX64_MUSL_AARCH64_BIN }}.sha256sum)" >> $GITHUB_ENV
for i in $(find . -name "distant-*" -type f); do
echo "Generating checksum for ${i}"
sha256sum "${i}" > "${i}.sha256sum"
done
- name: Determine git tag
if: github.event_name == 'push'
run: |
TAG_NAME=${{ github.ref }}
echo "TAG_NAME=${TAG_NAME#refs/tags/}" >> $GITHUB_ENV
echo "TAG_VERSION=${TAG_NAME#refs/tags/v}" >> $GITHUB_ENV
- name: Check git tag for pre-release
- name: Check git tag for pre-release or latest
id: check-tag
run: |
if [[ ${{ github.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-.*$ ]]; then
echo ::set-output name=match::true
echo "is_prerelease=true" >> $GITHUB_OUTPUT
elif [[ ${{ github.ref }} =~ ^refs/tags/latest$ ]]; then
echo "is_latest=true" >> $GITHUB_OUTPUT
fi
- name: Print pre-release status
run: |
echo "Is ${{ github.ref }} a pre-release: ${{ steps.check-tag.outputs.match }}"
echo "Is ${{ github.ref }} pre-release: ${{ steps.check-tag.outputs.is_prerelease }}"
echo "Is ${{ github.ref }} latest: ${{ steps.check-tag.outputs.is_latest }}"
- name: Get Changelog Entry
id: changelog
uses: mindsers/changelog-reader-action@v2
with:
version: ${{ env.TAG_VERSION }}
path: "./CHANGELOG.md"
- name: Publish
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
- name: Publish (latest)
if: ${{ steps.check-tag.outputs.is_latest == 'true' }}
uses: softprops/action-gh-release@v1
with:
name: Latest Build
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: true
files: |
**/distant-*
body: |
This is the latest commit (${{ github.sha }}) built for testing.
This is not guaranteed to pass all tests or even function properly.
- name: Publish (release)
if: ${{ steps.check-tag.outputs.is_latest != 'true' }}
uses: softprops/action-gh-release@v1
with:
name: distant ${{ env.TAG_NAME }}
fail_on_unmatched_files: true
target_commitish: ${{ github.sha }}
draft: false
prerelease: ${{ steps.check-tag.outputs.match == 'true' }}
prerelease: ${{ steps.check-tag.outputs.is_prerelease == 'true' }}
files: |
${{ env.MACOS }}/${{ env.MACOS_UNIVERSAL_BIN }}
${{ env.WIN64 }}/${{ env.WIN64_BIN }}
${{ env.LINUX64_GNU_X86 }}/${{ env.LINUX64_GNU_X86_BIN }}
${{ env.LINUX64_GNU_AARCH64 }}/${{ env.LINUX64_GNU_AARCH64_BIN }}
${{ env.LINUX64_GNU_ARMV7 }}/${{ env.LINUX64_GNU_ARMV7_BIN }}
${{ env.LINUX64_MUSL_X86 }}/${{ env.LINUX64_MUSL_X86_BIN }}
${{ env.LINUX64_MUSL_AARCH64 }}/${{ env.LINUX64_MUSL_AARCH64_BIN }}
**/*.sha256sum
**/distant-*
body: |
## Release Notes
${{ steps.changelog.outputs.changes }}
## Binaries
Standalone binaries are built out for Windows (x86_64), MacOS (Intel & ARM), and Linux (x86_64, aarch64, armv7).
- **linux64-gnu-x86** is the x86-64 release on Linux using libc
- **linux64-gnu-aarch64** is the aarch64 release on Linux using libc
- **linux64-gnu-arm-v7** is the arm-v7 release on Linux using libc (for Raspberry PI)
- **linux64-musl-x86** is the x86-64 release on Linux using musl (static binary, no libc dependency)
- **linux64-musl-aarch64** is the aarch64 release on Linux using musl (static binary, no libc dependency)
- **macos** is a universal binary for Mac OS that supports x86-64 and aarch64 (ARM) platforms
- **win64** is the x86-64 release on Windows using MSVC
## SHA256 Checksums
```
${{ env.SHA_MACOS_BIN }}
${{ env.SHA_WIN64_BIN }}
${{ env.SHA_LINUX64_GNU_X86_BIN }}
${{ env.SHA_LINUX64_GNU_AARCH64_BIN }}
${{ env.SHA_LINUX64_GNU_ARMV7_BIN }}
${{ env.SHA_LINUX64_MUSL_X86_BIN }}
${{ env.SHA_LINUX64_MUSL_AARCH64_BIN }}
```

@ -7,6 +7,149 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Fixed
- Bug in `distant fs set-permissions` where partial permissions such as `go-w`
would result in clearing all permissions
- Bug in `distant-local` implementation of `SetPermissions` where read-only
status was being set/cleared prior to Unix permissions being applied,
resulting in applying an invalid change to the permissions
## [0.20.0]
All changes described in these alpha releases:
- [Alpha 13](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.13)
- [Alpha 12](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.12)
- [Alpha 11](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.11)
- [Alpha 10](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.10)
- [Alpha 9](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.9)
- [Alpha 8](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.8)
- [Alpha 7](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.7)
- [Alpha 6](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.6)
- [Alpha 5](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.5)
- [Alpha 4](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.4)
- [Alpha 3](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.3)
- [Alpha 2](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.2)
- [Alpha 1](https://github.com/chipsenkbeil/distant/releases/tag/v0.20.0-alpha.1)
### Fixed
- When terminating a connection using `distant manager kill`, the connection is
now properly dropped, resulting in servers that are waiting to terminate due to
`--shutdown lonely=N` now shutting down accordingly
- Zombies from spawned servers via `distant launch manager://localhost` are now
properly terminated by checking the exit status of processes
## [0.20.0-alpha.13]
### Added
- Support for `--shell` with optional path to an explicit shell as an option
when executing `distant spawn` in order to run the command within a shell
rather than directly
- `semver` crate to be used for version information in protocol and manager
- `is_compatible_with` function to root of `distant-protocol` crate that checks
if a provided version is compatible with the protocol
### Changed
- `distant_protocol::PROTOCOL_VERSION` now uses the crate's major, minor, and
patch version at compile-time (parsed via `const-str` crate) to streamline
version handling between crate and protocol
- Protocol and manager now supply a version request instead of capabilities and
the capabilities of protocol are now a `Vec<String>` to contain a set of more
broad capabilities instead of every possible request type
### Fixed
- CLI commands like `distant manager select` will now output errors in a JSON
format when configured to communicate using JSON
- `distant-ssh2` no longer caches the remote family globally, but instead
caches it per `Ssh` instance
### Removed
- `Cmd::program` and `Cmd::arguments` functions as they were misleading (didn't
do what `distant-local` or `distant-ssh2` do)
- Removed `Capability` and `Capabilities` from protocol and manager
## [0.20.0-alpha.12]
### Changed
- Minimum Rust version is now `1.70.0` due to bump in `grep-cli` minimum
requirement. This technically applied to v0.20.0-alpha.11, but wasn't caught
until the dependency updated
### Fixed
- `distant --help` will now return exit code of 0
- `distant --version` will now return exit code of 0
## [0.20.0-alpha.11]
### Added
- CLI now supports `-c <STR>` and `--cmd <STR>` to use a given string as the
command as an alternative to `-- <CMD> <ARG> <ARG>`
- Add build for FreeBSD
### Changed
- CLI no longer uses `-c` as shorthand for specifying a config file
- `--file` option for generating completion has been renamed to `--output`
- CLI command to generate config files now defaults to printing to stdout with
`--output` providing the option to write to a file
- Artifacts built now use format of `distant-<TRIPLE>`
## [0.20.0-alpha.10]
### Added
- `use_hidden`, `use_ignore_files`, `use_parent_ignore_files`,
`use_git_ignore`, `use_global_git_ignore`, and `use_git_exclude` as new
options for searching
### Changed
- Searching now disables all standard filters by default, while re-introducing
the ability to set the filters via individual options
### Fixed
- Failing to start a search will no longer cause the search task to exit when
using the local server, which would result in no more searches being able to
be executed
## [0.20.0-alpha.9]
### Added
- `Request` and `Response` types from `distant-net` now support an optional
`Header` to send miscellaneous information
### Changed
- `Change` structure now provides a single `path` instead of `paths` with the
`distant-local` implementation sending a separate `Changed` event per path
- `ChangeDetails` now includes a `renamed` field to capture the new path name
when known
- `DistantApi` now handles batch requests in parallel, returning the results in
order. To achieve the previous sequential processing of batch requests, the
header value `sequence` needs to be set to true
- Rename `GenericServerRef` to `ServerRef` and remove `ServerRef` trait,
refactoring `TcpServerRef`, `UnixSocketServerRef`, and `WindowsPipeServerRef`
to use the struct instead of `Box<dyn ServerRef>`
- Update `Reply` trait and associated implementations to be non-blocking &
synchronous as opposed to asynchronous to avoid deadlocks and also be more
performant
### Fixed
- Username and password now support full character sets outside of `@` for
passwords and `:` and `@` for usernames
## [0.20.0-alpha.8]
### Added
@ -42,6 +185,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `crossbeam-channel` dependency removed from notify by disabling its feature
in order to avoid a `tokio::spawn` issue (https://github.com/notify-rs/notify/issues/380)
### Fixed
- usernames with `-` (hyphen) were rejected as invalid
## [0.20.0-alpha.7]
### Added
@ -496,7 +643,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
pending upon full channel and no longer locks up
- stdout, stderr, and stdin of `RemoteProcess` no longer cause deadlock
[Unreleased]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.13...HEAD
[0.20.0-alpha.13]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.12...v0.20.0-alpha.13
[0.20.0-alpha.12]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.11...v0.20.0-alpha.12
[0.20.0-alpha.11]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.10...v0.20.0-alpha.11
[0.20.0-alpha.10]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.9...v0.20.0-alpha.10
[0.20.0-alpha.9]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.8...v0.20.0-alpha.9
[0.20.0-alpha.8]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.7...v0.20.0-alpha.8
[0.20.0-alpha.7]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.6...v0.20.0-alpha.7
[0.20.0-alpha.6]: https://github.com/chipsenkbeil/distant/compare/v0.20.0-alpha.5...v0.20.0-alpha.6

29
Cargo.lock generated

@ -571,6 +571,12 @@ version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913"
[[package]]
name = "const-str"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aca749d3d3f5b87a0d6100509879f9cf486ab510803a4a4e1001da1ff61c2bd6"
[[package]]
name = "convert_case"
version = "0.4.0"
@ -807,7 +813,7 @@ dependencies = [
[[package]]
name = "distant"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_cmd",
@ -844,6 +850,7 @@ dependencies = [
"test-log",
"tokio",
"toml_edit",
"typed-path",
"which",
"whoami",
"windows-service",
@ -852,7 +859,7 @@ dependencies = [
[[package]]
name = "distant-auth"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"async-trait",
"derive_more",
@ -865,7 +872,7 @@ dependencies = [
[[package]]
name = "distant-core"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"async-trait",
"bitflags 2.3.1",
@ -891,7 +898,7 @@ dependencies = [
[[package]]
name = "distant-local"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"assert_fs",
"async-trait",
@ -919,11 +926,12 @@ dependencies = [
[[package]]
name = "distant-net"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"async-trait",
"bytes",
"chacha20poly1305",
"const-str",
"derive_more",
"distant-auth",
"dyn-clone",
@ -935,7 +943,9 @@ dependencies = [
"p256",
"paste",
"rand",
"rmp",
"rmp-serde",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
@ -948,13 +958,15 @@ dependencies = [
[[package]]
name = "distant-protocol"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"bitflags 2.3.1",
"const-str",
"derive_more",
"regex",
"rmp",
"rmp-serde",
"semver 1.0.17",
"serde",
"serde_bytes",
"serde_json",
@ -963,7 +975,7 @@ dependencies = [
[[package]]
name = "distant-ssh2"
version = "0.20.0-alpha.8"
version = "0.20.0"
dependencies = [
"anyhow",
"assert_fs",
@ -2767,6 +2779,9 @@ name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
dependencies = [
"serde",
]
[[package]]
name = "semver-parser"

@ -3,7 +3,7 @@ name = "distant"
description = "Operate on a remote computer through file and process manipulation"
categories = ["command-line-utilities"]
keywords = ["cli"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -25,6 +25,7 @@ members = [
opt-level = 'z'
lto = true
codegen-units = 1
strip = true
[features]
default = ["libssh", "ssh2"]
@ -39,8 +40,8 @@ clap_complete = "4.3.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.4", default-features = false }
distant-core = { version = "=0.20.0-alpha.8", path = "distant-core" }
distant-local = { version = "=0.20.0-alpha.8", path = "distant-local" }
distant-core = { version = "=0.20.0", path = "distant-core" }
distant-local = { version = "=0.20.0", path = "distant-local" }
directories = "5.0.1"
file-mode = "0.1.2"
flexi_logger = "0.25.5"
@ -58,12 +59,13 @@ tokio = { version = "1.28.2", features = ["full"] }
toml_edit = { version = "0.19.10", features = ["serde"] }
terminal_size = "0.2.6"
termwiz = "0.20.0"
typed-path = "0.3.2"
which = "4.4.0"
winsplit = "0.1.0"
whoami = "1.4.0"
# Optional native SSH functionality
distant-ssh2 = { version = "=0.20.0-alpha.8", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
distant-ssh2 = { version = "=0.20.0", path = "distant-ssh2", default-features = false, features = ["serde"], optional = true }
[target.'cfg(unix)'.dependencies]
fork = "0.1.21"

@ -1,21 +1,26 @@
[tasks.format]
clear = true
install_crate = "rustfmt-nightly"
command = "cargo"
args = ["+nightly", "fmt", "--all"]
[tasks.test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace"]
[tasks.ci-test]
clear = true
command = "cargo"
args = ["nextest", "run", "--profile", "ci", "--release", "--all-features", "--workspace"]
[tasks.post-ci-test]
clear = true
command = "cargo"
args = ["test", "--release", "--all-features", "--workspace", "--doc"]
[tasks.publish]
clear = true
script = '''
cargo publish --all-features -p distant-auth
cargo publish --all-features -p distant-protocol
@ -27,6 +32,7 @@ cargo publish --all-features
'''
[tasks.dry-run-publish]
clear = true
script = '''
cargo publish --all-features --dry-run -p distant-auth
cargo publish --all-features --dry-run -p distant-protocol

@ -1,6 +1,11 @@
# distant - remotely edit files and run programs
<h1 align="center">
<img src="https://distant.dev/assets/images/distant-with-logo-300x87.png" alt="Distant">
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.68+][distant_rustc_img]][distant_rustc_lnk]
<a href="https://distant.dev/">Documentation</a> |
<a href="https://github.com/chipsenkbeil/distant/discussions">Discussion</a>
</h1>
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![CI][distant_ci_img]][distant_ci_lnk] [![RustC 1.70+][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant.svg
[distant_crates_lnk]: https://crates.io/crates/distant
@ -8,164 +13,52 @@
[distant_doc_lnk]: https://docs.rs/distant
[distant_ci_img]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml/badge.svg
[distant_ci_lnk]: https://github.com/chipsenkbeil/distant/actions/workflows/ci.yml
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
🚧 **(Alpha stage software) This program is in rapid development and may break or change frequently!** 🚧
## Details
The `distant` binary supplies both a server and client component as well as
a command to start a server and configure the local client to be able to
talk to the server.
- Asynchronous in nature, powered by [`tokio`](https://tokio.rs/)
- Data is serialized to send across the wire via [`msgpack`](https://msgpack.org/)
- Encryption & authentication are handled via
[XChaCha20Poly1305](https://tools.ietf.org/html/rfc8439) for an authenticated
encryption scheme via
[RustCrypto/ChaCha20Poly1305](https://github.com/RustCrypto/AEADs/tree/master/chacha20poly1305)
Additionally, the core of the distant client and server codebase can be pulled
in to be used with your own Rust crates via the `distant-core` crate. The
networking library, which is agnostic of `distant` protocols, can be used via
the `distant-net` crate.
## Installation
### Prebuilt Binaries
If you would like a pre-built binary, check out the
[releases section](https://github.com/chipsenkbeil/distant/releases).
### Building from Source
### Unix
If you have [`cargo`](https://github.com/rust-lang/cargo) installed, you can
directly download and build the source via:
```sh
# Need to include -L to follow redirects as this returns 301
curl -L https://sh.distant.dev | sh
```bash
cargo install distant
# Can also use wget to the same result
wget -q -O- https://sh.distant.dev | sh
```
Alternatively, you can clone this repository and build from source following
the [build guide](./BUILDING.md).
## Backend Feature Matrix
Distant supports multiple backends to facilitate remote communication with
another server. Today, these backends include:
* `distant` - a standalone server acting as the reference implementation
* `ssh` - a wrapper around an `ssh` client that translates the distant protocol
into ssh server requests
Not every backend supports every feature of distant. Below is a table outlining
the available features and which backend supports each feature:
| Feature | distant | ssh |
| --------------------- | --------| ----|
| Filesystem I/O | ✅ | ✅ |
| Filesystem Watching | ✅ | ✅ |
| Process Execution | ✅ | ✅ |
| Reconnect | ✅ | ❌ |
| Search | ✅ | ❌ |
| System Information | ✅ | ⚠ |
| Version | ✅ | ✅ |
* ✅ means full support
* ⚠ means partial support
* ❌ means no support
### Feature Details
* `Filesystem I/O` - able to read from and write to the filesystem
* `Filesystem Watching` - able to receive notifications when changes to the
filesystem occur
* `Process Execution` - able to execute processes
* `Reconnect` - able to reconnect after network outages
* `Search` - able to search the filesystem
* `System Information` - able to retrieve information about the system
* `Version` - able to report back version information
## Example
### Starting the manager
In order to facilitate communication between a client and server, you first
need to start the manager. This can be done in one of two ways:
1. Leverage the `service` functionality to spawn the manager using one of the
following supported service management platforms:
- [`sc.exe`](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/cc754599(v=ws.11)) for use with [Window Service](https://en.wikipedia.org/wiki/Windows_service) (Windows)
- [Launchd](https://en.wikipedia.org/wiki/Launchd) (MacOS)
- [systemd](https://en.wikipedia.org/wiki/Systemd) (Linux)
- [OpenRC](https://en.wikipedia.org/wiki/OpenRC) (Linux)
- [rc.d](https://en.wikipedia.org/wiki/Init#Research_Unix-style/BSD-style) (FreeBSD)
2. Run the manager manually by using the `listen` subcommand
#### Service management
```bash
# If you want to install the manager as a service, you can use the service
# interface available directly from the CLI
#
# By default, this will install a system-level service, which means that you
# will need elevated permissions to both install AND communicate with the
# manager
distant manager service install
# If you want to maintain a user-level manager service, you can include the
# --user flag. Note that this is only supported on MacOS (via launchd) and
# Linux (via systemd)
distant manager service install --user
# ........
# Once you have installed the service, you will normally need to start it
# manually or restart your machine to trigger startup on boot
distant manager service start # --user if you are working with user-level
```
#### Manual start
See https://distant.dev/getting-started/installation/unix/ for more details.
```bash
# If you choose to run the manager without a service management platform, you
# can either run the manager in the foreground or provide --daemon to spawn and
# detach the manager
### Windows
# Run in the foreground
distant manager listen
# Detach the manager where it will not terminate even if the parent exits
distant manager listen --daemon
```powershell
Set-ExecutionPolicy RemoteSigned -Scope CurrentUser # Optional: Needed to run a remote script the first time
irm sh.distant.dev | iex
```
### Interacting with a remote machine
See https://distant.dev/getting-started/installation/windows/ for more details.
Once you have a manager listening for client requests, you can begin
interacting with the manager, spawn and/or connect to servers, and interact
with remote machines.
## Usage
```bash
# Connect to my.example.com on port 22 via SSH and start a distant server
distant client launch ssh://my.example.com
# After the connection is established, you can perform different operations
# on the remote machine via `distant client action {command} [args]`
distant client action copy path/to/file new/path/to/file
distant client action spawn -- echo 'Hello, this is from the other side'
```sh
# Start a manager in the background
distant manager listen --daemon
# Opening a shell to the remote machine is trivial
distant client shell
# SSH into a server, start distant, and connect to the distant server
distant launch ssh://example.com
# If you have more than one connection open, you can switch between active
# connections by using the `select` subcommand
distant client select '<ID>'
# Read the current working directory
distant fs read .
# For programmatic use, a REPL following the JSON API is available
distant client repl --format json
# Start a shell on the remote machine
distant shell
```
See https://distant.dev/getting-started/usage/ for more details.
## License
This project is licensed under either of

@ -3,7 +3,7 @@ name = "distant-auth"
description = "Authentication library for distant, providing various implementations"
categories = ["authentication"]
keywords = ["auth", "authentication", "async"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"

@ -1,13 +1,13 @@
# distant auth
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-auth.svg
[distant_crates_lnk]: https://crates.io/crates/distant-auth
[distant_doc_img]: https://docs.rs/distant-auth/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-auth
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_auth-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -5,11 +5,13 @@ use async_trait::async_trait;
use crate::authenticator::Authenticator;
use crate::methods::AuthenticationMethod;
/// Authentication method for a static secret key
/// Authentication method that skips authentication and approves anything.
#[derive(Clone, Debug)]
pub struct NoneAuthenticationMethod;
impl NoneAuthenticationMethod {
pub const ID: &str = "none";
#[inline]
pub fn new() -> Self {
Self
@ -26,7 +28,7 @@ impl Default for NoneAuthenticationMethod {
#[async_trait]
impl AuthenticationMethod for NoneAuthenticationMethod {
fn id(&self) -> &'static str {
"none"
Self::ID
}
async fn authenticate(&self, _: &mut dyn Authenticator) -> io::Result<()> {

@ -14,6 +14,8 @@ pub struct StaticKeyAuthenticationMethod<T> {
}
impl<T> StaticKeyAuthenticationMethod<T> {
pub const ID: &str = "static_key";
#[inline]
pub fn new(key: T) -> Self {
Self { key }
@ -26,7 +28,7 @@ where
T: FromStr + PartialEq + Send + Sync,
{
fn id(&self) -> &'static str {
"static_key"
Self::ID
}
async fn authenticate(&self, authenticator: &mut dyn Authenticator) -> io::Result<()> {

@ -3,7 +3,7 @@ name = "distant-core"
description = "Core library for distant, enabling operation on a remote computer through file and process manipulation"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -16,8 +16,8 @@ async-trait = "0.1.68"
bitflags = "2.3.1"
bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.20.0-alpha.8", path = "../distant-net" }
distant-protocol = { version = "=0.20.0-alpha.8", path = "../distant-protocol" }
distant-net = { version = "=0.20.0", path = "../distant-net" }
distant-protocol = { version = "=0.20.0", path = "../distant-protocol" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant core
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-core.svg
[distant_crates_lnk]: https://crates.io/crates/distant-core
[distant_doc_img]: https://docs.rs/distant-core/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-core
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_core-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -4,7 +4,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use distant_net::common::ConnectionId;
use distant_net::server::{ConnectionCtx, Reply, ServerCtx, ServerHandler};
use distant_net::server::{Reply, RequestCtx, ServerHandler};
use log::*;
use crate::protocol::{
@ -16,26 +16,25 @@ mod reply;
use reply::DistantSingleReply;
/// Represents the context provided to the [`DistantApi`] for incoming requests
pub struct DistantCtx<T> {
pub struct DistantCtx {
pub connection_id: ConnectionId,
pub reply: Box<dyn Reply<Data = protocol::Response>>,
pub local_data: Arc<T>,
}
/// Represents a [`ServerHandler`] that leverages an API compliant with `distant`
pub struct DistantApiServerHandler<T, D>
pub struct DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
api: T,
api: Arc<T>,
}
impl<T, D> DistantApiServerHandler<T, D>
impl<T> DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D>,
T: DistantApi,
{
pub fn new(api: T) -> Self {
Self { api }
Self { api: Arc::new(api) }
}
}
@ -51,12 +50,15 @@ fn unsupported<T>(label: &str) -> io::Result<T> {
/// which can be used to build other servers that are compatible with distant
#[async_trait]
pub trait DistantApi {
type LocalData: Send + Sync;
/// Invoked whenever a new connection is established.
#[allow(unused_variables)]
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked whenever a new connection is established, providing a mutable reference to the
/// newly-created local data. This is a way to support modifying local data before it is used.
/// Invoked whenever an existing connection is dropped.
#[allow(unused_variables)]
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
@ -64,7 +66,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn version(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Version> {
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
unsupported("version")
}
@ -74,11 +76,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<Vec<u8>> {
async fn read_file(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
unsupported("read_file")
}
@ -88,11 +86,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn read_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<String> {
async fn read_file_text(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<String> {
unsupported("read_file_text")
}
@ -103,12 +97,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn write_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn write_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("write_file")
}
@ -121,7 +110,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn write_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -135,12 +124,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn append_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn append_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
unsupported("append_file")
}
@ -153,7 +137,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn append_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -172,7 +156,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn read_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
depth: usize,
absolute: bool,
@ -189,12 +173,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn create_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
all: bool,
) -> io::Result<()> {
async fn create_dir(&self, ctx: DistantCtx, path: PathBuf, all: bool) -> io::Result<()> {
unsupported("create_dir")
}
@ -205,12 +184,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn copy(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn copy(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("copy")
}
@ -221,12 +195,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn remove(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
force: bool,
) -> io::Result<()> {
async fn remove(&self, ctx: DistantCtx, path: PathBuf, force: bool) -> io::Result<()> {
unsupported("remove")
}
@ -237,12 +206,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn rename(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn rename(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
unsupported("rename")
}
@ -257,7 +221,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn watch(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
recursive: bool,
only: Vec<ChangeKind>,
@ -272,7 +236,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn unwatch(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<()> {
async fn unwatch(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<()> {
unsupported("unwatch")
}
@ -282,7 +246,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn exists(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<bool> {
async fn exists(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<bool> {
unsupported("exists")
}
@ -296,7 +260,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn metadata(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
canonicalize: bool,
resolve_file_type: bool,
@ -314,7 +278,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn set_permissions(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
permissions: Permissions,
options: SetPermissionsOptions,
@ -328,11 +292,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn search(
&self,
ctx: DistantCtx<Self::LocalData>,
query: SearchQuery,
) -> io::Result<SearchId> {
async fn search(&self, ctx: DistantCtx, query: SearchQuery) -> io::Result<SearchId> {
unsupported("search")
}
@ -342,11 +302,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn cancel_search(
&self,
ctx: DistantCtx<Self::LocalData>,
id: SearchId,
) -> io::Result<()> {
async fn cancel_search(&self, ctx: DistantCtx, id: SearchId) -> io::Result<()> {
unsupported("cancel_search")
}
@ -361,7 +317,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn proc_spawn(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
@ -376,7 +332,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_kill(&self, ctx: DistantCtx<Self::LocalData>, id: ProcessId) -> io::Result<()> {
async fn proc_kill(&self, ctx: DistantCtx, id: ProcessId) -> io::Result<()> {
unsupported("proc_kill")
}
@ -387,12 +343,7 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn proc_stdin(
&self,
ctx: DistantCtx<Self::LocalData>,
id: ProcessId,
data: Vec<u8>,
) -> io::Result<()> {
async fn proc_stdin(&self, ctx: DistantCtx, id: ProcessId, data: Vec<u8>) -> io::Result<()> {
unsupported("proc_stdin")
}
@ -405,7 +356,7 @@ pub trait DistantApi {
#[allow(unused_variables)]
async fn proc_resize_pty(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
id: ProcessId,
size: PtySize,
) -> io::Result<()> {
@ -416,32 +367,34 @@ pub trait DistantApi {
///
/// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)]
async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> {
async fn system_info(&self, ctx: DistantCtx) -> io::Result<SystemInfo> {
unsupported("system_info")
}
}
#[async_trait]
impl<T, D> ServerHandler for DistantApiServerHandler<T, D>
impl<T> ServerHandler for DistantApiServerHandler<T>
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync + 'static,
{
type LocalData = D;
type Request = protocol::Msg<protocol::Request>;
type Response = protocol::Msg<protocol::Response>;
/// Overridden to leverage [`DistantApi`] implementation of `on_accept`
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
T::on_accept(&self.api, ctx).await
/// Overridden to leverage [`DistantApi`] implementation of `on_connect`.
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
T::on_connect(&self.api, id).await
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
let ServerCtx {
/// Overridden to leverage [`DistantApi`] implementation of `on_disconnect`.
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
T::on_disconnect(&self.api, id).await
}
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
let RequestCtx {
connection_id,
request,
reply,
local_data,
} = ctx;
// Convert our reply to a queued reply so we can ensure that the result
@ -454,10 +407,9 @@ where
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data,
};
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
if let protocol::Response::Error(x) = &data {
@ -466,27 +418,34 @@ where
protocol::Msg::Single(data)
}
protocol::Msg::Batch(list) => {
protocol::Msg::Batch(list)
if matches!(request.header.get_as("sequence"), Some(Ok(true))) =>
{
let mut out = Vec::new();
let mut has_failed = false;
for data in list {
// Once we hit a failure, all remaining requests return interrupted
if has_failed {
out.push(protocol::Response::Error(protocol::Error {
kind: protocol::ErrorKind::Interrupted,
description: String::from("Canceled due to earlier error"),
}));
continue;
}
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
local_data: Arc::clone(&local_data),
};
// TODO: This does not run in parallel, meaning that the next item in the
// batch will not be queued until the previous item completes! This
// would be useful if we wanted to chain requests where the previous
// request feeds into the current request, but not if we just want
// to run everything together. So we should instead rewrite this
// to spawn a task per request and then await completion of all tasks
let data = handle_request(self, ctx, data).await;
let data = handle_request(Arc::clone(&self.api), ctx, data).await;
// Report outgoing errors in our debug logs
// Report outgoing errors in our debug logs and mark as failed
// to cancel any future tasks being run
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
has_failed = true;
}
out.push(data);
@ -494,17 +453,54 @@ where
protocol::Msg::Batch(out)
}
protocol::Msg::Batch(list) => {
let mut tasks = Vec::new();
// If sequence specified as true, we want to process in order, otherwise we can
// process in any order
for data in list {
let api = Arc::clone(&self.api);
let ctx = DistantCtx {
connection_id,
reply: Box::new(DistantSingleReply::from(reply.clone_reply())),
};
let task = tokio::spawn(async move {
let data = handle_request(api, ctx, data).await;
// Report outgoing errors in our debug logs
if let protocol::Response::Error(x) = &data {
debug!("[Conn {}] {}", connection_id, x);
}
data
});
tasks.push(task);
}
let out = futures::future::join_all(tasks)
.await
.into_iter()
.map(|x| match x {
Ok(x) => x,
Err(x) => protocol::Response::Error(x.to_string().into()),
})
.collect();
protocol::Msg::Batch(out)
}
};
// Queue up our result to go before ANY of the other messages that might be sent.
// This is important to avoid situations such as when a process is started, but before
// the confirmation can be sent some stdout or stderr is captured and sent first.
if let Err(x) = reply.send_before(response).await {
if let Err(x) = reply.send_before(response) {
error!("[Conn {}] Failed to send response: {}", connection_id, x);
}
// Flush out all of our replies thus far and toggle to no longer hold submissions
if let Err(x) = reply.flush(false).await {
if let Err(x) = reply.flush(false) {
error!(
"[Conn {}] Failed to flush response queue: {}",
connection_id, x
@ -514,54 +510,46 @@ where
}
/// Processes an incoming request
async fn handle_request<T, D>(
server: &DistantApiServerHandler<T, D>,
ctx: DistantCtx<D>,
async fn handle_request<T>(
api: Arc<T>,
ctx: DistantCtx,
request: protocol::Request,
) -> protocol::Response
where
T: DistantApi<LocalData = D> + Send + Sync,
D: Send + Sync,
T: DistantApi + Send + Sync,
{
match request {
protocol::Request::Version {} => server
.api
protocol::Request::Version {} => api
.version(ctx)
.await
.map(protocol::Response::Version)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileRead { path } => server
.api
protocol::Request::FileRead { path } => api
.read_file(ctx, path)
.await
.map(|data| protocol::Response::Blob { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileReadText { path } => server
.api
protocol::Request::FileReadText { path } => api
.read_file_text(ctx, path)
.await
.map(|data| protocol::Response::Text { data })
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWrite { path, data } => server
.api
protocol::Request::FileWrite { path, data } => api
.write_file(ctx, path, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileWriteText { path, text } => server
.api
protocol::Request::FileWriteText { path, text } => api
.write_file_text(ctx, path, text)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppend { path, data } => server
.api
protocol::Request::FileAppend { path, data } => api
.append_file(ctx, path, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::FileAppendText { path, text } => server
.api
protocol::Request::FileAppendText { path, text } => api
.append_file_text(ctx, path, text)
.await
.map(|_| protocol::Response::Ok)
@ -572,8 +560,7 @@ where
absolute,
canonicalize,
include_root,
} => server
.api
} => api
.read_dir(ctx, path, depth, absolute, canonicalize, include_root)
.await
.map(|(entries, errors)| protocol::Response::DirEntries {
@ -581,26 +568,22 @@ where
errors: errors.into_iter().map(Error::from).collect(),
})
.unwrap_or_else(protocol::Response::from),
protocol::Request::DirCreate { path, all } => server
.api
protocol::Request::DirCreate { path, all } => api
.create_dir(ctx, path, all)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Remove { path, force } => server
.api
protocol::Request::Remove { path, force } => api
.remove(ctx, path, force)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Copy { src, dst } => server
.api
protocol::Request::Copy { src, dst } => api
.copy(ctx, src, dst)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Rename { src, dst } => server
.api
protocol::Request::Rename { src, dst } => api
.rename(ctx, src, dst)
.await
.map(|_| protocol::Response::Ok)
@ -610,20 +593,17 @@ where
recursive,
only,
except,
} => server
.api
} => api
.watch(ctx, path, recursive, only, except)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Unwatch { path } => server
.api
protocol::Request::Unwatch { path } => api
.unwatch(ctx, path)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Exists { path } => server
.api
protocol::Request::Exists { path } => api
.exists(ctx, path)
.await
.map(|value| protocol::Response::Exists { value })
@ -632,8 +612,7 @@ where
path,
canonicalize,
resolve_file_type,
} => server
.api
} => api
.metadata(ctx, path, canonicalize, resolve_file_type)
.await
.map(protocol::Response::Metadata)
@ -642,20 +621,17 @@ where
path,
permissions,
options,
} => server
.api
} => api
.set_permissions(ctx, path, permissions, options)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::Search { query } => server
.api
protocol::Request::Search { query } => api
.search(ctx, query)
.await
.map(|id| protocol::Response::SearchStarted { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::CancelSearch { id } => server
.api
protocol::Request::CancelSearch { id } => api
.cancel_search(ctx, id)
.await
.map(|_| protocol::Response::Ok)
@ -665,32 +641,27 @@ where
environment,
current_dir,
pty,
} => server
.api
} => api
.proc_spawn(ctx, cmd.into(), environment, current_dir, pty)
.await
.map(|id| protocol::Response::ProcSpawned { id })
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcKill { id } => server
.api
protocol::Request::ProcKill { id } => api
.proc_kill(ctx, id)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcStdin { id, data } => server
.api
protocol::Request::ProcStdin { id, data } => api
.proc_stdin(ctx, id, data)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::ProcResizePty { id, size } => server
.api
protocol::Request::ProcResizePty { id, size } => api
.proc_resize_pty(ctx, id, size)
.await
.map(|_| protocol::Response::Ok)
.unwrap_or_else(protocol::Response::from),
protocol::Request::SystemInfo {} => server
.api
protocol::Request::SystemInfo {} => api
.system_info(ctx)
.await
.map(protocol::Response::SystemInfo)

@ -1,6 +1,4 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use distant_net::server::Reply;
@ -19,14 +17,10 @@ impl From<Box<dyn Reply<Data = protocol::Msg<protocol::Response>>>> for DistantS
impl Reply for DistantSingleReply {
type Data = protocol::Response;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
fn send(&self, data: Self::Data) -> io::Result<()> {
self.0.send(protocol::Msg::Single(data))
}
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
self.0.blocking_send(protocol::Msg::Single(data))
}
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>> {
Box::new(Self(self.0.clone_reply()))
}

@ -44,8 +44,12 @@ pub trait DistantChannelExt {
/// Creates a remote directory, optionally creating all parent components if specified
fn create_dir(&mut self, path: impl Into<PathBuf>, all: bool) -> AsyncReturn<'_, ()>;
/// Checks whether the `path` exists on the remote machine
fn exists(&mut self, path: impl Into<PathBuf>) -> AsyncReturn<'_, bool>;
/// Checks whether this client is compatible with the remote server
fn is_compatible(&mut self) -> AsyncReturn<'_, bool>;
/// Retrieves metadata about a path on a remote machine
fn metadata(
&mut self,
@ -136,6 +140,9 @@ pub trait DistantChannelExt {
/// Retrieves server version information
fn version(&mut self) -> AsyncReturn<'_, Version>;
/// Returns version of protocol that the client uses
fn protocol_version(&self) -> protocol::semver::Version;
/// Writes a remote file with the data from a collection of bytes
fn write_file(
&mut self,
@ -232,6 +239,15 @@ impl DistantChannelExt
)
}
fn is_compatible(&mut self) -> AsyncReturn<'_, bool> {
make_body!(self, protocol::Request::Version {}, |data| match data {
protocol::Response::Version(version) =>
Ok(protocol::is_compatible_with(&version.protocol_version)),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn metadata(
&mut self,
path: impl Into<PathBuf>,
@ -453,6 +469,10 @@ impl DistantChannelExt
})
}
fn protocol_version(&self) -> protocol::semver::Version {
protocol::PROTOCOL_VERSION
}
fn write_file(
&mut self,
path: impl Into<PathBuf>,

@ -265,13 +265,13 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
@ -286,7 +286,7 @@ mod tests {
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -297,7 +297,7 @@ mod tests {
Change {
timestamp: 1,
kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -340,7 +340,7 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
@ -354,7 +354,7 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
@ -368,7 +368,7 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 2,
kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
@ -382,7 +382,7 @@ mod tests {
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -393,7 +393,7 @@ mod tests {
Change {
timestamp: 2,
kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -434,19 +434,19 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
timestamp: 1,
kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
protocol::Response::Changed(Change {
timestamp: 2,
kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
],
@ -473,7 +473,7 @@ mod tests {
Change {
timestamp: 0,
kind: ChangeKind::Access,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}
);
@ -498,7 +498,7 @@ mod tests {
protocol::Response::Changed(Change {
timestamp: 3,
kind: ChangeKind::Unknown,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
}),
))
@ -512,7 +512,7 @@ mod tests {
Some(Change {
timestamp: 1,
kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
})
);
@ -521,7 +521,7 @@ mod tests {
Some(Change {
timestamp: 2,
kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()],
path: test_path.to_path_buf(),
details: Default::default(),
})
);

@ -0,0 +1,325 @@
use std::io;
use std::path::PathBuf;
use async_trait::async_trait;
use distant_core::{
DistantApi, DistantApiServerHandler, DistantChannelExt, DistantClient, DistantCtx,
};
use distant_net::auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener, Version};
use distant_net::server::{Server, ServerRef};
use distant_protocol::PROTOCOL_VERSION;
/// Stands up an inmemory client and server using the given api.
///
/// Both sides advertise the same version (derived from `PROTOCOL_VERSION`)
/// so the compatibility check performed during `connect` succeeds.
/// Returns the connected client together with a handle to the running
/// server; dropping the handle shuts the server down.
async fn setup(api: impl DistantApi + Send + Sync + 'static) -> (DistantClient, ServerRef) {
    // Paired in-memory transports: t1 becomes the client side, t2 the
    // server side; 100 is the transport's channel capacity.
    let (t1, t2) = InmemoryTransport::pair(100);
    let server = Server::new()
        .handler(DistantApiServerHandler::new(api))
        // No authentication challenges are issued for these tests.
        .verifier(Verifier::none())
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        // OneshotListener yields exactly one connection (our t2 end).
        .start(OneshotListener::from_value(t2))
        .expect("Failed to start server");
    let client: DistantClient = Client::build()
        // DummyAuthHandler pairs with Verifier::none() above.
        .auth_handler(DummyAuthHandler)
        .connector(t1)
        .version(Version::new(
            PROTOCOL_VERSION.major,
            PROTOCOL_VERSION.minor,
            PROTOCOL_VERSION.patch,
        ))
        .connect()
        .await
        .expect("Failed to connect to server");
    (client, server)
}
/// Tests covering a single (non-batch) request/response round trip.
mod single {
    use test_log::test;

    use super::*;

    /// An error returned by the API handler should surface on the client
    /// as an `io::Error` with the original kind and message preserved
    /// across the wire.
    #[test(tokio::test)]
    async fn should_support_single_request_returning_error() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Err(io::Error::new(io::ErrorKind::NotFound, "test error"))
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let error = client.read_file(PathBuf::from("file")).await.unwrap_err();
        assert_eq!(error.kind(), io::ErrorKind::NotFound);
        assert_eq!(error.to_string(), "test error");
    }

    /// A successful response should deliver the handler's bytes verbatim.
    #[test(tokio::test)]
    async fn should_support_single_request_returning_success() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, _path: PathBuf) -> io::Result<Vec<u8>> {
                Ok(b"hello world".to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let contents = client.read_file(PathBuf::from("file")).await.unwrap();
        assert_eq!(contents, b"hello world");
    }
}
/// Tests covering batch requests processed in parallel (the default when
/// no "sequence" header is set on the request).
mod batch_parallel {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_parallel() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                // The "slow" path stalls for 500ms so the test can detect
                // whether requests before/after it waited on it.
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                // Respond with the completion time (ms since epoch) encoded
                // as big-endian bytes so the caller can compare timestamps.
                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        // Batch of three reads with the slow one in the middle.
        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    // Decode the big-endian u64 timestamp written above.
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in parallel as the first and third requests should not be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff <= 500, "Sequential ordering detected");
    }

    #[test(tokio::test)]
    async fn should_run_all_requests_even_if_some_fail() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                // Only the "fail" path errors; every other path succeeds.
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and success — in parallel mode a
        // failure does not cancel the remaining requests in the batch.
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(payloads[2], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}
/// Tests covering batch requests processed strictly in order, opted into
/// via the "sequence" header on the request.
mod batch_sequence {
    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    use distant_net::common::Request;
    use distant_protocol::{Msg, Request as RequestPayload};
    use test_log::test;

    use super::*;

    #[test(tokio::test)]
    async fn should_support_multiple_requests_running_in_sequence() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                // The "slow" path stalls for 500ms so the test can detect
                // whether later requests waited for it.
                if path.to_str().unwrap() == "slow" {
                    tokio::time::sleep(Duration::from_millis(500)).await;
                }

                // Respond with the completion time (ms since epoch) encoded
                // as big-endian bytes so the caller can compare timestamps.
                let time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
                Ok((time.as_millis() as u64).to_be_bytes().to_vec())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("slow"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Collect our times from the reading
        let mut times = Vec::new();
        for payload in payloads {
            match payload {
                distant_protocol::Response::Blob { data } => {
                    // Decode the big-endian u64 timestamp written above.
                    let mut buf = [0u8; 8];
                    buf.copy_from_slice(&data[..8]);
                    times.push(u64::from_be_bytes(buf));
                }
                x => panic!("Unexpected payload: {x:?}"),
            }
        }

        // Verify that these ran in sequence as the first and third requests should be
        // over 500 milliseconds apart due to the sleep in the middle!
        let diff = times[0].abs_diff(times[2]);
        assert!(diff > 500, "Parallel ordering detected");
    }

    #[test(tokio::test)]
    async fn should_interrupt_any_requests_following_a_failure() {
        struct TestDistantApi;

        #[async_trait]
        impl DistantApi for TestDistantApi {
            async fn read_file(&self, _ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
                // Only the "fail" path errors; every other path succeeds.
                if path.to_str().unwrap() == "fail" {
                    return Err(io::Error::new(io::ErrorKind::Other, "test error"));
                }

                Ok(Vec::new())
            }
        }

        let (mut client, _server) = setup(TestDistantApi).await;

        let mut request = Request::new(Msg::batch([
            RequestPayload::FileRead {
                path: PathBuf::from("file1"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("fail"),
            },
            RequestPayload::FileRead {
                path: PathBuf::from("file2"),
            },
        ]));

        // Mark as running in sequence
        request.header.insert("sequence", true);

        let response = client.send(request).await.unwrap();
        let payloads = response.payload.into_batch().unwrap();

        // Should be a success, error, and interrupt — in sequence mode a
        // failure cancels every request remaining after it in the batch.
        assert!(
            matches!(payloads[0], distant_protocol::Response::Blob { .. }),
            "Unexpected payloads[0]: {:?}",
            payloads[0]
        );
        assert!(
            matches!(
                &payloads[1],
                distant_protocol::Response::Error(distant_protocol::Error { kind, description })
                if matches!(kind, distant_protocol::ErrorKind::Other) && description == "test error"
            ),
            "Unexpected payloads[1]: {:?}",
            payloads[1]
        );
        assert!(
            matches!(
                &payloads[2],
                distant_protocol::Response::Error(distant_protocol::Error { kind, .. })
                if matches!(kind, distant_protocol::ErrorKind::Interrupted)
            ),
            "Unexpected payloads[2]: {:?}",
            payloads[2]
        );
    }
}

@ -2,7 +2,7 @@
name = "distant-local"
description = "Library implementing distant API for local interactions"
categories = ["network-programming"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -21,7 +21,7 @@ macos-kqueue = ["notify/macos_kqueue"]
[dependencies]
async-trait = "0.1.68"
distant-core = { version = "=0.20.0-alpha.8", path = "../distant-core" }
distant-core = { version = "=0.20.0", path = "../distant-core" }
grep = "0.2.12"
ignore = "0.4.20"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant local
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-local.svg
[distant_crates_lnk]: https://crates.io/crates/distant-local
[distant_doc_img]: https://docs.rs/distant-local/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-local
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_local-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

File diff suppressed because it is too large Load Diff

@ -85,6 +85,7 @@ impl ProcessInstance {
let args = cmd_and_args.split_off(1);
let cmd = cmd_and_args.into_iter().next().unwrap();
debug!("Spawning process: {cmd} {args:?}");
let mut child: Box<dyn Process> = match pty {
Some(size) => Box::new(PtyProcess::spawn(
cmd.clone(),
@ -173,7 +174,7 @@ async fn stdout_task(
loop {
match stdout.recv().await {
Ok(Some(data)) => {
reply.send(Response::ProcStdout { id, data }).await?;
reply.send(Response::ProcStdout { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -189,7 +190,7 @@ async fn stderr_task(
loop {
match stderr.recv().await {
Ok(Some(data)) => {
reply.send(Response::ProcStderr { id, data }).await?;
reply.send(Response::ProcStderr { id, data })?;
}
Ok(None) => return Ok(()),
Err(x) => return Err(x),
@ -205,15 +206,11 @@ async fn wait_task(
let status = child.wait().await;
match status {
Ok(status) => {
reply
.send(Response::ProcDone {
id,
success: status.success,
code: status.code,
})
.await
}
Err(x) => reply.send(Response::from(x)).await,
Ok(status) => reply.send(Response::ProcDone {
id,
success: status.success,
code: status.code,
}),
Err(x) => reply.send(Response::from(x)),
}
}

@ -137,7 +137,11 @@ async fn search_task(tx: mpsc::Sender<InnerSearchMsg>, mut rx: mpsc::Receiver<In
Ok(executor) => executor,
Err(x) => {
let _ = cb.send(Err(x));
return;
// NOTE: We do not want to exit our task! This processes all of our search
// requests, so if we exit, things have gone terrible. This is just a
// regular error, so we merely continue to wait for the next request.
continue;
}
};
@ -224,13 +228,10 @@ impl SearchQueryReporter {
if let Some(len) = options.pagination {
if matches.len() as u64 >= len {
trace!("[Query {id}] Reached {len} paginated matches");
if let Err(x) = reply
.send(Response::SearchResults {
id,
matches: std::mem::take(&mut matches),
})
.await
{
if let Err(x) = reply.send(Response::SearchResults {
id,
matches: std::mem::take(&mut matches),
}) {
error!("[Query {id}] Failed to send paginated matches: {x}");
}
}
@ -240,14 +241,14 @@ impl SearchQueryReporter {
// Send any remaining matches
if !matches.is_empty() {
trace!("[Query {id}] Sending {} remaining matches", matches.len());
if let Err(x) = reply.send(Response::SearchResults { id, matches }).await {
if let Err(x) = reply.send(Response::SearchResults { id, matches }) {
error!("[Query {id}] Failed to send final matches: {x}");
}
}
// Report that we are done
trace!("[Query {id}] Reporting as done");
if let Err(x) = reply.send(Response::SearchDone { id }).await {
if let Err(x) = reply.send(Response::SearchDone { id }) {
error!("[Query {id}] Failed to send done status: {x}");
}
}
@ -344,6 +345,13 @@ impl SearchQueryExecutor {
.build()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?,
)
.standard_filters(false)
.hidden(query.options.ignore_hidden)
.ignore(query.options.use_ignore_files)
.parents(query.options.use_parent_ignore_files)
.git_ignore(query.options.use_git_ignore_files)
.git_global(query.options.use_global_git_ignore_files)
.git_exclude(query.options.use_git_exclude_files)
.skip_stdout(true);
if query.options.upward {
@ -842,7 +850,7 @@ mod tests {
let root = setup_dir(Vec::new());
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -869,7 +877,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -946,7 +954,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1021,7 +1029,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1089,7 +1097,7 @@ mod tests {
let root = setup_dir(vec![("path/to/file.txt", "aa ab ac\nba bb bc\nca cb cc")]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1183,7 +1191,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1276,7 +1284,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1310,7 +1318,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1353,7 +1361,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
target: SearchQueryTarget::Path,
@ -1441,7 +1449,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1493,7 +1501,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1559,7 +1567,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: We provide regex that matches an invalid UTF-8 character by disabling the u flag
// and checking for 0x9F (159)
@ -1611,7 +1619,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: We provide regex that matches an invalid UTF-8 character by disabling the u flag
// and checking for 0x9F (159)
@ -1647,7 +1655,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1731,7 +1739,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![root.path().to_path_buf()],
@ -1786,7 +1794,7 @@ mod tests {
.unwrap();
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
// NOTE: Following symlobic links on its own does nothing, but when combined with a file
// type filter, it will evaluate the underlying type of symbolic links and filter
@ -1834,7 +1842,7 @@ mod tests {
]);
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![
@ -1918,7 +1926,7 @@ mod tests {
expected_paths: Vec<PathBuf>,
) {
let state = SearchState::new();
let (reply, mut rx) = mpsc::channel(100);
let (reply, mut rx) = mpsc::unbounded_channel();
let query = SearchQuery {
paths: vec![path],
target: SearchQueryTarget::Path,

@ -5,9 +5,9 @@ use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use distant_core::net::common::ConnectionId;
use distant_core::protocol::{Change, ChangeDetails, ChangeDetailsAttributes, ChangeKind};
use distant_core::protocol::{Change, ChangeDetails, ChangeDetailsAttribute, ChangeKind};
use log::*;
use notify::event::{AccessKind, AccessMode, MetadataKind, ModifyKind};
use notify::event::{AccessKind, AccessMode, MetadataKind, ModifyKind, RenameMode};
use notify::{
Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind,
Event as WatcherEvent, EventKind, PollWatcher, RecommendedWatcher, RecursiveMode, Watcher,
@ -337,33 +337,71 @@ async fn watcher_task<W>(
_ => ChangeKind::Unknown,
};
let attributes = match ev.kind {
EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
vec![ChangeDetailsAttributes::Timestamp]
}
EventKind::Modify(ModifyKind::Metadata(
MetadataKind::Ownership | MetadataKind::Permissions,
)) => vec![ChangeDetailsAttributes::Permissions],
_ => Vec::new(),
};
for registered_path in registered_paths.iter() {
let change = Change {
timestamp,
kind,
paths: ev.paths.clone(),
details: ChangeDetails {
attributes: attributes.clone(),
extra: ev.info().map(ToString::to_string),
},
};
match registered_path.filter_and_send(change).await {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
// For rename both, we assume the paths is a pair that represents before and
// after, so we want to grab the before and use it!
let (paths, renamed): (&[PathBuf], Option<PathBuf>) = match ev.kind {
EventKind::Modify(ModifyKind::Name(RenameMode::Both)) => (
&ev.paths[0..1],
if ev.paths.len() > 1 {
ev.paths.last().cloned()
} else {
None
},
),
_ => (&ev.paths, None),
};
for path in paths {
let attribute = match ev.kind {
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
Some(ChangeDetailsAttribute::Ownership)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
Some(ChangeDetailsAttribute::Permissions)
}
EventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
Some(ChangeDetailsAttribute::Timestamp)
}
_ => None,
};
// Calculate a timestamp for creation & modification paths
let details_timestamp = match ev.kind {
EventKind::Create(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.created().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
EventKind::Modify(_) => tokio::fs::symlink_metadata(path.as_path())
.await
.ok()
.and_then(|m| m.modified().ok())
.and_then(|t| t.duration_since(UNIX_EPOCH).ok())
.map(|d| d.as_secs()),
_ => None,
};
let change = Change {
timestamp,
kind,
path: path.to_path_buf(),
details: ChangeDetails {
attribute,
renamed: renamed.clone(),
timestamp: details_timestamp,
extra: ev.info().map(ToString::to_string),
},
};
match registered_path.filter_and_send(change) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",
registered_path.id(),
x
),
}
}
}
}
@ -372,10 +410,11 @@ async fn watcher_task<W>(
error!("Watcher encountered an error {} for {:?}", msg, err.paths);
for registered_path in registered_paths.iter() {
match registered_path
.filter_and_send_error(&msg, &err.paths, !err.paths.is_empty())
.await
{
match registered_path.filter_and_send_error(
&msg,
&err.paths,
!err.paths.is_empty(),
) {
Ok(_) => (),
Err(x) => error!(
"[Conn {}] Failed to forward changes to paths: {}",

@ -119,32 +119,27 @@ impl RegisteredPath {
}
/// Sends a reply for a change tied to this registered path, filtering
/// out any paths that are not applicable
/// out any changes that are not applicable.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send(&self, mut change: Change) -> io::Result<bool> {
/// Returns true if message was sent, and false if not.
pub fn filter_and_send(&self, change: Change) -> io::Result<bool> {
if !self.allowed().contains(&change.kind) {
return Ok(false);
}
// filter the paths that are not applicable
change.paths.retain(|p| self.applies_to_path(p.as_path()));
if !change.paths.is_empty() {
self.reply
.send(Response::Changed(change))
.await
.map(|_| true)
// Only send if this registered path applies to the changed path
if self.applies_to_path(&change.path) {
self.reply.send(Response::Changed(change)).map(|_| true)
} else {
Ok(false)
}
}
/// Sends an error message and includes paths if provided, skipping sending the message if
/// no paths match and `skip_if_no_paths` is true
/// no paths match and `skip_if_no_paths` is true.
///
/// Returns true if message was sent, and false if not
pub async fn filter_and_send_error<T>(
/// Returns true if message was sent, and false if not.
pub fn filter_and_send_error<T>(
&self,
msg: &str,
paths: T,
@ -167,7 +162,6 @@ impl RegisteredPath {
} else {
Response::Error(Error::from(format!("{msg} about {paths:?}")))
})
.await
.map(|_| true)
} else {
Ok(false)

@ -9,10 +9,10 @@ mod config;
mod constants;
pub use api::Api;
pub use config::*;
use distant_core::{DistantApi, DistantApiServerHandler};
use distant_core::DistantApiServerHandler;
/// Implementation of [`DistantApiServerHandler`] using [`Api`].
pub type Handler = DistantApiServerHandler<Api, <Api as DistantApi>::LocalData>;
pub type Handler = DistantApiServerHandler<Api>;
/// Initializes a new [`Handler`].
pub fn new_handler(config: Config) -> std::io::Result<Handler> {

@ -3,7 +3,7 @@ name = "distant-net"
description = "Network library for distant, providing implementations to support client/server architecture"
categories = ["network-programming"]
keywords = ["api", "async"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -15,8 +15,9 @@ license = "MIT OR Apache-2.0"
async-trait = "0.1.68"
bytes = "1.4.0"
chacha20poly1305 = "0.10.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-auth = { version = "=0.20.0-alpha.8", path = "../distant-auth" }
distant-auth = { version = "=0.20.0", path = "../distant-auth" }
dyn-clone = "1.0.11"
flate2 = "1.0.26"
hex = "0.4.3"
@ -25,15 +26,18 @@ log = "0.4.18"
paste = "1.0.12"
p256 = { version = "0.13.2", features = ["ecdh", "pem"] }
rand = { version = "0.8.5", features = ["getrandom"] }
rmp = "0.8.11"
rmp-serde = "1.1.1"
sha2 = "0.10.6"
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
serde_json = "1.0.96"
strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.28.2", features = ["full"] }
[dev-dependencies]
distant-auth = { version = "=0.20.0-alpha.8", path = "../distant-auth", features = ["tests"] }
distant-auth = { version = "=0.20.0", path = "../distant-auth", features = ["tests"] }
env_logger = "0.10.0"
serde_json = "1.0.96"
tempfile = "3.5.0"

@ -1,13 +1,13 @@
# distant net
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-net.svg
[distant_crates_lnk]: https://crates.io/crates/distant-net
[distant_doc_img]: https://docs.rs/distant-net/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-net
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_net-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -216,9 +216,7 @@ impl UntypedClient {
// If we have flagged that a reconnect is needed, attempt to do so
if needs_reconnect {
info!("Client encountered issue, attempting to reconnect");
if log::log_enabled!(log::Level::Debug) {
debug!("Using strategy {reconnect_strategy:?}");
}
debug!("Using strategy {reconnect_strategy:?}");
match reconnect_strategy.reconnect(&mut connection).await {
Ok(()) => {
info!("Client successfully reconnected!");
@ -236,7 +234,7 @@ impl UntypedClient {
macro_rules! silence_needs_reconnect {
() => {{
debug!(
info!(
"Client exceeded {}s without server activity, so attempting to reconnect",
silence_duration.as_secs_f32(),
);
@ -260,7 +258,7 @@ impl UntypedClient {
let ready = tokio::select! {
// NOTE: This should NEVER return None as we never allow the channel to close.
cb = shutdown_rx.recv() => {
debug!("Client got shutdown signal, so exiting event loop");
info!("Client got shutdown signal, so exiting event loop");
let cb = cb.expect("Impossible: shutdown channel closed!");
let _ = cb.send(Ok(()));
watcher_tx.send_replace(ConnectionState::Disconnected);
@ -335,7 +333,7 @@ impl UntypedClient {
}
Ok(None) => {
debug!("Connection closed");
info!("Connection closed");
needs_reconnect = true;
watcher_tx.send_replace(ConnectionState::Reconnecting);
continue;

@ -20,7 +20,7 @@ pub use windows::*;
use super::ClientConfig;
use crate::client::{Client, UntypedClient};
use crate::common::{Connection, Transport};
use crate::common::{Connection, Transport, Version};
/// Interface that performs the connection to produce a [`Transport`] for use by the [`Client`].
#[async_trait]
@ -46,6 +46,7 @@ pub struct ClientBuilder<H, C> {
connector: C,
config: ClientConfig,
connect_timeout: Option<Duration>,
version: Version,
}
impl<H, C> ClientBuilder<H, C> {
@ -56,6 +57,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -66,6 +68,7 @@ impl<H, C> ClientBuilder<H, C> {
config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -76,6 +79,7 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector,
connect_timeout: self.connect_timeout,
version: self.version,
}
}
@ -86,6 +90,18 @@ impl<H, C> ClientBuilder<H, C> {
config: self.config,
connector: self.connector,
connect_timeout: connect_timeout.into(),
version: self.version,
}
}
/// Configure the version of the client.
pub fn version(self, version: Version) -> Self {
Self {
auth_handler: self.auth_handler,
config: self.config,
connector: self.connector,
connect_timeout: self.connect_timeout,
version,
}
}
}
@ -97,6 +113,7 @@ impl ClientBuilder<(), ()> {
config: Default::default(),
connector: (),
connect_timeout: None,
version: Default::default(),
}
}
}
@ -119,6 +136,7 @@ where
let auth_handler = self.auth_handler;
let config = self.config;
let connect_timeout = self.connect_timeout;
let version = self.version;
let f = async move {
let transport = match connect_timeout {
@ -128,7 +146,7 @@ where
.and_then(convert::identity)?,
None => self.connector.connect().await?,
};
let connection = Connection::client(transport, auth_handler).await?;
let connection = Connection::client(transport, auth_handler, version).await?;
Ok(UntypedClient::spawn(connection, config))
};

@ -9,6 +9,7 @@ mod packet;
mod port;
mod transport;
pub(crate) mod utils;
mod version;
pub use any::*;
pub(crate) use connection::Connection;
@ -21,3 +22,4 @@ pub use map::*;
pub use packet::*;
pub use port::*;
pub use transport::*;
pub use version::*;

@ -11,6 +11,7 @@ use tokio::sync::oneshot;
use crate::common::InmemoryTransport;
use crate::common::{
Backup, FramedTransport, HeapSecretKey, Keychain, KeychainResult, Reconnectable, Transport,
TransportExt, Version,
};
/// Id of the connection
@ -110,6 +111,19 @@ where
debug!("[Conn {id}] Re-establishing connection");
Reconnectable::reconnect(transport).await?;
// Wait for exactly version bytes (24 where 8 bytes for major, minor, patch)
// but with a reconnect we don't actually validate it because we did that
// the first time we connected
//
// NOTE: We do this with the raw transport and not the framed version!
debug!("[Conn {id}] Waiting for server version");
if transport.as_mut_inner().read_exact(&mut [0u8; 24]).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
transport.client_handshake().await?;
@ -190,13 +204,42 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the client-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 1. Performs a version check with the server
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn client<H: AuthHandler + Send>(transport: T, handler: H) -> io::Result<Self> {
pub async fn client<H: AuthHandler + Send>(
transport: T,
handler: H,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Wait for exactly version bytes (24 where 8 bytes for major, minor, patch)
debug!("[Conn {id}] Waiting for server version");
let mut version_bytes = [0u8; 24];
if transport.read_exact(&mut version_bytes).await? != 24 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Wrong version byte len received",
));
}
// Compare versions for compatibility and drop the connection if incompatible
let server_version = Version::from_be_bytes(version_bytes);
debug!(
"[Conn {id}] Checking compatibility between client {version} & server {server_version}"
);
if !version.is_compatible_with(&server_version) {
return Err(io::Error::new(
io::ErrorKind::Other,
format!(
"Client version {version} is incompatible with server version {server_version}"
),
));
}
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -238,19 +281,25 @@ where
/// Transforms a raw [`Transport`] into an established [`Connection`] from the server-side by
/// performing the following:
///
/// 1. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 2. Authenticates the established connection to ensure it is valid by either using the
/// 1. Performs a version check with the client
/// 2. Handshakes to derive the appropriate [`Codec`](crate::Codec) to use
/// 3. Authenticates the established connection to ensure it is valid by either using the
/// given `verifier` or, if working with an existing client connection, will validate an OTP
/// from our database
/// 3. Restores pre-existing state using the provided backup, replaying any missing frames and
/// 4. Restores pre-existing state using the provided backup, replaying any missing frames and
/// receiving any frames from the other side
pub async fn server(
transport: T,
verifier: &Verifier,
keychain: Keychain<oneshot::Receiver<Backup>>,
version: Version,
) -> io::Result<Self> {
let id: ConnectionId = rand::random();
// Write the version as bytes
debug!("[Conn {id}] Sending version {version}");
transport.write_all(&version.to_be_bytes()).await?;
// Perform a handshake to ensure that the connection is properly established and encrypted
debug!("[Conn {id}] Performing handshake");
let mut transport: FramedTransport<T> =
@ -464,6 +513,60 @@ mod tests {
use super::*;
use crate::common::Frame;
/// Version used for the simulated server side of these connection tests.
macro_rules! server_version {
    () => {
        Version::new(1, 2, 3)
    };
}

/// Simulates the server by writing a version as big-endian bytes over the raw
/// (unframed) transport; the single-argument form defaults to [`server_version!`].
macro_rules! send_server_version {
    ($transport:expr, $version:expr) => {{
        ($transport)
            .as_mut_inner()
            .write_all(&$version.to_be_bytes())
            .await
            .unwrap();
    }};
    ($transport:expr) => {
        send_server_version!($transport, server_version!());
    };
}

/// Reads exactly 24 bytes (the on-wire version length) from the raw transport
/// and decodes them into a [`Version`], panicking on a short read.
macro_rules! receive_version {
    ($transport:expr) => {{
        let mut bytes = [0u8; 24];
        assert_eq!(
            ($transport)
                .as_mut_inner()
                .read_exact(&mut bytes)
                .await
                .unwrap(),
            24,
            "Wrong version len received"
        );
        Version::from_be_bytes(bytes)
    }};
}
// Verifies the client rejects a connection whose server-reported version fails
// the compatibility check performed before the codec handshake.
#[test(tokio::test)]
async fn client_should_fail_when_server_sends_incompatible_version() {
    let (mut t1, t2) = FramedTransport::pair(100);

    // Spawn a task to perform the client connection so we don't deadlock while simulating the
    // server actions on the other side
    let task = tokio::spawn(async move {
        Connection::client(t2.into_inner(), DummyAuthHandler, Version::new(1, 2, 3))
            .await
            .unwrap()
    });

    // Send invalid version to fail the handshake
    // (2.0.0 vs client 1.2.3 — presumed incompatible per Version::is_compatible_with)
    send_server_version!(t1, Version::new(2, 0, 0));

    // Client should fail
    task.await.unwrap_err();
}
#[test(tokio::test)]
async fn client_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -471,11 +574,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -490,11 +596,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -519,11 +628,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -559,11 +671,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -597,11 +712,14 @@ mod tests {
// Spawn a task to perform the client connection so we don't deadlock while simulating the
// server actions on the other side
let task = tokio::spawn(async move {
Connection::client(t2.into_inner(), DummyAuthHandler)
Connection::client(t2.into_inner(), DummyAuthHandler, server_version!())
.await
.unwrap()
});
// Send server version for client to confirm
send_server_version!(t1);
// Perform first step of connection by establishing the codec
t1.server_handshake().await.unwrap();
@ -629,6 +747,30 @@ mod tests {
assert_eq!(client.otp(), Some(&otp));
}
// Verifies the server-side connection errors out when the client disconnects
// right after receiving the version (i.e. before any handshake takes place).
#[test(tokio::test)]
async fn server_should_fail_if_client_drops_due_to_version() {
    let (mut t1, t2) = FramedTransport::pair(100);

    let verifier = Verifier::none();
    let keychain = Keychain::new();

    // Spawn a task to perform the server connection so we don't deadlock while simulating the
    // client actions on the other side
    let task = tokio::spawn(async move {
        Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
            .await
            .unwrap()
    });

    // Receive the version from the server
    let _ = receive_version!(t1);

    // Drop client connection as a result of an "incompatible version"
    drop(t1);

    // Server should fail
    task.await.unwrap_err();
}
#[test(tokio::test)]
async fn server_should_fail_if_codec_handshake_fails() {
let (mut t1, t2) = FramedTransport::pair(100);
@ -638,11 +780,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Send garbage to fail the handshake
t1.write_frame(Frame::new(b"invalid")).await.unwrap();
@ -659,11 +804,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -683,11 +831,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -717,11 +868,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -750,11 +904,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -790,11 +947,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -828,11 +988,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -866,11 +1029,14 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock while simulating the
// client actions on the other side
let task = tokio::spawn(async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -904,12 +1070,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -969,12 +1138,15 @@ mod tests {
let task = tokio::spawn({
let keychain = keychain.clone();
async move {
Connection::server(t2.into_inner(), &verifier, keychain)
Connection::server(t2.into_inner(), &verifier, keychain, server_version!())
.await
.unwrap()
}
});
// Receive the version from the server
let _ = receive_version!(t1);
// Perform first step of completing client-side of handshake
t1.client_handshake().await.unwrap();
@ -1029,13 +1201,13 @@ mod tests {
// Spawn a task to perform the server connection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
let mut server = task.await.unwrap();
@ -1063,14 +1235,14 @@ mod tests {
let verifier = Arc::clone(&verifier);
let keychain = keychain.clone();
tokio::spawn(async move {
Connection::server(t2, &verifier, keychain)
Connection::server(t2, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
})
};
// Perform the client-side of the connection
let mut client = Connection::client(t1, DummyAuthHandler)
let mut client = Connection::client(t1, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect from client");
@ -1093,6 +1265,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Send garbage to fail handshake from server-side
transport.write_frame(b"hello").await.unwrap();
@ -1108,6 +1283,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1126,6 +1304,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1162,6 +1343,9 @@ mod tests {
// Spawn a task to perform the client reconnection so we don't deadlock
let task = tokio::spawn(async move { client.reconnect().await.unwrap() });
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1205,6 +1389,9 @@ mod tests {
client
});
// Send a version, although it'll be ignored by a reconnecting client
send_server_version!(transport);
// Perform first step of completing server-side of handshake
transport.server_handshake().await.unwrap();
@ -1275,7 +1462,7 @@ mod tests {
// Spawn a task to perform the server reconnection so we don't deadlock
let task = tokio::spawn(async move {
Connection::server(transport, &verifier, keychain)
Connection::server(transport, &verifier, keychain, server_version!())
.await
.expect("Failed to connect from server")
});

@ -69,11 +69,8 @@ fn parse_scheme(s: &str) -> PResult<&str> {
fn parse_username_password(s: &str) -> PResult<(Option<&str>, Option<&str>)> {
let (auth, remaining) = s.split_once('@').ok_or("Auth missing @")?;
let (auth, username) = maybe(parse_until(|c| !c.is_alphanumeric()))(auth)?;
let (auth, password) = maybe(prefixed(
parse_char(':'),
parse_until(|c| !c.is_alphanumeric()),
))(auth)?;
let (auth, username) = maybe(parse_until(|c| c == ':'))(auth)?;
let (auth, password) = maybe(prefixed(parse_char(':'), |s| Ok(("", s))))(auth)?;
if !auth.is_empty() {
return Err("Dangling characters after username/password");
@ -297,16 +294,6 @@ mod tests {
let _ = parse_username_password("username:password").unwrap_err();
}
#[test]
fn should_fail_if_username_not_alphanumeric() {
let _ = parse_username_password("us\x1bername:password@").unwrap_err();
}
#[test]
fn should_fail_if_password_not_alphanumeric() {
let _ = parse_username_password("username:pas\x1bsword@").unwrap_err();
}
#[test]
fn should_return_username_if_available() {
let (s, username_password) = parse_username_password("username@").unwrap();
@ -331,6 +318,57 @@ mod tests {
assert_eq!(username_password.1, Some("password"));
}
// Hyphenated usernames must parse in full (the parser no longer stops at
// the first non-alphanumeric character).
#[test]
fn should_return_username_with_hyphen_and_password() {
    let (s, username_password) = parse_username_password("some-user:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("some-user"));
    assert_eq!(username_password.1, Some("password"));
}
// Leading/trailing hyphens in the username are preserved verbatim.
#[test]
fn should_return_username_password_if_username_starts_or_ends_with_hyphen() {
    let (s, username_password) = parse_username_password("-some-user-:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("-some-user-"));
    assert_eq!(username_password.1, Some("password"));
}
// Windows-style domain usernames (e.g. `DOMAIN\user`) must parse both with
// and without an accompanying password.
#[test]
fn should_support_username_with_backslash() {
    let (s, username_password) = parse_username_password(r#"orgname\myname@"#).unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some(r#"orgname\myname"#));
    assert_eq!(username_password.1, None);

    let (s, username_password) = parse_username_password(r#"orgname\myname:password@"#).unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some(r#"orgname\myname"#));
    assert_eq!(username_password.1, Some("password"));
}
#[test]
fn should_support_username_and_password_with_arbitrary_characters() {
let (s, username_password) =
parse_username_password("name1!#$%^&*()[]{{}}\x1b:pass1!#$%^&*()[]{{}}\x1b@")
.unwrap();
assert_eq!(s, "");
assert_eq!(username_password.0, Some("name1!#$%^&*()[]{{}}\x1b"));
assert_eq!(username_password.1, Some("pass1!#$%^&*()[]{{}}\x1b"));
}
// Only the FIRST ':' splits username from password; later colons belong
// to the password itself.
#[test]
fn should_support_colons_in_password() {
    let (s, username_password) = parse_username_password("user:name:password@").unwrap();
    assert_eq!(s, "");
    assert_eq!(username_password.0, Some("user"));
    assert_eq!(username_password.1, Some("name:password"));
}
#[test]
fn should_consume_up_to_the_ending_sequence() {
let (s, username_password) =
@ -338,6 +376,18 @@ mod tests {
assert_eq!(s, "example.com");
assert_eq!(username_password.0, Some("username"));
assert_eq!(username_password.1, Some("password"));
let (s, username_password) =
parse_username_password("user@name:password@").unwrap();
assert_eq!(s, "name:password@");
assert_eq!(username_password.0, Some("user"));
assert_eq!(username_password.1, None);
let (s, username_password) =
parse_username_password("username:pass@word@").unwrap();
assert_eq!(s, "word@");
assert_eq!(username_password.0, Some("username"));
assert_eq!(username_password.1, Some("pass"));
}
}
@ -653,6 +703,16 @@ mod tests {
assert_eq!(destination.port, Some(22));
}
// Full-destination parse: hyphenated username with host and port.
#[test]
fn parse_should_succeed_if_given_username_has_hyphen() {
    let destination = parse("some-user@example.com:22").unwrap();
    assert_eq!(destination.scheme, None);
    assert_eq!(destination.username.as_deref(), Some("some-user"));
    assert_eq!(destination.password, None);
    assert_eq!(destination.host, "example.com");
    assert_eq!(destination.port, Some(22));
}
#[test]
fn parse_should_succeed_if_given_password_host_and_port() {
let destination = parse(":password@example.com:22").unwrap();

File diff suppressed because it is too large Load Diff

@ -0,0 +1,109 @@
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use std::{fmt, io};
use derive_more::IntoIterator;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::common::{utils, Value};
/// Generates a new [`Header`] of key/value pairs based on literals.
///
/// ```
/// use distant_net::header;
///
/// let _header = header!("key" -> "value", "key2" -> 123);
/// ```
#[macro_export]
macro_rules! header {
    // Each `"key" -> value` pair becomes an `insert` into a freshly-created
    // header; a trailing comma after the last pair is accepted.
    ($($key:literal -> $value:expr),* $(,)?) => {{
        let mut _header = $crate::common::Header::default();
        $(
            _header.insert($key, $value);
        )*
        _header
    }};
}
/// Represents a packet header comprised of arbitrary data tied to string keys.
#[derive(Clone, Debug, Default, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Header(HashMap<String, Value>);

impl Header {
    /// Creates an empty [`Header`] newtype wrapper.
    pub fn new() -> Self {
        Self(HashMap::new())
    }

    /// Exists purely to support serde serialization checks.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        let Self(map) = self;
        map.is_empty()
    }

    /// Inserts a key-value pair into the map.
    ///
    /// If the map did not have this key present, [`None`] is returned.
    ///
    /// If the map did have this key present, the value is updated, and the old value is returned.
    /// The key is not updated, though; this matters for types that can be `==` without being
    /// identical. See the [module-level documentation](std::collections#insert-and-complex-keys)
    /// for more.
    pub fn insert(&mut self, key: impl Into<String>, value: impl Into<Value>) -> Option<Value> {
        // Convert both up front; the target types are fixed by the map's K/V.
        let (key, value) = (key.into(), value.into());
        self.0.insert(key, value)
    }

    /// Retrieves a value from the header, attempting to convert it to the specified type `T`
    /// by cloning the value and then converting it.
    pub fn get_as<T>(&self, key: impl AsRef<str>) -> Option<io::Result<T>>
    where
        T: DeserializeOwned,
    {
        // Missing key -> None; present key -> Some(conversion result).
        let value = self.0.get(key.as_ref())?;
        Some(value.clone().cast_as())
    }

    /// Serializes the header into bytes.
    pub fn to_vec(&self) -> io::Result<Vec<u8>> {
        utils::serialize_to_vec(self)
    }

    /// Deserializes the header from bytes.
    pub fn from_slice(slice: &[u8]) -> io::Result<Self> {
        utils::deserialize_from_slice(slice)
    }
}
impl Deref for Header {
    type Target = HashMap<String, Value>;

    // Exposes the underlying map's read-only API directly on Header.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Header {
    // Exposes the underlying map's mutable API directly on Header.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl fmt::Display for Header {
    /// Formats the header as `{"key" = value, "key2" = value2}`, rendering each
    /// value as JSON (or `--` when a value fails to serialize).
    ///
    /// NOTE(review): entry order follows `HashMap` iteration order and is therefore
    /// unspecified.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{{")?;
        for (i, (key, value)) in self.0.iter().enumerate() {
            // Fix: separate entries so multiple pairs no longer run together
            // (previously rendered as `{"a" = 1"b" = 2}`).
            if i > 0 {
                write!(f, ", ")?;
            }
            // Fall back to a placeholder on serialization failure rather than
            // aborting the entire Display impl.
            let value = serde_json::to_string(value).unwrap_or_else(|_| String::from("--"));
            write!(f, "\"{key}\" = {value}")?;
        }
        write!(f, "}}")
    }
}

@ -5,12 +5,17 @@ use derive_more::{Display, Error};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use super::{parse_msg_pack_str, write_str_msg_pack, Id};
use super::{read_header_bytes, read_key_eq, read_str_bytes, Header, Id};
use crate::common::utils;
use crate::header;
/// Represents a request to send
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Request<T> {
/// Optional header data to include with request
#[serde(default, skip_serializing_if = "Header::is_empty")]
pub header: Header,
/// Unique id associated with the request
pub id: Id,
@ -19,9 +24,10 @@ pub struct Request<T> {
}
impl<T> Request<T> {
/// Creates a new request with a random, unique id
/// Creates a new request with a random, unique id and no header data
pub fn new(payload: T) -> Self {
Self {
header: header!(),
id: rand::random::<u64>().to_string(),
payload,
}
@ -45,6 +51,11 @@ where
/// Attempts to convert a typed request to an untyped request
pub fn to_untyped_request(&self) -> io::Result<UntypedRequest> {
Ok(UntypedRequest {
header: Cow::Owned(if !self.header.is_empty() {
utils::serialize_to_vec(&self.header)?
} else {
Vec::new()
}),
id: Cow::Borrowed(&self.id),
payload: Cow::Owned(self.to_payload_vec()?),
})
@ -73,13 +84,34 @@ pub enum UntypedRequestParseError {
/// When the bytes do not represent a request
WrongType,
/// When a header should be present, but the key is wrong
InvalidHeaderKey,
/// When a header should be present, but the header bytes are wrong
InvalidHeader,
/// When the key for the id is wrong
InvalidIdKey,
/// When the id is not a valid UTF-8 string
InvalidId,
/// When the key for the payload is wrong
InvalidPayloadKey,
}
#[inline]
fn header_is_empty(header: &[u8]) -> bool {
header.is_empty()
}
/// Represents a request to send whose payload is bytes instead of a specific type
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct UntypedRequest<'a> {
/// Header data associated with the request as bytes
#[serde(default, skip_serializing_if = "header_is_empty")]
pub header: Cow<'a, [u8]>,
/// Unique id associated with the request
pub id: Cow<'a, str>,
@ -91,6 +123,11 @@ impl<'a> UntypedRequest<'a> {
/// Attempts to convert an untyped request to a typed request
pub fn to_typed_request<T: DeserializeOwned>(&self) -> io::Result<Request<T>> {
Ok(Request {
header: if header_is_empty(&self.header) {
header!()
} else {
utils::deserialize_from_slice(&self.header)?
},
id: self.id.to_string(),
payload: utils::deserialize_from_slice(&self.payload)?,
})
@ -99,6 +136,10 @@ impl<'a> UntypedRequest<'a> {
/// Convert into a borrowed version
pub fn as_borrowed(&self) -> UntypedRequest<'_> {
UntypedRequest {
header: match &self.header {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_slice()),
},
id: match &self.id {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_str()),
@ -113,6 +154,10 @@ impl<'a> UntypedRequest<'a> {
/// Convert into an owned version
pub fn into_owned(self) -> UntypedRequest<'static> {
UntypedRequest {
header: match self.header {
Cow::Borrowed(x) => Cow::Owned(x.to_vec()),
Cow::Owned(x) => Cow::Owned(x),
},
id: match self.id {
Cow::Borrowed(x) => Cow::Owned(x.to_string()),
Cow::Owned(x) => Cow::Owned(x),
@ -124,6 +169,11 @@ impl<'a> UntypedRequest<'a> {
}
}
/// Updates the header of the request to the given `header`.
pub fn set_header(&mut self, header: impl IntoIterator<Item = u8>) {
self.header = Cow::Owned(header.into_iter().collect());
}
/// Updates the id of the request to the given `id`.
pub fn set_id(&mut self, id: impl Into<String>) {
self.id = Cow::Owned(id.into());
@ -131,61 +181,80 @@ impl<'a> UntypedRequest<'a> {
/// Allocates a new collection of bytes representing the request.
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![0x82];
let mut bytes = vec![];
let has_header = !header_is_empty(&self.header);
if has_header {
rmp::encode::write_map_len(&mut bytes, 3).unwrap();
} else {
rmp::encode::write_map_len(&mut bytes, 2).unwrap();
}
if has_header {
rmp::encode::write_str(&mut bytes, "header").unwrap();
bytes.extend_from_slice(&self.header);
}
write_str_msg_pack("id", &mut bytes);
write_str_msg_pack(&self.id, &mut bytes);
rmp::encode::write_str(&mut bytes, "id").unwrap();
rmp::encode::write_str(&mut bytes, &self.id).unwrap();
write_str_msg_pack("payload", &mut bytes);
rmp::encode::write_str(&mut bytes, "payload").unwrap();
bytes.extend_from_slice(&self.payload);
bytes
}
/// Parses a collection of bytes, returning a partial request if it can be potentially
/// represented as a [`Request`] depending on the payload, or the original bytes if it does not
/// represent a [`Request`]
/// represented as a [`Request`] depending on the payload.
///
/// NOTE: This supports parsing an invalid request where the payload would not properly
/// deserialize, but the bytes themselves represent a complete request of some kind.
pub fn from_slice(input: &'a [u8]) -> Result<Self, UntypedRequestParseError> {
if input.len() < 2 {
if input.is_empty() {
return Err(UntypedRequestParseError::WrongType);
}
// MsgPack marks a fixmap using 0x80 - 0x8f to indicate the size (up to 15 elements).
//
// In the case of the request, there are only two elements: id and payload. So the first
// byte should ALWAYS be 0x82 (130).
if input[0] != 0x82 {
return Err(UntypedRequestParseError::WrongType);
}
let has_header = match rmp::Marker::from_u8(input[0]) {
rmp::Marker::FixMap(2) => false,
rmp::Marker::FixMap(3) => true,
_ => return Err(UntypedRequestParseError::WrongType),
};
// Skip the first byte representing the fixmap
// Advance position by marker
let input = &input[1..];
// Validate that first field is id
let (input, id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::WrongType)?;
if id_key != "id" {
return Err(UntypedRequestParseError::WrongType);
}
// Parse the header if we have one
let (header, input) = if has_header {
let (_, input) = read_key_eq(input, "header")
.map_err(|_| UntypedRequestParseError::InvalidHeaderKey)?;
let (header, input) =
read_header_bytes(input).map_err(|_| UntypedRequestParseError::InvalidHeader)?;
(header, input)
} else {
([0u8; 0].as_slice(), input)
};
// Validate that next field is id
let (_, input) =
read_key_eq(input, "id").map_err(|_| UntypedRequestParseError::InvalidIdKey)?;
// Get the id itself
let (input, id) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::InvalidId)?;
let (id, input) = read_str_bytes(input).map_err(|_| UntypedRequestParseError::InvalidId)?;
// Validate that second field is payload
let (input, payload_key) =
parse_msg_pack_str(input).map_err(|_| UntypedRequestParseError::WrongType)?;
if payload_key != "payload" {
return Err(UntypedRequestParseError::WrongType);
}
// Validate that final field is payload
let (_, input) = read_key_eq(input, "payload")
.map_err(|_| UntypedRequestParseError::InvalidPayloadKey)?;
let header = Cow::Borrowed(header);
let id = Cow::Borrowed(id);
let payload = Cow::Borrowed(input);
Ok(Self { id, payload })
Ok(Self {
header,
id,
payload,
})
}
}
@ -198,18 +267,33 @@ mod tests {
const TRUE_BYTE: u8 = 0xc3;
const NEVER_USED_BYTE: u8 = 0xc1;
// fixstr of 6 bytes with str "header"
const HEADER_FIELD_BYTES: &[u8] = &[0xa6, b'h', b'e', b'a', b'd', b'e', b'r'];
// fixmap of 2 objects with
// 1. key fixstr "key" and value fixstr "value"
// 1. key fixstr "num" and value fixint 123
const HEADER_BYTES: &[u8] = &[
0x82, // valid map with 2 pair
0xa3, b'k', b'e', b'y', // key: "key"
0xa5, b'v', b'a', b'l', b'u', b'e', // value: "value"
0xa3, b'n', b'u', b'm', // key: "num"
0x7b, // value: 123
];
// fixstr of 2 bytes with str "id"
const ID_FIELD_BYTES: &[u8] = &[0xa2, 0x69, 0x64];
const ID_FIELD_BYTES: &[u8] = &[0xa2, b'i', b'd'];
// fixstr of 7 bytes with str "payload"
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64];
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, b'p', b'a', b'y', b'l', b'o', b'a', b'd'];
/// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, 0x74, 0x65, 0x73, 0x74];
// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, b't', b'e', b's', b't'];
#[test]
fn untyped_request_should_support_converting_to_bytes() {
let bytes = Request {
header: header!(),
id: "some id".to_string(),
payload: true,
}
@ -220,9 +304,44 @@ mod tests {
assert_eq!(untyped_request.to_bytes(), bytes);
}
#[test]
fn untyped_request_should_support_converting_to_bytes_with_header() {
    // Round-trip: serialize a typed request that carries a header, reparse it as
    // an untyped request, and confirm to_bytes() reproduces the original
    // serialized form byte-for-byte.
    let bytes = Request {
        header: header!("key" -> 123),
        id: "some id".to_string(),
        payload: true,
    }
    .to_vec()
    .unwrap();
    let untyped_request = UntypedRequest::from_slice(&bytes).unwrap();
    assert_eq!(untyped_request.to_bytes(), bytes);
}
#[test]
fn untyped_request_should_support_parsing_from_request_bytes_with_header() {
    // Serialize a typed request that includes header data.
    let bytes = Request {
        header: header!("key" -> 123),
        id: "some id".to_string(),
        payload: true,
    }
    .to_vec()
    .unwrap();
    // The untyped parse should expose the raw serialized header bytes alongside
    // the id and payload bytes. Cow compares by content, so Owned vs Borrowed
    // variants here do not affect equality.
    assert_eq!(
        UntypedRequest::from_slice(&bytes),
        Ok(UntypedRequest {
            header: Cow::Owned(utils::serialize_to_vec(&header!("key" -> 123)).unwrap()),
            id: Cow::Borrowed("some id"),
            payload: Cow::Owned(vec![TRUE_BYTE]),
        })
    );
}
#[test]
fn untyped_request_should_support_parsing_from_request_bytes_with_valid_payload() {
let bytes = Request {
header: header!(),
id: "some id".to_string(),
payload: true,
}
@ -232,6 +351,7 @@ mod tests {
assert_eq!(
UntypedRequest::from_slice(&bytes),
Ok(UntypedRequest {
header: Cow::Owned(vec![]),
id: Cow::Borrowed("some id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
})
@ -242,6 +362,7 @@ mod tests {
fn untyped_request_should_support_parsing_from_request_bytes_with_invalid_payload() {
// Request with id < 32 bytes
let mut bytes = Request {
header: header!(),
id: "".to_string(),
payload: true,
}
@ -255,12 +376,35 @@ mod tests {
assert_eq!(
UntypedRequest::from_slice(&bytes),
Ok(UntypedRequest {
header: Cow::Owned(vec![]),
id: Cow::Owned("".to_string()),
payload: Cow::Owned(vec![TRUE_BYTE, NEVER_USED_BYTE]),
})
);
}
#[test]
fn untyped_request_should_support_parsing_full_request() {
    // Hand-built msgpack: fixmap(3) containing header, id ("test"), and
    // payload (true) fields in that order.
    let input = [
        &[0x83],
        HEADER_FIELD_BYTES,
        HEADER_BYTES,
        ID_FIELD_BYTES,
        TEST_STR_BYTES,
        PAYLOAD_FIELD_BYTES,
        &[TRUE_BYTE],
    ]
    .concat();
    // Convert into typed so we can test
    let untyped_request = UntypedRequest::from_slice(&input).unwrap();
    let request: Request<bool> = untyped_request.to_typed_request().unwrap();
    assert_eq!(request.header, header!("key" -> "value", "num" -> 123));
    assert_eq!(request.id, "test");
    assert!(request.payload);
}
#[test]
fn untyped_request_should_fail_to_parse_if_given_bytes_not_representing_a_request() {
// Empty byte slice
@ -281,10 +425,46 @@ mod tests {
Err(UntypedRequestParseError::WrongType)
);
// Invalid header key
assert_eq!(
UntypedRequest::from_slice(
[
&[0x83],
&[0xa0], // header key would be defined here, set to empty str
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedRequestParseError::InvalidHeaderKey)
);
// Invalid header bytes
assert_eq!(
UntypedRequest::from_slice(
[
&[0x83],
HEADER_FIELD_BYTES,
&[0xa0], // header would be defined here, set to empty str
ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedRequestParseError::InvalidHeader)
);
// Missing fields (corrupt data)
assert_eq!(
UntypedRequest::from_slice(&[0x82]),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidIdKey)
);
// Missing id field (has valid data itself)
@ -300,7 +480,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidIdKey)
);
// Non-str id field value
@ -348,7 +528,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedRequestParseError::WrongType)
Err(UntypedRequestParseError::InvalidPayloadKey)
);
}
}

@ -5,12 +5,17 @@ use derive_more::{Display, Error};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use super::{parse_msg_pack_str, write_str_msg_pack, Id};
use super::{read_header_bytes, read_key_eq, read_str_bytes, Header, Id};
use crate::common::utils;
use crate::header;
/// Represents a response received related to some response
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Response<T> {
/// Optional header data to include with response
#[serde(default, skip_serializing_if = "Header::is_empty")]
pub header: Header,
/// Unique id associated with the response
pub id: Id,
@ -22,9 +27,10 @@ pub struct Response<T> {
}
impl<T> Response<T> {
/// Creates a new response with a random, unique id
/// Creates a new response with a random, unique id and no header data
pub fn new(origin_id: Id, payload: T) -> Self {
Self {
header: header!(),
id: rand::random::<u64>().to_string(),
origin_id,
payload,
@ -49,6 +55,11 @@ where
/// Attempts to convert a typed response to an untyped response
pub fn to_untyped_response(&self) -> io::Result<UntypedResponse> {
Ok(UntypedResponse {
header: Cow::Owned(if !self.header.is_empty() {
utils::serialize_to_vec(&self.header)?
} else {
Vec::new()
}),
id: Cow::Borrowed(&self.id),
origin_id: Cow::Borrowed(&self.origin_id),
payload: Cow::Owned(self.to_payload_vec()?),
@ -72,16 +83,40 @@ pub enum UntypedResponseParseError {
/// When the bytes do not represent a response
WrongType,
/// When a header should be present, but the key is wrong
InvalidHeaderKey,
/// When a header should be present, but the header bytes are wrong
InvalidHeader,
/// When the key for the id is wrong
InvalidIdKey,
/// When the id is not a valid UTF-8 string
InvalidId,
/// When the key for the origin id is wrong
InvalidOriginIdKey,
/// When the origin id is not a valid UTF-8 string
InvalidOriginId,
/// When the key for the payload is wrong
InvalidPayloadKey,
}
#[inline]
fn header_is_empty(header: &[u8]) -> bool {
header.is_empty()
}
/// Represents a response to send whose payload is bytes instead of a specific type
#[derive(Clone, Debug, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
pub struct UntypedResponse<'a> {
/// Header data associated with the response as bytes
#[serde(default, skip_serializing_if = "header_is_empty")]
pub header: Cow<'a, [u8]>,
/// Unique id associated with the response
pub id: Cow<'a, str>,
@ -93,9 +128,14 @@ pub struct UntypedResponse<'a> {
}
impl<'a> UntypedResponse<'a> {
/// Attempts to convert an untyped request to a typed request
/// Attempts to convert an untyped response to a typed response
pub fn to_typed_response<T: DeserializeOwned>(&self) -> io::Result<Response<T>> {
Ok(Response {
header: if header_is_empty(&self.header) {
header!()
} else {
utils::deserialize_from_slice(&self.header)?
},
id: self.id.to_string(),
origin_id: self.origin_id.to_string(),
payload: utils::deserialize_from_slice(&self.payload)?,
@ -105,6 +145,10 @@ impl<'a> UntypedResponse<'a> {
/// Convert into a borrowed version
pub fn as_borrowed(&self) -> UntypedResponse<'_> {
UntypedResponse {
header: match &self.header {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_slice()),
},
id: match &self.id {
Cow::Borrowed(x) => Cow::Borrowed(x),
Cow::Owned(x) => Cow::Borrowed(x.as_str()),
@ -123,6 +167,10 @@ impl<'a> UntypedResponse<'a> {
/// Convert into an owned version
pub fn into_owned(self) -> UntypedResponse<'static> {
UntypedResponse {
header: match self.header {
Cow::Borrowed(x) => Cow::Owned(x.to_vec()),
Cow::Owned(x) => Cow::Owned(x),
},
id: match self.id {
Cow::Borrowed(x) => Cow::Owned(x.to_string()),
Cow::Owned(x) => Cow::Owned(x),
@ -138,6 +186,11 @@ impl<'a> UntypedResponse<'a> {
}
}
/// Updates the header of the response to the given `header`.
pub fn set_header(&mut self, header: impl IntoIterator<Item = u8>) {
self.header = Cow::Owned(header.into_iter().collect());
}
/// Updates the id of the response to the given `id`.
pub fn set_id(&mut self, id: impl Into<String>) {
self.id = Cow::Owned(id.into());
@ -150,76 +203,90 @@ impl<'a> UntypedResponse<'a> {
/// Allocates a new collection of bytes representing the response.
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = vec![0x83];
let mut bytes = vec![];
write_str_msg_pack("id", &mut bytes);
write_str_msg_pack(&self.id, &mut bytes);
let has_header = !header_is_empty(&self.header);
if has_header {
rmp::encode::write_map_len(&mut bytes, 4).unwrap();
} else {
rmp::encode::write_map_len(&mut bytes, 3).unwrap();
}
write_str_msg_pack("origin_id", &mut bytes);
write_str_msg_pack(&self.origin_id, &mut bytes);
if has_header {
rmp::encode::write_str(&mut bytes, "header").unwrap();
bytes.extend_from_slice(&self.header);
}
write_str_msg_pack("payload", &mut bytes);
rmp::encode::write_str(&mut bytes, "id").unwrap();
rmp::encode::write_str(&mut bytes, &self.id).unwrap();
rmp::encode::write_str(&mut bytes, "origin_id").unwrap();
rmp::encode::write_str(&mut bytes, &self.origin_id).unwrap();
rmp::encode::write_str(&mut bytes, "payload").unwrap();
bytes.extend_from_slice(&self.payload);
bytes
}
/// Parses a collection of bytes, returning an untyped response if it can be potentially
/// represented as a [`Response`] depending on the payload, or the original bytes if it does not
/// represent a [`Response`].
/// represented as a [`Response`] depending on the payload.
///
/// NOTE: This supports parsing an invalid response where the payload would not properly
/// deserialize, but the bytes themselves represent a complete response of some kind.
pub fn from_slice(input: &'a [u8]) -> Result<Self, UntypedResponseParseError> {
if input.len() < 2 {
if input.is_empty() {
return Err(UntypedResponseParseError::WrongType);
}
// MsgPack marks a fixmap using 0x80 - 0x8f to indicate the size (up to 15 elements).
//
// In the case of the request, there are only three elements: id, origin_id, and payload.
// So the first byte should ALWAYS be 0x83 (131).
if input[0] != 0x83 {
return Err(UntypedResponseParseError::WrongType);
}
let has_header = match rmp::Marker::from_u8(input[0]) {
rmp::Marker::FixMap(3) => false,
rmp::Marker::FixMap(4) => true,
_ => return Err(UntypedResponseParseError::WrongType),
};
// Skip the first byte representing the fixmap
// Advance position by marker
let input = &input[1..];
// Validate that first field is id
let (input, id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if id_key != "id" {
return Err(UntypedResponseParseError::WrongType);
}
// Parse the header if we have one
let (header, input) = if has_header {
let (_, input) = read_key_eq(input, "header")
.map_err(|_| UntypedResponseParseError::InvalidHeaderKey)?;
let (header, input) =
read_header_bytes(input).map_err(|_| UntypedResponseParseError::InvalidHeader)?;
(header, input)
} else {
([0u8; 0].as_slice(), input)
};
// Validate that next field is id
let (_, input) =
read_key_eq(input, "id").map_err(|_| UntypedResponseParseError::InvalidIdKey)?;
// Get the id itself
let (input, id) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::InvalidId)?;
let (id, input) =
read_str_bytes(input).map_err(|_| UntypedResponseParseError::InvalidId)?;
// Validate that second field is origin_id
let (input, origin_id_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if origin_id_key != "origin_id" {
return Err(UntypedResponseParseError::WrongType);
}
// Validate that next field is origin_id
let (_, input) = read_key_eq(input, "origin_id")
.map_err(|_| UntypedResponseParseError::InvalidOriginIdKey)?;
// Get the origin_id itself
let (input, origin_id) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::InvalidOriginId)?;
let (origin_id, input) =
read_str_bytes(input).map_err(|_| UntypedResponseParseError::InvalidOriginId)?;
// Validate that second field is payload
let (input, payload_key) =
parse_msg_pack_str(input).map_err(|_| UntypedResponseParseError::WrongType)?;
if payload_key != "payload" {
return Err(UntypedResponseParseError::WrongType);
}
// Validate that final field is payload
let (_, input) = read_key_eq(input, "payload")
.map_err(|_| UntypedResponseParseError::InvalidPayloadKey)?;
let header = Cow::Borrowed(header);
let id = Cow::Borrowed(id);
let origin_id = Cow::Borrowed(origin_id);
let payload = Cow::Borrowed(input);
Ok(Self {
header,
id,
origin_id,
payload,
@ -236,22 +303,52 @@ mod tests {
const TRUE_BYTE: u8 = 0xc3;
const NEVER_USED_BYTE: u8 = 0xc1;
// fixstr of 6 bytes with str "header"
const HEADER_FIELD_BYTES: &[u8] = &[0xa6, b'h', b'e', b'a', b'd', b'e', b'r'];
// fixmap of 2 objects with
// 1. key fixstr "key" and value fixstr "value"
// 1. key fixstr "num" and value fixint 123
const HEADER_BYTES: &[u8] = &[
0x82, // valid map with 2 pair
0xa3, b'k', b'e', b'y', // key: "key"
0xa5, b'v', b'a', b'l', b'u', b'e', // value: "value"
0xa3, b'n', b'u', b'm', // key: "num"
0x7b, // value: 123
];
// fixstr of 2 bytes with str "id"
const ID_FIELD_BYTES: &[u8] = &[0xa2, 0x69, 0x64];
const ID_FIELD_BYTES: &[u8] = &[0xa2, b'i', b'd'];
// fixstr of 9 bytes with str "origin_id"
const ORIGIN_ID_FIELD_BYTES: &[u8] =
&[0xa9, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64];
// fixstr of 7 bytes with str "payload"
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64];
const PAYLOAD_FIELD_BYTES: &[u8] = &[0xa7, b'p', b'a', b'y', b'l', b'o', b'a', b'd'];
/// fixstr of 4 bytes with str "test"
const TEST_STR_BYTES: &[u8] = &[0xa4, 0x74, 0x65, 0x73, 0x74];
const TEST_STR_BYTES: &[u8] = &[0xa4, b't', b'e', b's', b't'];
#[test]
fn untyped_response_should_support_converting_to_bytes() {
let bytes = Response {
header: header!(),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
let untyped_response = UntypedResponse::from_slice(&bytes).unwrap();
assert_eq!(untyped_response.to_bytes(), bytes);
}
#[test]
fn untyped_response_should_support_converting_to_bytes_with_header() {
let bytes = Response {
header: header!("key" -> 123),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
@ -263,9 +360,32 @@ mod tests {
assert_eq!(untyped_response.to_bytes(), bytes);
}
#[test]
fn untyped_response_should_support_parsing_from_response_bytes_with_header() {
let bytes = Response {
header: header!("key" -> 123),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
}
.to_vec()
.unwrap();
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(utils::serialize_to_vec(&header!("key" -> 123)).unwrap()),
id: Cow::Borrowed("some id"),
origin_id: Cow::Borrowed("some origin id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
})
);
}
#[test]
fn untyped_response_should_support_parsing_from_response_bytes_with_valid_payload() {
let bytes = Response {
header: header!(),
id: "some id".to_string(),
origin_id: "some origin id".to_string(),
payload: true,
@ -276,6 +396,7 @@ mod tests {
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(vec![]),
id: Cow::Borrowed("some id"),
origin_id: Cow::Borrowed("some origin id"),
payload: Cow::Owned(vec![TRUE_BYTE]),
@ -287,6 +408,7 @@ mod tests {
fn untyped_response_should_support_parsing_from_response_bytes_with_invalid_payload() {
// Response with id < 32 bytes
let mut bytes = Response {
header: header!(),
id: "".to_string(),
origin_id: "".to_string(),
payload: true,
@ -301,6 +423,7 @@ mod tests {
assert_eq!(
UntypedResponse::from_slice(&bytes),
Ok(UntypedResponse {
header: Cow::Owned(vec![]),
id: Cow::Owned("".to_string()),
origin_id: Cow::Owned("".to_string()),
payload: Cow::Owned(vec![TRUE_BYTE, NEVER_USED_BYTE]),
@ -308,6 +431,31 @@ mod tests {
);
}
#[test]
fn untyped_response_should_support_parsing_full_response() {
    // Renamed from `..._full_request`: this test parses a *response*, and the
    // old name collided conceptually with the request-side test of the same name.
    //
    // Hand-built msgpack: fixmap(4) containing header, id ("test"),
    // origin_id ("og"), and payload (true) fields in that order.
    let input = [
        &[0x84],
        HEADER_FIELD_BYTES,
        HEADER_BYTES,
        ID_FIELD_BYTES,
        TEST_STR_BYTES,
        ORIGIN_ID_FIELD_BYTES,
        &[0xa2, b'o', b'g'],
        PAYLOAD_FIELD_BYTES,
        &[TRUE_BYTE],
    ]
    .concat();
    // Convert into typed so we can test
    let untyped_response = UntypedResponse::from_slice(&input).unwrap();
    let response: Response<bool> = untyped_response.to_typed_response().unwrap();
    assert_eq!(response.header, header!("key" -> "value", "num" -> 123));
    assert_eq!(response.id, "test");
    assert_eq!(response.origin_id, "og");
    assert!(response.payload);
}
#[test]
fn untyped_response_should_fail_to_parse_if_given_bytes_not_representing_a_response() {
// Empty byte slice
@ -328,10 +476,50 @@ mod tests {
Err(UntypedResponseParseError::WrongType)
);
// Invalid header key
assert_eq!(
UntypedResponse::from_slice(
[
&[0x84],
&[0xa0], // header key would be defined here, set to empty str
HEADER_BYTES,
ID_FIELD_BYTES,
TEST_STR_BYTES,
ORIGIN_ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedResponseParseError::InvalidHeaderKey)
);
// Invalid header bytes
assert_eq!(
UntypedResponse::from_slice(
[
&[0x84],
HEADER_FIELD_BYTES,
&[0xa0], // header would be defined here, set to empty str
ID_FIELD_BYTES,
TEST_STR_BYTES,
ORIGIN_ID_FIELD_BYTES,
TEST_STR_BYTES,
PAYLOAD_FIELD_BYTES,
&[TRUE_BYTE],
]
.concat()
.as_slice()
),
Err(UntypedResponseParseError::InvalidHeader)
);
// Missing fields (corrupt data)
assert_eq!(
UntypedResponse::from_slice(&[0x83]),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidIdKey)
);
// Missing id field (has valid data itself)
@ -349,7 +537,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidIdKey)
);
// Non-str id field value
@ -403,7 +591,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidOriginIdKey)
);
// Non-str origin_id field value
@ -457,7 +645,7 @@ mod tests {
.concat()
.as_slice()
),
Err(UntypedResponseParseError::WrongType)
Err(UntypedResponseParseError::InvalidPayloadKey)
);
}
}

@ -0,0 +1,112 @@
use std::borrow::Cow;
use std::io;
use std::ops::{Deref, DerefMut};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::common::utils;
/// Generic value type for data passed through header.
///
/// Thin newtype over [`serde_json::Value`]; `#[serde(transparent)]` makes it
/// serialize and deserialize exactly like the wrapped JSON value, with no
/// extra nesting.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Value(serde_json::Value);
impl Value {
    /// Creates a new [`Value`] by converting `value` into the underlying JSON type.
    pub fn new(value: impl Into<serde_json::Value>) -> Self {
        let inner: serde_json::Value = value.into();
        Self(inner)
    }

    /// Serializes the value into bytes.
    pub fn to_vec(&self) -> io::Result<Vec<u8>> {
        utils::serialize_to_vec(self)
    }

    /// Deserializes the value from bytes.
    pub fn from_slice(slice: &[u8]) -> io::Result<Self> {
        utils::deserialize_from_slice(slice)
    }

    /// Attempts to convert this generic value to a specific type `T`,
    /// consuming the value in the process.
    pub fn cast_as<T>(self) -> io::Result<T>
    where
        T: DeserializeOwned,
    {
        match serde_json::from_value(self.0) {
            Ok(converted) => Ok(converted),
            Err(x) => Err(io::Error::new(io::ErrorKind::InvalidData, x)),
        }
    }
}
impl Deref for Value {
type Target = serde_json::Value;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Value {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// Implements `From<$type> for Value` for each listed type by delegating to the
/// corresponding `From` impl already provided by [`serde_json::Value`].
macro_rules! impl_from {
    ($($type:ty),+) => {
        $(
            impl From<$type> for Value {
                fn from(x: $type) -> Self {
                    Self(From::from(x))
                }
            }
        )+
    };
}
// Primitive and std types that serde_json's Value can be built from directly.
impl_from!(
    (),
    i8, i16, i32, i64, isize,
    u8, u16, u32, u64, usize,
    f32, f64,
    bool, String, serde_json::Number,
    serde_json::Map<String, serde_json::Value>
);
impl<'a, T> From<&'a [T]> for Value
where
    T: Clone + Into<serde_json::Value>,
{
    /// Converts a slice of convertible items into a JSON array value.
    fn from(x: &'a [T]) -> Self {
        Self(serde_json::Value::from(x))
    }
}

impl<'a> From<&'a str> for Value {
    /// Converts a borrowed string into a JSON string value.
    fn from(x: &'a str) -> Self {
        Self(serde_json::Value::from(x))
    }
}

impl<'a> From<Cow<'a, str>> for Value {
    /// Converts a clone-on-write string into a JSON string value.
    fn from(x: Cow<'a, str>) -> Self {
        Self(serde_json::Value::from(x))
    }
}

impl<T> From<Option<T>> for Value
where
    T: Into<serde_json::Value>,
{
    /// Converts `None` to JSON null and `Some(v)` to the converted `v`.
    fn from(x: Option<T>) -> Self {
        Self(serde_json::Value::from(x))
    }
}

impl<T> From<Vec<T>> for Value
where
    T: Into<serde_json::Value>,
{
    /// Converts a vector of convertible items into a JSON array value.
    fn from(x: Vec<T>) -> Self {
        Self(serde_json::Value::from(x))
    }
}

@ -8,7 +8,7 @@ pub struct PlainCodec;
impl PlainCodec {
pub fn new() -> Self {
Self::default()
Self
}
}

@ -0,0 +1,132 @@
use semver::{Comparator, Op, Prerelease, Version as SemVer};
use std::fmt;
/// Represents a version and compatibility rules.
#[derive(Clone, Debug)]
pub struct Version {
    /// The concrete semantic version this instance represents.
    inner: SemVer,
    /// Lower bound used for compatibility checks (`>= major.minor.patch`).
    lower: Comparator,
    /// Upper bound used for compatibility checks (`<` the next breaking release).
    upper: Comparator,
}
impl Version {
    /// Creates a new version in the form `major.minor.patch` with a ruleset that is used to check
    /// other versions such that `>=0.1.2, <0.2.0` or `>=1.2.3, <2` depending on whether or not the
    /// major version is `0`.
    ///
    /// ```
    /// use distant_net::common::Version;
    ///
    /// // Matching versions are compatible
    /// let a = Version::new(1, 2, 3);
    /// let b = Version::new(1, 2, 3);
    /// assert!(a.is_compatible_with(&b));
    ///
    /// // Version 1.2.3 is compatible with 1.2.4, but not the other way
    /// let a = Version::new(1, 2, 3);
    /// let b = Version::new(1, 2, 4);
    /// assert!(a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    ///
    /// // Version 1.2.3 is compatible with 1.3.0, but not 2
    /// let a = Version::new(1, 2, 3);
    /// assert!(a.is_compatible_with(&Version::new(1, 3, 0)));
    /// assert!(!a.is_compatible_with(&Version::new(2, 0, 0)));
    ///
    /// // Version 0.1.2 is compatible with 0.1.3, but not the other way
    /// let a = Version::new(0, 1, 2);
    /// let b = Version::new(0, 1, 3);
    /// assert!(a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    ///
    /// // Version 0.1.2 is not compatible with 0.2
    /// let a = Version::new(0, 1, 2);
    /// let b = Version::new(0, 2, 0);
    /// assert!(!a.is_compatible_with(&b));
    /// assert!(!b.is_compatible_with(&a));
    /// ```
    pub const fn new(major: u64, minor: u64, patch: u64) -> Self {
        // Lower bound of the compatibility window: >= major.minor.patch
        let lower = Comparator {
            op: Op::GreaterEq,
            major,
            minor: Some(minor),
            patch: Some(patch),
            pre: Prerelease::EMPTY,
        };

        // Upper bound: pre-1.0 releases treat each minor bump as breaking
        // (< 0.(minor+1)), while 1.0+ releases only break on a major bump
        // (< (major+1)).
        let upper = if major == 0 {
            Comparator {
                op: Op::Less,
                major: 0,
                minor: Some(minor + 1),
                patch: None,
                pre: Prerelease::EMPTY,
            }
        } else {
            Comparator {
                op: Op::Less,
                major: major + 1,
                minor: None,
                patch: None,
                pre: Prerelease::EMPTY,
            }
        };

        Self {
            inner: SemVer::new(major, minor, patch),
            lower,
            upper,
        }
    }

    /// Returns true if this version is compatible with another version.
    pub fn is_compatible_with(&self, other: &Self) -> bool {
        let candidate = &other.inner;
        self.lower.matches(candidate) && self.upper.matches(candidate)
    }

    /// Converts from a collection of bytes into a version using the byte form major/minor/patch
    /// using big endian.
    pub const fn from_be_bytes(bytes: [u8; 24]) -> Self {
        // Decode three consecutive big-endian u64 values: major, minor, patch.
        let mut parts = [0u64; 3];
        let mut i = 0;
        while i < 3 {
            let mut value = 0u64;
            let mut j = 0;
            while j < 8 {
                value = (value << 8) | bytes[i * 8 + j] as u64;
                j += 1;
            }
            parts[i] = value;
            i += 1;
        }
        Self::new(parts[0], parts[1], parts[2])
    }

    /// Converts the version into a byte form of major/minor/patch using big endian.
    pub const fn to_be_bytes(&self) -> [u8; 24] {
        // Encode major, minor, patch as three consecutive big-endian u64 values.
        let parts = [self.inner.major, self.inner.minor, self.inner.patch];
        let mut bytes = [0u8; 24];
        let mut i = 0;
        while i < 3 {
            let encoded = parts[i].to_be_bytes();
            let mut j = 0;
            while j < 8 {
                bytes[i * 8 + j] = encoded[j];
                j += 1;
            }
            i += 1;
        }
        bytes
    }
}
impl Default for Version {
/// Default version is `0.0.0`.
fn default() -> Self {
Self::new(0, 0, 0)
}
}
impl fmt::Display for Version {
    /// Displays the underlying semantic version (e.g. `1.2.3`).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.inner, f)
    }
}
impl From<semver::Version> for Version {
/// Creates a new [`Version`] using the major, minor, and patch information from
/// [`semver::Version`].
fn from(version: semver::Version) -> Self {
let mut this = Self::new(version.major, version.minor, version.patch);
this.inner = version;
this
}
}
impl From<Version> for semver::Version {
fn from(version: Version) -> Self {
version.inner
}
}

@ -5,3 +5,12 @@ mod server;
pub use client::*;
pub use data::*;
pub use server::*;
use crate::common::Version;
/// Represents the version associated with the manager's protocol.
///
/// Derived at compile time from this crate's `CARGO_PKG_VERSION_*` environment
/// variables, so the protocol version always tracks the published crate version.
pub const PROTOCOL_VERSION: Version = Version::new(
    const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64),
);

@ -7,7 +7,7 @@ use log::*;
use crate::client::Client;
use crate::common::{ConnectionId, Destination, Map, Request};
use crate::manager::data::{
ConnectionInfo, ConnectionList, ManagerCapabilities, ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerRequest, ManagerResponse, SemVer,
};
mod channel;
@ -231,12 +231,12 @@ impl ManagerClient {
RawChannel::spawn(connection_id, self).await
}
/// Retrieves a list of supported capabilities
pub async fn capabilities(&mut self) -> io::Result<ManagerCapabilities> {
trace!("capabilities()");
let res = self.send(ManagerRequest::Capabilities).await?;
/// Retrieves the version of the manager.
pub async fn version(&mut self) -> io::Result<SemVer> {
trace!("version()");
let res = self.send(ManagerRequest::Version).await?;
match res.payload {
ManagerResponse::Capabilities { supported } => Ok(supported),
ManagerResponse::Version { version } => Ok(version),
ManagerResponse::Error { description } => {
Err(io::Error::new(io::ErrorKind::Other, description))
}

@ -1,8 +1,6 @@
pub type ManagerChannelId = u32;
pub type ManagerAuthenticationId = u32;
mod capabilities;
pub use capabilities::*;
pub use semver::Version as SemVer;
mod info;
pub use info::*;

@ -1,189 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
use super::ManagerCapabilityKind;
/// Set of supported capabilities for a manager.
///
/// Wraps a [`HashSet`] of [`ManagerCapability`]; since capability
/// equality/hashing consider only `kind`, membership is keyed by kind.
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet<ManagerCapability>);
impl ManagerCapabilities {
    /// Builds a lookup probe for `kind`.
    ///
    /// [`ManagerCapability`] equality and hashing consider only `kind`, so a
    /// probe with an empty description is sufficient for set lookups. This
    /// replaces three identical inline constructions in `contains`/`take`/
    /// `remove`.
    fn probe(kind: impl AsRef<str>) -> ManagerCapability {
        ManagerCapability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        }
    }

    /// Return set of capabilities encompassing all possible capabilities
    pub fn all() -> Self {
        Self(
            ManagerCapabilityKind::iter()
                .map(ManagerCapability::from)
                .collect(),
        )
    }

    /// Return empty set of capabilities
    pub fn none() -> Self {
        Self(HashSet::new())
    }

    /// Returns true if the capability with described kind is included
    pub fn contains(&self, kind: impl AsRef<str>) -> bool {
        self.0.contains(&Self::probe(kind))
    }

    /// Adds the specified capability to the set of capabilities
    ///
    /// * If the set did not have this capability, returns `true`
    /// * If the set did have this capability, returns `false`
    pub fn insert(&mut self, cap: impl Into<ManagerCapability>) -> bool {
        self.0.insert(cap.into())
    }

    /// Removes the capability with the described kind, returning the capability
    pub fn take(&mut self, kind: impl AsRef<str>) -> Option<ManagerCapability> {
        self.0.take(&Self::probe(kind))
    }

    /// Removes the capability with the described kind, returning true if it existed
    pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
        self.0.remove(&Self::probe(kind))
    }

    /// Converts into vec of capabilities sorted by kind
    pub fn into_sorted_vec(self) -> Vec<ManagerCapability> {
        let mut this = self.0.into_iter().collect::<Vec<_>>();
        this.sort_unstable();
        this
    }
}
impl BitAnd for &ManagerCapabilities {
    type Output = ManagerCapabilities;

    /// Set intersection: capabilities present in both `self` and `rhs`.
    fn bitand(self, rhs: Self) -> Self::Output {
        ManagerCapabilities(self.0.bitand(&rhs.0))
    }
}
impl BitOr for &ManagerCapabilities {
    type Output = ManagerCapabilities;

    /// Set union: capabilities present in either `self` or `rhs`.
    fn bitor(self, rhs: Self) -> Self::Output {
        ManagerCapabilities(self.0.bitor(&rhs.0))
    }
}
impl BitOr<ManagerCapability> for &ManagerCapabilities {
type Output = ManagerCapabilities;
fn bitor(self, rhs: ManagerCapability) -> Self::Output {
let mut other = ManagerCapabilities::none();
other.0.insert(rhs);
self.bitor(&other)
}
}
impl BitXor for &ManagerCapabilities {
    type Output = ManagerCapabilities;

    /// Symmetric difference: capabilities in exactly one of `self` or `rhs`.
    fn bitxor(self, rhs: Self) -> Self::Output {
        ManagerCapabilities(self.0.bitxor(&rhs.0))
    }
}
impl FromIterator<ManagerCapability> for ManagerCapabilities {
fn from_iter<I: IntoIterator<Item = ManagerCapability>>(iter: I) -> Self {
let mut this = ManagerCapabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not
/// description.
///
/// Note: `kind` comparison is ASCII case-insensitive (see the `PartialEq`,
/// `Ord`, and `Hash` implementations below).
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct ManagerCapability {
    /// Label describing the kind of capability
    pub kind: String,

    /// Information about the capability
    pub description: String,
}
impl ManagerCapability {
/// Will convert the [`ManagerCapability`]'s `kind` into a known [`ManagerCapabilityKind`] if
/// possible, returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<ManagerCapabilityKind> {
ManagerCapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for ManagerCapability {
    /// Equality considers only `kind`, compared ASCII case-insensitively;
    /// the `description` field is deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.kind.eq_ignore_ascii_case(&other.kind)
    }
}

impl Eq for ManagerCapability {}
impl PartialOrd for ManagerCapability {
    /// Delegates to the total ordering defined by [`Ord`].
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for ManagerCapability {
    /// Orders by `kind`, ASCII case-insensitively, consistent with the
    /// case-insensitive `PartialEq` implementation.
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare bytes with on-the-fly ASCII lowercasing instead of
        // allocating two lowercased Strings per comparison. Lexicographic
        // comparison of the lowered byte iterators (including length) yields
        // the same ordering as comparing the lowered Strings, since `str`
        // ordering is byte-wise lexicographic.
        self.kind
            .bytes()
            .map(|b| b.to_ascii_lowercase())
            .cmp(other.kind.bytes().map(|b| b.to_ascii_lowercase()))
    }
}
impl Hash for ManagerCapability {
    /// Hashes only the ASCII-lowercased `kind`, keeping the hash consistent
    /// with the case-insensitive `PartialEq` implementation — a requirement
    /// for correct behavior inside `HashSet`/`HashMap`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.kind.to_ascii_lowercase().hash(state);
    }
}
impl From<ManagerCapabilityKind> for ManagerCapability {
    /// Creates a new capability using the kind's default message
    fn from(kind: ManagerCapabilityKind) -> Self {
        // Fall back to an empty description when the kind carries no message.
        let description = match kind.get_message() {
            Some(msg) => msg.to_string(),
            None => String::new(),
        };

        Self {
            kind: kind.to_string(),
            description,
        }
    }
}

@ -1,36 +1,17 @@
use derive_more::IsVariant;
use distant_auth::msg::AuthenticationResponse;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
use super::{ManagerAuthenticationId, ManagerChannelId};
use crate::common::{ConnectionId, Destination, Map, UntypedRequest};
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug, EnumDiscriminants, Serialize, Deserialize)]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[strum_discriminants(name(ManagerCapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum ManagerRequest {
/// Retrieve information about the server's capabilities
#[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
Capabilities,
/// Retrieve information about the manager's version.
Version,
/// Launch a server using the manager
#[strum_discriminants(strum(message = "Supports launching a server on remote machines"))]
Launch {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -40,7 +21,6 @@ pub enum ManagerRequest {
},
/// Initiate a connection through the manager
#[strum_discriminants(strum(message = "Supports connecting to remote servers"))]
Connect {
// NOTE: Boxed per clippy's large_enum_variant warning
destination: Box<Destination>,
@ -50,7 +30,6 @@ pub enum ManagerRequest {
},
/// Submit some authentication message for the manager to use with an active connection
#[strum_discriminants(strum(message = "Supports authenticating with a remote server"))]
Authenticate {
/// Id of the authentication request that is being responded to
id: ManagerAuthenticationId,
@ -60,16 +39,12 @@ pub enum ManagerRequest {
},
/// Opens a channel for communication with an already-connected server
#[strum_discriminants(strum(message = "Supports opening a channel with a remote server"))]
OpenChannel {
/// Id of the connection
id: ConnectionId,
},
/// Sends data through channel
#[strum_discriminants(strum(
message = "Supports sending data through a channel with a remote server"
))]
Channel {
/// Id of the channel
id: ManagerChannelId,
@ -79,21 +54,17 @@ pub enum ManagerRequest {
},
/// Closes an open channel
#[strum_discriminants(strum(message = "Supports closing a channel with a remote server"))]
CloseChannel {
/// Id of the channel to close
id: ManagerChannelId,
},
/// Retrieve information about a specific connection
#[strum_discriminants(strum(message = "Supports retrieving connection-specific information"))]
Info { id: ConnectionId },
/// Kill a specific connection
#[strum_discriminants(strum(message = "Supports killing a remote connection"))]
Kill { id: ConnectionId },
/// Retrieve list of connections being managed
#[strum_discriminants(strum(message = "Supports retrieving a list of managed connections"))]
List,
}

@ -1,9 +1,7 @@
use distant_auth::msg::Authentication;
use serde::{Deserialize, Serialize};
use super::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
};
use super::{ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, SemVer};
use crate::common::{ConnectionId, Destination, UntypedResponse};
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -15,8 +13,8 @@ pub enum ManagerResponse {
/// Indicates that some error occurred during a request
Error { description: String },
/// Response to retrieving information about the manager's capabilities
Capabilities { supported: ManagerCapabilities },
/// Information about the manager's version.
Version { version: SemVer },
/// Confirmation of a server being launched
Launched {

@ -9,10 +9,10 @@ use tokio::sync::{oneshot, RwLock};
use crate::common::{ConnectionId, Destination, Map};
use crate::manager::{
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerCapabilities, ManagerChannelId,
ManagerRequest, ManagerResponse,
ConnectionInfo, ConnectionList, ManagerAuthenticationId, ManagerChannelId, ManagerRequest,
ManagerResponse, SemVer,
};
use crate::server::{Server, ServerCtx, ServerHandler};
use crate::server::{RequestCtx, Server, ServerHandler};
mod authentication;
pub use authentication::*;
@ -31,6 +31,10 @@ pub struct ManagerServer {
/// Configuration settings for the server
config: Config,
/// Holds on to open channels feeding data back from a server to some connected client,
/// enabling us to cancel the tasks on demand
channels: RwLock<HashMap<ManagerChannelId, ManagerChannel>>,
/// Mapping of connection id -> connection
connections: RwLock<HashMap<ConnectionId, ManagerConnection>>,
@ -46,6 +50,7 @@ impl ManagerServer {
pub fn new(config: Config) -> Server<Self> {
Server::new().handler(Self {
config,
channels: RwLock::new(HashMap::new()),
connections: RwLock::new(HashMap::new()),
registry: Arc::new(RwLock::new(HashMap::new())),
})
@ -133,9 +138,11 @@ impl ManagerServer {
Ok(id)
}
/// Retrieves the list of supported capabilities for this manager
async fn capabilities(&self) -> io::Result<ManagerCapabilities> {
Ok(ManagerCapabilities::all())
    /// Retrieves the manager's version.
    ///
    /// The version is parsed from this crate's `CARGO_PKG_VERSION`; a parse
    /// failure surfaces as an [`io::Error`] with kind `Other`.
    async fn version(&self) -> io::Result<SemVer> {
        env!("CARGO_PKG_VERSION")
            .parse()
            .map_err(|x| io::Error::new(io::ErrorKind::Other, x))
    }
/// Retrieves information about the connection to the server with the specified `id`
@ -168,7 +175,25 @@ impl ManagerServer {
/// Kills the connection to the server with the specified `id`
async fn kill(&self, id: ConnectionId) -> io::Result<()> {
match self.connections.write().await.remove(&id) {
Some(_) => Ok(()),
Some(connection) => {
// Close any open channels
if let Ok(ids) = connection.channel_ids().await {
let mut channels_lock = self.channels.write().await;
for id in ids {
if let Some(channel) = channels_lock.remove(&id) {
if let Err(x) = channel.close() {
error!("[Conn {id}] {x}");
}
}
}
}
// Make sure the connection is aborted so nothing new can happen
debug!("[Conn {id}] Aborting");
connection.abort();
Ok(())
}
None => Err(io::Error::new(
io::ErrorKind::NotConnected,
"No connection found",
@ -177,104 +202,120 @@ impl ManagerServer {
}
}
#[derive(Default)]
pub struct DistantManagerServerConnection {
/// Holds on to open channels feeding data back from a server to some connected client,
/// enabling us to cancel the tasks on demand
channels: RwLock<HashMap<ManagerChannelId, ManagerChannel>>,
}
#[async_trait]
impl ServerHandler for ManagerServer {
type LocalData = DistantManagerServerConnection;
type Request = ManagerRequest;
type Response = ManagerResponse;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
let ServerCtx {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
debug!("manager::on_request({ctx:?})");
let RequestCtx {
connection_id,
request,
reply,
local_data,
} = ctx;
let response = match request.payload {
ManagerRequest::Capabilities {} => match self.capabilities().await {
Ok(supported) => ManagerResponse::Capabilities { supported },
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Version {} => {
debug!("Looking up version");
match self.version().await {
Ok(version) => ManagerResponse::Version { version },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Launch {
destination,
options,
} => match self
.launch(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(destination) => ManagerResponse::Launched { destination },
Err(x) => ManagerResponse::from(x),
},
} => {
info!("Launching {destination} with {options}");
match self
.launch(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(destination) => ManagerResponse::Launched { destination },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Connect {
destination,
options,
} => match self
.connect(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(id) => ManagerResponse::Connected { id },
Err(x) => ManagerResponse::from(x),
},
} => {
info!("Connecting to {destination} with {options}");
match self
.connect(
*destination,
options,
ManagerAuthenticator {
reply: reply.clone(),
registry: Arc::clone(&self.registry),
},
)
.await
{
Ok(id) => ManagerResponse::Connected { id },
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Authenticate { id, msg } => {
trace!("Retrieving authentication callback registry");
match self.registry.write().await.remove(&id) {
Some(cb) => match cb.send(msg) {
Ok(_) => return,
Err(_) => ManagerResponse::Error {
description: "Unable to forward authentication callback".to_string(),
},
},
Some(cb) => {
trace!("Sending {msg:?} through authentication callback");
match cb.send(msg) {
Ok(_) => return,
Err(_) => ManagerResponse::Error {
description: "Unable to forward authentication callback"
.to_string(),
},
}
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid authentication id",
)),
}
}
ManagerRequest::OpenChannel { id } => match self.connections.read().await.get(&id) {
Some(connection) => match connection.open_channel(reply.clone()) {
Ok(channel) => {
debug!("[Conn {id}] Channel {} has been opened", channel.id());
let id = channel.id();
local_data.channels.write().await.insert(id, channel);
ManagerResponse::ChannelOpened { id }
ManagerRequest::OpenChannel { id } => {
debug!("Attempting to retrieve connection {id}");
match self.connections.read().await.get(&id) {
Some(connection) => {
debug!("Opening channel through connection {id}");
match connection.open_channel(reply.clone()) {
Ok(channel) => {
info!("[Conn {id}] Channel {} has been opened", channel.id());
let id = channel.id();
self.channels.write().await.insert(id, channel);
ManagerResponse::ChannelOpened { id }
}
Err(x) => ManagerResponse::from(x),
}
}
Err(x) => ManagerResponse::from(x),
},
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Connection does not exist",
)),
},
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Connection does not exist",
)),
}
}
ManagerRequest::Channel { id, request } => {
match local_data.channels.read().await.get(&id) {
debug!("Attempting to retrieve channel {id}");
match self.channels.read().await.get(&id) {
// TODO: For now, we are NOT sending back a response to acknowledge
// a successful channel send. We could do this in order for
// the client to listen for a complete send, but is it worth it?
Some(channel) => match channel.send(request) {
Ok(_) => return,
Err(x) => ManagerResponse::from(x),
},
Some(channel) => {
debug!("Sending {request:?} through channel {id}");
match channel.send(request) {
Ok(_) => return,
Err(x) => ManagerResponse::from(x),
}
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Channel is not open or does not exist",
@ -282,35 +323,57 @@ impl ServerHandler for ManagerServer {
}
}
ManagerRequest::CloseChannel { id } => {
match local_data.channels.write().await.remove(&id) {
Some(channel) => match channel.close() {
Ok(_) => {
debug!("Channel {id} has been closed");
ManagerResponse::ChannelClosed { id }
debug!("Attempting to remove channel {id}");
match self.channels.write().await.remove(&id) {
Some(channel) => {
debug!("Removed channel {}", channel.id());
match channel.close() {
Ok(_) => {
info!("Channel {id} has been closed");
ManagerResponse::ChannelClosed { id }
}
Err(x) => ManagerResponse::from(x),
}
Err(x) => ManagerResponse::from(x),
},
}
None => ManagerResponse::from(io::Error::new(
io::ErrorKind::NotConnected,
"Channel is not open or does not exist",
)),
}
}
ManagerRequest::Info { id } => match self.info(id).await {
Ok(info) => ManagerResponse::Info(info),
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::List => match self.list().await {
Ok(list) => ManagerResponse::List(list),
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Kill { id } => match self.kill(id).await {
Ok(()) => ManagerResponse::Killed,
Err(x) => ManagerResponse::from(x),
},
ManagerRequest::Info { id } => {
debug!("Attempting to retrieve information for connection {id}");
match self.info(id).await {
Ok(info) => {
info!("Retrieved information for connection {id}");
ManagerResponse::Info(info)
}
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::List => {
debug!("Attempting to retrieve the list of connections");
match self.list().await {
Ok(list) => {
info!("Retrieved list of connections");
ManagerResponse::List(list)
}
Err(x) => ManagerResponse::from(x),
}
}
ManagerRequest::Kill { id } => {
debug!("Attempting to kill connection {id}");
match self.kill(id).await {
Ok(()) => {
info!("Killed connection {id}");
ManagerResponse::Killed
}
Err(x) => ManagerResponse::from(x),
}
}
};
if let Err(x) = reply.send(response).await {
if let Err(x) = reply.send(response) {
error!("[Conn {}] {}", connection_id, x);
}
}
@ -349,13 +412,14 @@ mod tests {
let authenticator = ManagerAuthenticator {
reply: ServerReply {
origin_id: format!("{}", rand::random::<u8>()),
tx: mpsc::channel(1).0,
tx: mpsc::unbounded_channel().0,
},
registry: Arc::clone(&registry),
};
let server = ManagerServer {
config,
channels: RwLock::new(HashMap::new()),
connections: RwLock::new(HashMap::new()),
registry,
};

@ -29,19 +29,15 @@ impl ManagerAuthenticator {
let id = rand::random();
self.registry.write().await.insert(id, tx);
self.reply
.send(ManagerResponse::Authenticate { id, msg })
.await?;
self.reply.send(ManagerResponse::Authenticate { id, msg })?;
rx.await
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
}
/// Sends an [`Authentication`] `msg` without expecting a reply. No callback is stored.
async fn fire(&self, msg: Authentication) -> io::Result<()> {
fn fire(&self, msg: Authentication) -> io::Result<()> {
let id = rand::random();
self.reply
.send(ManagerResponse::Authenticate { id, msg })
.await?;
self.reply.send(ManagerResponse::Authenticate { id, msg })?;
Ok(())
}
}
@ -89,18 +85,18 @@ impl Authenticator for ManagerAuthenticator {
}
async fn info(&mut self, info: Info) -> io::Result<()> {
self.fire(Authentication::Info(info)).await
self.fire(Authentication::Info(info))
}
async fn error(&mut self, error: Error) -> io::Result<()> {
self.fire(Authentication::Error(error)).await
self.fire(Authentication::Error(error))
}
async fn start_method(&mut self, start_method: StartMethod) -> io::Result<()> {
self.fire(Authentication::StartMethod(start_method)).await
self.fire(Authentication::StartMethod(start_method))
}
async fn finished(&mut self) -> io::Result<()> {
self.fire(Authentication::Finished).await
self.fire(Authentication::Finished)
}
}

@ -1,8 +1,8 @@
use std::collections::HashMap;
use std::io;
use std::{fmt, io};
use log::*;
use tokio::sync::mpsc;
use tokio::sync::{mpsc, oneshot};
use tokio::task::JoinHandle;
use crate::client::{Mailbox, UntypedClient};
@ -62,11 +62,17 @@ impl ManagerConnection {
pub async fn spawn(
spawn: Destination,
options: Map,
client: UntypedClient,
mut client: UntypedClient,
) -> io::Result<Self> {
let connection_id = rand::random();
let (tx, rx) = mpsc::unbounded_channel();
// NOTE: Ensure that the connection is severed when the client is dropped; otherwise, when
// the connection is terminated via aborting it or the connection being dropped, the
// connection will persist which can cause problems such as lonely shutdown of the server
// never triggering!
client.shutdown_on_drop(true);
let (request_tx, request_rx) = mpsc::unbounded_channel();
let action_task = tokio::spawn(action_task(connection_id, rx, request_tx));
let response_task = tokio::spawn(response_task(
@ -105,16 +111,41 @@ impl ManagerConnection {
tx: self.tx.clone(),
})
}
}
impl Drop for ManagerConnection {
fn drop(&mut self) {
    /// Retrieves the ids of all channels currently registered with this
    /// connection's background action task.
    ///
    /// Returns an [`io::ErrorKind::BrokenPipe`] error if the action task has
    /// terminated (the send fails) or drops the callback before responding.
    pub async fn channel_ids(&self) -> io::Result<Vec<ManagerChannelId>> {
        // One-shot callback through which the action task reports its
        // registered channel ids.
        let (tx, rx) = oneshot::channel();
        self.tx
            .send(Action::GetRegistered { cb: tx })
            .map_err(|x| {
                io::Error::new(
                    io::ErrorKind::BrokenPipe,
                    format!("channel_ids failed: {x}"),
                )
            })?;
        let channel_ids = rx.await.map_err(|x| {
            io::Error::new(
                io::ErrorKind::BrokenPipe,
                format!("channel_ids callback dropped: {x}"),
            )
        })?;
        Ok(channel_ids)
    }
    /// Aborts the tasks used to engage with the connection.
    ///
    /// Cancels the action, request, and response tasks; any in-flight work on
    /// those tasks is dropped.
    pub fn abort(&self) {
        self.action_task.abort();
        self.request_task.abort();
        self.response_task.abort();
    }
}
impl Drop for ManagerConnection {
    /// Aborts all background tasks so they do not outlive the connection.
    fn drop(&mut self) {
        self.abort();
    }
}
enum Action {
Register {
id: ManagerChannelId,
@ -125,6 +156,10 @@ enum Action {
id: ManagerChannelId,
},
GetRegistered {
cb: oneshot::Sender<Vec<ManagerChannelId>>,
},
Read {
res: UntypedResponse<'static>,
},
@ -135,6 +170,18 @@ enum Action {
},
}
impl fmt::Debug for Action {
    /// Compact, hand-written Debug that prints only channel ids and elides
    /// variant payloads (reply senders, callbacks, raw request/response data).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Register { id, .. } => write!(f, "Action::Register {{ id: {id}, .. }}"),
            Self::Unregister { id } => write!(f, "Action::Unregister {{ id: {id} }}"),
            Self::GetRegistered { .. } => write!(f, "Action::GetRegistered {{ .. }}"),
            Self::Read { .. } => write!(f, "Action::Read {{ .. }}"),
            Self::Write { id, .. } => write!(f, "Action::Write {{ id: {id}, .. }}"),
        }
    }
}
/// Internal task to process outgoing [`UntypedRequest`]s.
async fn request_task(
id: ConnectionId,
@ -142,10 +189,13 @@ async fn request_task(
mut rx: mpsc::UnboundedReceiver<UntypedRequest<'static>>,
) {
while let Some(req) = rx.recv().await {
trace!("[Conn {id}] Firing off request {}", req.id);
if let Err(x) = client.fire(req).await {
error!("[Conn {id}] Failed to send request: {x}");
}
}
trace!("[Conn {id}] Manager request task closed");
}
/// Internal task to process incoming [`UntypedResponse`]s.
@ -155,10 +205,17 @@ async fn response_task(
tx: mpsc::UnboundedSender<Action>,
) {
while let Some(res) = mailbox.next().await {
trace!(
"[Conn {id}] Receiving response {} to request {}",
res.id,
res.origin_id
);
if let Err(x) = tx.send(Action::Read { res }) {
error!("[Conn {id}] Failed to forward received response: {x}");
}
}
trace!("[Conn {id}] Manager response task closed");
}
/// Internal task to process [`Action`] items.
@ -174,6 +231,8 @@ async fn action_task(
let mut registered = HashMap::new();
while let Some(action) = rx.recv().await {
trace!("[Conn {id}] {action:?}");
match action {
Action::Register { id, reply } => {
registered.insert(id, reply);
@ -181,6 +240,9 @@ async fn action_task(
Action::Unregister { id } => {
registered.remove(&id);
}
Action::GetRegistered { cb } => {
let _ = cb.send(registered.keys().copied().collect());
}
Action::Read { mut res } => {
// Split {channel id}_{request id} back into pieces and
// update the origin id to match the request id only
@ -201,7 +263,8 @@ async fn action_task(
id: channel_id,
response: res,
};
if let Err(x) = reply.send(response).await {
if let Err(x) = reply.send(response) {
error!("[Conn {id}] {x}");
}
}
@ -217,4 +280,6 @@ async fn action_task(
}
}
}
trace!("[Conn {id}] Manager action task closed");
}

@ -9,7 +9,7 @@ use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::sync::{broadcast, RwLock};
use crate::common::{Listener, Response, Transport};
use crate::common::{ConnectionId, Listener, Response, Transport, Version};
mod builder;
pub use builder::*;
@ -45,6 +45,9 @@ pub struct Server<T> {
/// Performs authentication using various methods
verifier: Verifier,
/// Version associated with the server used by clients to verify compatibility
version: Version,
}
/// Interface for a handler that receives connections and requests
@ -56,23 +59,21 @@ pub trait ServerHandler: Send {
/// Type of data sent back by the server
type Response;
/// Type of data to store locally tied to the specific connection
type LocalData: Send;
/// Invoked upon a new connection becoming established.
///
/// ### Note
///
/// This can be useful in performing some additional initialization on the connection's local
/// data prior to it being used anywhere else.
#[allow(unused_variables)]
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_connect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked upon an existing connection getting dropped.
#[allow(unused_variables)]
async fn on_disconnect(&self, id: ConnectionId) -> io::Result<()> {
Ok(())
}
/// Invoked upon receiving a request from a client. The server should process this
/// request, which can be found in `ctx`, and send one or more replies in response.
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>);
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>);
}
impl Server<()> {
@ -83,6 +84,7 @@ impl Server<()> {
config: Default::default(),
handler: (),
verifier: Verifier::empty(),
version: Default::default(),
}
}
@ -117,6 +119,7 @@ impl<T> Server<T> {
config,
handler: self.handler,
verifier: self.verifier,
version: self.version,
}
}
@ -126,6 +129,7 @@ impl<T> Server<T> {
config: self.config,
handler,
verifier: self.verifier,
version: self.version,
}
}
@ -135,6 +139,17 @@ impl<T> Server<T> {
config: self.config,
handler: self.handler,
verifier,
version: self.version,
}
}
/// Consumes the current server, replacing its version with `version` and returning it.
pub fn version(self, version: Version) -> Self {
Self {
config: self.config,
handler: self.handler,
verifier: self.verifier,
version,
}
}
}
@ -144,11 +159,10 @@ where
T: ServerHandler + Sync + 'static,
T::Request: DeserializeOwned + Send + Sync + 'static,
T::Response: Serialize + Send + 'static,
T::LocalData: Default + Send + Sync + 'static,
{
/// Consumes the server, starting a task to process connections from the `listener` and
/// returning a [`ServerRef`] that can be used to control the active server instance.
pub fn start<L>(self, listener: L) -> io::Result<Box<dyn ServerRef>>
pub fn start<L>(self, listener: L) -> io::Result<ServerRef>
where
L: Listener + 'static,
L::Output: Transport + 'static,
@ -157,7 +171,7 @@ where
let (tx, rx) = broadcast::channel(1);
let task = tokio::spawn(self.task(Arc::clone(&state), listener, tx.clone(), rx));
Ok(Box::new(GenericServerRef { shutdown: tx, task }))
Ok(ServerRef { shutdown: tx, task })
}
/// Internal task that is run to receive connections and spawn connection tasks
@ -175,6 +189,7 @@ where
config,
handler,
verifier,
version,
} = self;
let handler = Arc::new(handler);
@ -224,8 +239,12 @@ where
.sleep_duration(config.connection_sleep)
.heartbeat_duration(config.connection_heartbeat)
.verifier(Arc::downgrade(&verifier))
.version(version.clone())
.spawn(),
);
// Clean up current tasks being tracked
connection_tasks.retain(|task| !task.is_finished());
}
// Once we stop listening, we still want to wait until all connections have terminated
@ -253,21 +272,22 @@ mod tests {
use super::*;
use crate::common::{Connection, InmemoryTransport, MpscListener, Request, Response};
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
pub struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = u16;
type Response = String;
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
Ok(())
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Always send back "hello"
ctx.reply.send("hello".to_string()).await.unwrap();
ctx.reply.send("hello".to_string()).unwrap();
}
}
@ -280,6 +300,7 @@ mod tests {
config,
handler: TestServerHandler,
verifier: Verifier::new(methods),
version: server_version!(),
}
}
@ -309,7 +330,7 @@ mod tests {
.expect("Failed to start server");
// Perform handshake and authentication with the server before beginning to send data
let mut connection = Connection::client(transport, DummyAuthHandler)
let mut connection = Connection::client(transport, DummyAuthHandler, server_version!())
.await
.expect("Failed to connect to server");

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::{PortRange, TcpListener};
use crate::common::{PortRange, TcpListener, Version};
use crate::server::{Server, ServerConfig, ServerHandler, TcpServerRef};
pub struct TcpServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> TcpServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> TcpServerBuilder<T>
@ -42,7 +46,6 @@ where
T: ServerHandler + Sync + 'static,
T::Request: DeserializeOwned + Send + Sync + 'static,
T::Response: Serialize + Send + 'static,
T::LocalData: Default + Send + Sync + 'static,
{
pub async fn start<P>(self, addr: IpAddr, port: P) -> io::Result<TcpServerRef>
where
@ -66,22 +69,18 @@ mod tests {
use super::*;
use crate::client::Client;
use crate::common::Request;
use crate::server::ServerCtx;
use crate::server::RequestCtx;
pub struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = String;
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Echo back what we received
ctx.reply
.send(ctx.request.payload.to_string())
.await
.unwrap();
ctx.reply.send(ctx.request.payload.to_string()).unwrap();
}
}

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::UnixSocketListener;
use crate::common::{UnixSocketListener, Version};
use crate::server::{Server, ServerConfig, ServerHandler, UnixSocketServerRef};
pub struct UnixSocketServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> UnixSocketServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> UnixSocketServerBuilder<T>
@ -42,7 +46,6 @@ where
T: ServerHandler + Sync + 'static,
T::Request: DeserializeOwned + Send + Sync + 'static,
T::Response: Serialize + Send + 'static,
T::LocalData: Default + Send + Sync + 'static,
{
pub async fn start<P>(self, path: P) -> io::Result<UnixSocketServerRef>
where
@ -66,22 +69,18 @@ mod tests {
use super::*;
use crate::client::Client;
use crate::common::Request;
use crate::server::ServerCtx;
use crate::server::RequestCtx;
pub struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = String;
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Echo back what we received
ctx.reply
.send(ctx.request.payload.to_string())
.await
.unwrap();
ctx.reply.send(ctx.request.payload.to_string()).unwrap();
}
}

@ -5,7 +5,7 @@ use distant_auth::Verifier;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::common::WindowsPipeListener;
use crate::common::{Version, WindowsPipeListener};
use crate::server::{Server, ServerConfig, ServerHandler, WindowsPipeServerRef};
pub struct WindowsPipeServerBuilder<T>(Server<T>);
@ -35,6 +35,10 @@ impl<T> WindowsPipeServerBuilder<T> {
pub fn verifier(self, verifier: Verifier) -> Self {
Self(self.0.verifier(verifier))
}
pub fn version(self, version: Version) -> Self {
Self(self.0.version(version))
}
}
impl<T> WindowsPipeServerBuilder<T>
@ -42,7 +46,6 @@ where
T: ServerHandler + Sync + 'static,
T::Request: DeserializeOwned + Send + Sync + 'static,
T::Response: Serialize + Send + 'static,
T::LocalData: Default + Send + Sync + 'static,
{
/// Start a new server at the specified address using the given codec
pub async fn start<A>(self, addr: A) -> io::Result<WindowsPipeServerRef>
@ -77,22 +80,18 @@ mod tests {
use super::*;
use crate::client::Client;
use crate::common::Request;
use crate::server::ServerCtx;
use crate::server::RequestCtx;
pub struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = String;
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Echo back what we received
ctx.reply
.send(ctx.request.payload.to_string())
.await
.unwrap();
ctx.reply.send(ctx.request.payload.to_string()).unwrap();
}
}

@ -12,12 +12,9 @@ use serde::Serialize;
use tokio::sync::{broadcast, mpsc, oneshot, RwLock};
use tokio::task::JoinHandle;
use super::{
ConnectionCtx, ConnectionState, ServerCtx, ServerHandler, ServerReply, ServerState,
ShutdownTimer,
};
use super::{ConnectionState, RequestCtx, ServerHandler, ServerReply, ServerState, ShutdownTimer};
use crate::common::{
Backup, Connection, Frame, Interest, Keychain, Response, Transport, UntypedRequest,
Backup, Connection, Frame, Interest, Keychain, Response, Transport, UntypedRequest, Version,
};
pub type ServerKeychain = Keychain<oneshot::Receiver<Backup>>;
@ -68,6 +65,7 @@ pub(super) struct ConnectionTaskBuilder<H, S, T> {
sleep_duration: Duration,
heartbeat_duration: Duration,
verifier: Weak<Verifier>,
version: Version,
}
impl ConnectionTaskBuilder<(), (), ()> {
@ -83,6 +81,7 @@ impl ConnectionTaskBuilder<(), (), ()> {
sleep_duration: SLEEP_DURATION,
heartbeat_duration: MINIMUM_HEARTBEAT_DURATION,
verifier: Weak::new(),
version: Version::default(),
}
}
}
@ -99,6 +98,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -113,6 +113,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -127,6 +128,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -141,6 +143,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -155,6 +158,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -172,6 +176,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -186,6 +191,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -203,6 +209,7 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration,
verifier: self.verifier,
version: self.version,
}
}
@ -217,6 +224,22 @@ impl<H, S, T> ConnectionTaskBuilder<H, S, T> {
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier,
version: self.version,
}
}
pub fn version(self, version: Version) -> ConnectionTaskBuilder<H, S, T> {
ConnectionTaskBuilder {
handler: self.handler,
state: self.state,
keychain: self.keychain,
transport: self.transport,
shutdown: self.shutdown,
shutdown_timer: self.shutdown_timer,
sleep_duration: self.sleep_duration,
heartbeat_duration: self.heartbeat_duration,
verifier: self.verifier,
version,
}
}
}
@ -226,7 +249,6 @@ where
H: ServerHandler + Sync + 'static,
H::Request: DeserializeOwned + Send + Sync + 'static,
H::Response: Serialize + Send + 'static,
H::LocalData: Default + Send + Sync + 'static,
T: Transport + 'static,
{
pub fn spawn(self) -> ConnectionTask {
@ -244,6 +266,7 @@ where
sleep_duration,
heartbeat_duration,
verifier,
version,
} = self;
// NOTE: This exists purely to make the compiler happy for macro_rules declaration order.
@ -412,7 +435,8 @@ where
match await_or_shutdown!(Box::pin(Connection::server(
transport,
verifier.as_ref(),
keychain
keychain,
version
))) {
Ok(connection) => connection,
Err(x) => {
@ -429,16 +453,11 @@ where
let id = connection.id();
// Create local data for the connection and then process it
debug!("[Conn {id}] Officially accepting connection");
let mut local_data = H::LocalData::default();
if let Err(x) = await_or_shutdown!(handler.on_accept(ConnectionCtx {
connection_id: id,
local_data: &mut local_data
})) {
info!("[Conn {id}] Connection established");
if let Err(x) = await_or_shutdown!(handler.on_connect(id)) {
terminate_connection!(@fatal "[Conn {id}] Accepting connection failed: {x}");
}
let local_data = Arc::new(local_data);
let mut last_heartbeat = Instant::now();
// Restore our connection's channels if we have them, otherwise make new ones
@ -450,12 +469,12 @@ where
}
None => {
warn!("[Conn {id}] Existing connection with id, but channels not saved");
mpsc::channel::<Response<H::Response>>(1)
mpsc::unbounded_channel::<Response<H::Response>>()
}
},
None => {
debug!("[Conn {id}] Marked as new connection");
mpsc::channel::<Response<H::Response>>(1)
mpsc::unbounded_channel::<Response<H::Response>>()
}
};
@ -483,15 +502,22 @@ where
Ok(Some(frame)) => match UntypedRequest::from_slice(frame.as_item()) {
Ok(request) => match request.to_typed_request() {
Ok(request) => {
if log::log_enabled!(Level::Debug) {
let debug_header = if !request.header.is_empty() {
format!(" | header {}", request.header)
} else {
String::new()
};
debug!("[Conn {id}] New request {}{debug_header}", request.id);
}
let origin_id = request.id.clone();
let ctx = ServerCtx {
let ctx = RequestCtx {
connection_id: id,
request,
reply: ServerReply {
origin_id,
tx: tx.clone(),
},
local_data: Arc::clone(&local_data),
};
// Spawn a new task to run the request handler so we don't block
@ -500,8 +526,8 @@ where
tokio::spawn(async move { handler.on_request(ctx).await });
}
Err(x) => {
if log::log_enabled!(Level::Trace) {
trace!(
if log::log_enabled!(Level::Debug) {
error!(
"[Conn {id}] Failed receiving {}",
String::from_utf8_lossy(&request.payload),
);
@ -600,23 +626,18 @@ mod tests {
use crate::common::{
HeapSecretKey, InmemoryTransport, Ready, Reconnectable, Request, Response,
};
use crate::server::Shutdown;
use crate::server::{ConnectionId, Shutdown};
struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = u16;
type Response = String;
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
Ok(())
}
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
// Always send back "hello"
ctx.reply.send("hello".to_string()).await.unwrap();
ctx.reply.send("hello".to_string()).unwrap();
}
}
@ -634,6 +655,12 @@ mod tests {
}};
}
macro_rules! server_version {
() => {
Version::new(1, 2, 3)
};
}
#[test(tokio::test)]
async fn should_terminate_if_fails_access_verifier() {
let handler = Arc::new(TestServerHandler);
@ -678,11 +705,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -711,11 +739,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -735,18 +764,14 @@ mod tests {
#[async_trait]
impl ServerHandler for BadAcceptServerHandler {
type LocalData = ();
type Request = u16;
type Response = String;
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
Err(io::Error::new(io::ErrorKind::Other, "bad accept"))
async fn on_connect(&self, _: ConnectionId) -> io::Result<()> {
Err(io::Error::new(io::ErrorKind::Other, "bad connect"))
}
async fn on_request(
&self,
_: ServerCtx<Self::Request, Self::Response, Self::LocalData>,
) {
async fn on_request(&self, _: RequestCtx<Self::Request, Self::Response>) {
unreachable!();
}
}
@ -765,12 +790,13 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, and then closes to
// trigger the server-side to close
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -839,12 +865,13 @@ mod tests {
})
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, set ready to fail
// for the server-side after client connection completes, and wait a bit
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -883,12 +910,13 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side, and then closes to
// trigger the server-side to close
tokio::spawn(async move {
let _client = Connection::client(t2, DummyAuthHandler)
let _client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
});
@ -913,11 +941,12 @@ mod tests {
.transport(t1)
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
let task = tokio::spawn(async move {
let mut client = Connection::client(t2, DummyAuthHandler)
let mut client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -950,11 +979,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle establishing connection from client-side
let task = tokio::spawn(async move {
let mut client = Connection::client(t2, DummyAuthHandler)
let mut client = Connection::client(t2, DummyAuthHandler, server_version!())
.await
.expect("Fail to establish client-side connection");
@ -1027,20 +1057,16 @@ mod tests {
#[async_trait]
impl ServerHandler for HangingAcceptServerHandler {
type LocalData = ();
type Request = ();
type Response = ();
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_connect(&self, _: ConnectionId) -> io::Result<()> {
// Wait "forever" so we can ensure that we fail at this step
tokio::time::sleep(Duration::MAX).await;
Err(io::Error::new(io::ErrorKind::Other, "bad accept"))
Err(io::Error::new(io::ErrorKind::Other, "bad connect"))
}
async fn on_request(
&self,
_: ServerCtx<Self::Request, Self::Response, Self::LocalData>,
) {
async fn on_request(&self, _: RequestCtx<Self::Request, Self::Response>) {
unreachable!();
}
}
@ -1062,10 +1088,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle the client-side establishment of a full connection
let _client_task = tokio::spawn(Connection::client(t2, DummyAuthHandler));
let _client_task =
tokio::spawn(Connection::client(t2, DummyAuthHandler, server_version!()));
// Shutdown server connection task while it is accepting the connection, verifying that we
// do not get an error in return
@ -1083,19 +1111,15 @@ mod tests {
#[async_trait]
impl ServerHandler for AcceptServerHandler {
type LocalData = ();
type Request = ();
type Response = ();
async fn on_accept(&self, _: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
async fn on_connect(&self, _: ConnectionId) -> io::Result<()> {
self.tx.send(()).await.unwrap();
Ok(())
}
async fn on_request(
&self,
_: ServerCtx<Self::Request, Self::Response, Self::LocalData>,
) {
async fn on_request(&self, _: RequestCtx<Self::Request, Self::Response>) {
unreachable!();
}
}
@ -1118,10 +1142,12 @@ mod tests {
.shutdown_timer(Arc::downgrade(&shutdown_timer))
.heartbeat_duration(Duration::from_millis(200))
.verifier(Arc::downgrade(&verifier))
.version(server_version!())
.spawn();
// Spawn a task to handle the client-side establishment of a full connection
let _client_task = tokio::spawn(Connection::client(t2, DummyAuthHandler));
let _client_task =
tokio::spawn(Connection::client(t2, DummyAuthHandler, server_version!()));
// Wait to ensure we complete the accept call first
let _ = rx.recv().await;

@ -1,28 +1,29 @@
use std::sync::Arc;
use std::fmt;
use super::ServerReply;
use crate::common::{ConnectionId, Request};
/// Represents contextual information for working with an inbound request
pub struct ServerCtx<T, U, D> {
/// Unique identifer associated with the connection that sent the request
/// Represents contextual information for working with an inbound request.
pub struct RequestCtx<T, U> {
/// Unique identifer associated with the connection that sent the request.
pub connection_id: ConnectionId,
/// The request being handled
/// The request being handled.
pub request: Request<T>,
/// Used to send replies back to be sent out by the server
/// Used to send replies back to be sent out by the server.
pub reply: ServerReply<U>,
/// Reference to the connection's local data
pub local_data: Arc<D>,
}
/// Represents contextual information for working with an inbound connection
pub struct ConnectionCtx<'a, D> {
/// Unique identifer associated with the connection
pub connection_id: ConnectionId,
/// Reference to the connection's local data
pub local_data: &'a mut D,
impl<T, U> fmt::Debug for RequestCtx<T, U>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RequestCtx")
.field("connection_id", &self.connection_id)
.field("request", &self.request)
.field("reply", &"...")
.finish()
}
}

@ -1,94 +1,27 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use tokio::sync::broadcast;
use tokio::task::{JoinError, JoinHandle};
use crate::common::AsAny;
/// Interface to engage with a server instance.
pub trait ServerRef: AsAny + Send {
/// Returns true if the server is no longer running.
fn is_finished(&self) -> bool;
/// Sends a shutdown signal to the server.
fn shutdown(&self);
fn wait(self) -> Pin<Box<dyn Future<Output = io::Result<()>>>>
where
Self: Sized + 'static,
{
Box::pin(async {
let task = tokio::spawn(async move {
while !self.is_finished() {
tokio::time::sleep(Duration::from_millis(100)).await;
}
});
task.await
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
})
}
}
impl dyn ServerRef {
/// Attempts to convert this ref into a concrete ref by downcasting
pub fn as_server_ref<R: ServerRef>(&self) -> Option<&R> {
self.as_any().downcast_ref::<R>()
}
/// Attempts to convert this mutable ref into a concrete mutable ref by downcasting
pub fn as_mut_server_ref<R: ServerRef>(&mut self) -> Option<&mut R> {
self.as_mut_any().downcast_mut::<R>()
}
/// Attempts to convert this into a concrete, boxed ref by downcasting
pub fn into_boxed_server_ref<R: ServerRef>(
self: Box<Self>,
) -> Result<Box<R>, Box<dyn std::any::Any>> {
self.into_any().downcast::<R>()
}
/// Waits for the server to complete by continuously polling the finished state.
pub async fn polling_wait(&self) -> io::Result<()> {
while !self.is_finished() {
tokio::time::sleep(Duration::from_millis(100)).await;
}
Ok(())
}
}
/// Represents a generic reference to a server
pub struct GenericServerRef {
/// Represents a reference to a server
pub struct ServerRef {
pub(crate) shutdown: broadcast::Sender<()>,
pub(crate) task: JoinHandle<()>,
}
/// Runtime-specific implementation of [`ServerRef`] for a [`tokio::task::JoinHandle`]
impl ServerRef for GenericServerRef {
fn is_finished(&self) -> bool {
impl ServerRef {
pub fn is_finished(&self) -> bool {
self.task.is_finished()
}
fn shutdown(&self) {
pub fn shutdown(&self) {
let _ = self.shutdown.send(());
}
fn wait(self) -> Pin<Box<dyn Future<Output = io::Result<()>>>>
where
Self: Sized + 'static,
{
Box::pin(async {
self.task
.await
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))
})
}
}
impl Future for GenericServerRef {
impl Future for ServerRef {
type Output = Result<(), JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {

@ -1,36 +1,59 @@
use std::future::Future;
use std::net::IpAddr;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::task::JoinError;
use super::ServerRef;
/// Reference to a TCP server instance
/// Reference to a TCP server instance.
pub struct TcpServerRef {
pub(crate) addr: IpAddr,
pub(crate) port: u16,
pub(crate) inner: Box<dyn ServerRef>,
pub(crate) inner: ServerRef,
}
impl TcpServerRef {
pub fn new(addr: IpAddr, port: u16, inner: Box<dyn ServerRef>) -> Self {
pub fn new(addr: IpAddr, port: u16, inner: ServerRef) -> Self {
Self { addr, port, inner }
}
/// Returns the IP address that the listener is bound to
/// Returns the IP address that the listener is bound to.
pub fn ip_addr(&self) -> IpAddr {
self.addr
}
/// Returns the port that the listener is bound to
/// Returns the port that the listener is bound to.
pub fn port(&self) -> u16 {
self.port
}
/// Consumes ref, returning inner ref.
pub fn into_inner(self) -> ServerRef {
self.inner
}
}
impl ServerRef for TcpServerRef {
fn is_finished(&self) -> bool {
self.inner.is_finished()
impl Future for TcpServerRef {
type Output = Result<(), JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner.task).poll(cx)
}
}
impl Deref for TcpServerRef {
type Target = ServerRef;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
fn shutdown(&self) {
self.inner.shutdown();
impl DerefMut for TcpServerRef {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

@ -1,35 +1,53 @@
use std::future::Future;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::task::JoinError;
use super::ServerRef;
/// Reference to a unix socket server instance
/// Reference to a unix socket server instance.
pub struct UnixSocketServerRef {
pub(crate) path: PathBuf,
pub(crate) inner: Box<dyn ServerRef>,
pub(crate) inner: ServerRef,
}
impl UnixSocketServerRef {
pub fn new(path: PathBuf, inner: Box<dyn ServerRef>) -> Self {
pub fn new(path: PathBuf, inner: ServerRef) -> Self {
Self { path, inner }
}
/// Returns the path to the socket
/// Returns the path to the socket.
pub fn path(&self) -> &Path {
&self.path
}
/// Consumes ref, returning inner ref
pub fn into_inner(self) -> Box<dyn ServerRef> {
/// Consumes ref, returning inner ref.
pub fn into_inner(self) -> ServerRef {
self.inner
}
}
impl ServerRef for UnixSocketServerRef {
fn is_finished(&self) -> bool {
self.inner.is_finished()
impl Future for UnixSocketServerRef {
type Output = Result<(), JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner.task).poll(cx)
}
}
impl Deref for UnixSocketServerRef {
type Target = ServerRef;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
fn shutdown(&self) {
self.inner.shutdown();
impl DerefMut for UnixSocketServerRef {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

@ -1,35 +1,53 @@
use std::ffi::{OsStr, OsString};
use std::future::Future;
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::task::JoinError;
use super::ServerRef;
/// Reference to a unix socket server instance
/// Reference to a windows pipe server instance.
pub struct WindowsPipeServerRef {
pub(crate) addr: OsString,
pub(crate) inner: Box<dyn ServerRef>,
pub(crate) inner: ServerRef,
}
impl WindowsPipeServerRef {
pub fn new(addr: OsString, inner: Box<dyn ServerRef>) -> Self {
pub fn new(addr: OsString, inner: ServerRef) -> Self {
Self { addr, inner }
}
/// Returns the addr that the listener is bound to
/// Returns the addr that the listener is bound to.
pub fn addr(&self) -> &OsStr {
&self.addr
}
/// Consumes ref, returning inner ref
pub fn into_inner(self) -> Box<dyn ServerRef> {
/// Consumes ref, returning inner ref.
pub fn into_inner(self) -> ServerRef {
self.inner
}
}
impl ServerRef for WindowsPipeServerRef {
fn is_finished(&self) -> bool {
self.inner.is_finished()
impl Future for WindowsPipeServerRef {
type Output = Result<(), JoinError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut self.inner.task).poll(cx)
}
}
impl Deref for WindowsPipeServerRef {
type Target = ServerRef;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
fn shutdown(&self) {
self.inner.shutdown();
impl DerefMut for WindowsPipeServerRef {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}

@ -1,9 +1,7 @@
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use tokio::sync::{mpsc, Mutex};
use tokio::sync::mpsc;
use crate::common::{Id, Response};
@ -11,29 +9,18 @@ use crate::common::{Id, Response};
pub trait Reply: Send + Sync {
type Data;
/// Sends a reply out from the server
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>>;
/// Sends a reply out from the server.
fn send(&self, data: Self::Data) -> io::Result<()>;
/// Blocking version of sending a reply out from the server
fn blocking_send(&self, data: Self::Data) -> io::Result<()>;
/// Clones this reply
/// Clones this reply.
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>>;
}
impl<T: Send + 'static> Reply for mpsc::Sender<T> {
impl<T: Send + 'static> Reply for mpsc::UnboundedSender<T> {
type Data = T;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
Box::pin(async move {
mpsc::Sender::send(self, data)
.await
.map_err(|x| io::Error::new(io::ErrorKind::Other, x.to_string()))
})
}
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
mpsc::Sender::blocking_send(self, data)
fn send(&self, data: Self::Data) -> io::Result<()> {
mpsc::UnboundedSender::send(self, data)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x.to_string()))
}
@ -45,7 +32,7 @@ impl<T: Send + 'static> Reply for mpsc::Sender<T> {
/// Utility to send ad-hoc replies from the server back through the connection
pub struct ServerReply<T> {
pub(crate) origin_id: Id,
pub(crate) tx: mpsc::Sender<Response<T>>,
pub(crate) tx: mpsc::UnboundedSender<Response<T>>,
}
impl<T> Clone for ServerReply<T> {
@ -58,16 +45,9 @@ impl<T> Clone for ServerReply<T> {
}
impl<T> ServerReply<T> {
pub async fn send(&self, data: T) -> io::Result<()> {
pub fn send(&self, data: T) -> io::Result<()> {
self.tx
.send(Response::new(self.origin_id.clone(), data))
.await
.map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "Connection reply closed"))
}
pub fn blocking_send(&self, data: T) -> io::Result<()> {
self.tx
.blocking_send(Response::new(self.origin_id.clone(), data))
.map_err(|_| io::Error::new(io::ErrorKind::BrokenPipe, "Connection reply closed"))
}
@ -87,12 +67,8 @@ impl<T> ServerReply<T> {
impl<T: Send + 'static> Reply for ServerReply<T> {
type Data = T;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
Box::pin(ServerReply::send(self, data))
}
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
ServerReply::blocking_send(self, data)
fn send(&self, data: Self::Data) -> io::Result<()> {
ServerReply::send(self, data)
}
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>> {
@ -125,38 +101,27 @@ impl<T> QueuedServerReply<T> {
///
/// * If true, all messages are held until the queue is flushed
/// * If false, messages are sent directly as they come in
pub async fn hold(&self, hold: bool) {
*self.hold.lock().await = hold;
}
/// Send this message, adding it to a queue if holding messages
pub async fn send(&self, data: T) -> io::Result<()> {
if *self.hold.lock().await {
self.queue.lock().await.push(data);
Ok(())
} else {
self.inner.send(data).await
}
pub fn hold(&self, hold: bool) {
*self.hold.lock().unwrap() = hold;
}
/// Send this message, adding it to a queue if holding messages, blocking
/// for access to locks and other internals
pub fn blocking_send(&self, data: T) -> io::Result<()> {
if *self.hold.blocking_lock() {
self.queue.blocking_lock().push(data);
/// Send this message, adding it to a queue if holding messages.
pub fn send(&self, data: T) -> io::Result<()> {
if *self.hold.lock().unwrap() {
self.queue.lock().unwrap().push(data);
Ok(())
} else {
self.inner.blocking_send(data)
self.inner.send(data)
}
}
/// Send this message before anything else in the queue
pub async fn send_before(&self, data: T) -> io::Result<()> {
if *self.hold.lock().await {
self.queue.lock().await.insert(0, data);
pub fn send_before(&self, data: T) -> io::Result<()> {
if *self.hold.lock().unwrap() {
self.queue.lock().unwrap().insert(0, data);
Ok(())
} else {
self.inner.send(data).await
self.inner.send(data)
}
}
@ -165,14 +130,14 @@ impl<T> QueuedServerReply<T> {
/// Additionally, takes `hold` to indicate whether or not new msgs
/// after the flush should continue to be held within the queue
/// or if all future msgs will be sent immediately
pub async fn flush(&self, hold: bool) -> io::Result<()> {
pub fn flush(&self, hold: bool) -> io::Result<()> {
// Lock hold so we can ensure that nothing gets sent
// to the queue after we clear it
let mut hold_lock = self.hold.lock().await;
let mut hold_lock = self.hold.lock().unwrap();
// Clear the queue by sending everything
for data in self.queue.lock().await.drain(..) {
self.inner.send(data).await?;
for data in self.queue.lock().unwrap().drain(..) {
self.inner.send(data)?;
}
// Update hold to
@ -189,12 +154,8 @@ impl<T> QueuedServerReply<T> {
impl<T: Send + 'static> Reply for QueuedServerReply<T> {
type Data = T;
fn send(&self, data: Self::Data) -> Pin<Box<dyn Future<Output = io::Result<()>> + Send + '_>> {
Box::pin(QueuedServerReply::send(self, data))
}
fn blocking_send(&self, data: Self::Data) -> io::Result<()> {
QueuedServerReply::blocking_send(self, data)
fn send(&self, data: Self::Data) -> io::Result<()> {
QueuedServerReply::send(self, data)
}
fn clone_reply(&self) -> Box<dyn Reply<Data = Self::Data>> {

@ -31,7 +31,7 @@ impl<T> Default for ServerState<T> {
pub struct ConnectionState<T> {
shutdown_tx: oneshot::Sender<()>,
task: JoinHandle<Option<(mpsc::Sender<T>, mpsc::Receiver<T>)>>,
task: JoinHandle<Option<(mpsc::UnboundedSender<T>, mpsc::UnboundedReceiver<T>)>>,
}
impl<T: Send + 'static> ConnectionState<T> {
@ -40,7 +40,7 @@ impl<T: Send + 'static> ConnectionState<T> {
#[allow(clippy::type_complexity)]
pub fn channel() -> (
oneshot::Receiver<()>,
oneshot::Sender<(mpsc::Sender<T>, mpsc::Receiver<T>)>,
oneshot::Sender<(mpsc::UnboundedSender<T>, mpsc::UnboundedReceiver<T>)>,
Self,
) {
let (shutdown_tx, shutdown_rx) = oneshot::channel();
@ -65,7 +65,9 @@ impl<T: Send + 'static> ConnectionState<T> {
self.task.is_finished()
}
pub async fn shutdown_and_wait(self) -> Option<(mpsc::Sender<T>, mpsc::Receiver<T>)> {
pub async fn shutdown_and_wait(
self,
) -> Option<(mpsc::UnboundedSender<T>, mpsc::UnboundedReceiver<T>)> {
let _ = self.shutdown_tx.send(());
self.task.await.unwrap()
}

@ -6,7 +6,7 @@ use distant_net::boxed_connect_handler;
use distant_net::client::Client;
use distant_net::common::{Destination, InmemoryTransport, Map, OneshotListener};
use distant_net::manager::{Config, ManagerClient, ManagerServer};
use distant_net::server::{Server, ServerCtx, ServerHandler};
use distant_net::server::{RequestCtx, Server, ServerHandler};
use log::*;
use test_log::test;
@ -14,14 +14,12 @@ struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = String;
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
ctx.reply
.send(format!("echo {}", ctx.request.payload))
.await
.expect("Failed to send response")
}
}
@ -37,7 +35,7 @@ async fn should_be_able_to_establish_a_single_connection_and_communicate_with_a_
let (t1, t2) = InmemoryTransport::pair(100);
// Spawn a server on one end and connect to it on the other
let _ = Server::new()
let _server = Server::new()
.handler(TestServerHandler)
.verifier(Verifier::none())
.start(OneshotListener::from_value(t2))?;

@ -2,7 +2,7 @@ use async_trait::async_trait;
use distant_auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener};
use distant_net::server::{Server, ServerCtx, ServerHandler};
use distant_net::server::{RequestCtx, Server, ServerHandler};
use log::*;
use test_log::test;
@ -10,17 +10,15 @@ struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = (u8, String);
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
let (cnt, msg) = ctx.request.payload;
for i in 0..cnt {
ctx.reply
.send(format!("echo {i} {msg}"))
.await
.expect("Failed to send response");
}
}
@ -30,7 +28,7 @@ impl ServerHandler for TestServerHandler {
async fn should_be_able_to_send_and_receive_typed_payloads_between_client_and_server() {
let (t1, t2) = InmemoryTransport::pair(100);
let _ = Server::new()
let _server = Server::new()
.handler(TestServerHandler)
.verifier(Verifier::none())
.start(OneshotListener::from_value(t2))

@ -2,7 +2,7 @@ use async_trait::async_trait;
use distant_auth::{DummyAuthHandler, Verifier};
use distant_net::client::Client;
use distant_net::common::{InmemoryTransport, OneshotListener, Request};
use distant_net::server::{Server, ServerCtx, ServerHandler};
use distant_net::server::{RequestCtx, Server, ServerHandler};
use log::*;
use test_log::test;
@ -10,17 +10,15 @@ struct TestServerHandler;
#[async_trait]
impl ServerHandler for TestServerHandler {
type LocalData = ();
type Request = (u8, String);
type Response = String;
async fn on_request(&self, ctx: ServerCtx<Self::Request, Self::Response, Self::LocalData>) {
async fn on_request(&self, ctx: RequestCtx<Self::Request, Self::Response>) {
let (cnt, msg) = ctx.request.payload;
for i in 0..cnt {
ctx.reply
.send(format!("echo {i} {msg}"))
.await
.expect("Failed to send response");
}
}
@ -30,7 +28,7 @@ impl ServerHandler for TestServerHandler {
async fn should_be_able_to_send_and_receive_untyped_payloads_between_client_and_server() {
let (t1, t2) = InmemoryTransport::pair(100);
let _ = Server::new()
let _server = Server::new()
.handler(TestServerHandler)
.verifier(Verifier::none())
.start(OneshotListener::from_value(t2))

@ -3,7 +3,7 @@ name = "distant-protocol"
description = "Protocol library for distant, providing data structures used between the client and server"
categories = ["data-structures"]
keywords = ["protocol"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -17,8 +17,10 @@ tests = []
[dependencies]
bitflags = "2.3.1"
const-str = "0.5.6"
derive_more = { version = "0.99.17", default-features = false, features = ["deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant"] }
regex = "1.8.3"
semver = { version = "1.0.17", features = ["serde"] }
serde = { version = "1.0.163", features = ["derive"] }
serde_bytes = "0.11.9"
strum = { version = "0.24.1", features = ["derive"] }

@ -1,13 +1,13 @@
# distant protocol
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-protocol.svg
[distant_crates_lnk]: https://crates.io/crates/distant-protocol
[distant_doc_img]: https://docs.rs/distant-protocol/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-protocol
[distant_rustc_img]: https://img.shields.io/badge/distant_protocol-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_protocol-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
## Details

@ -1,4 +1,3 @@
mod capabilities;
mod change;
mod cmd;
mod error;
@ -10,7 +9,6 @@ mod search;
mod system;
mod version;
pub use capabilities::*;
pub use change::*;
pub use cmd::*;
pub use error::*;
@ -24,6 +22,3 @@ pub use version::*;
/// Id for a remote process
pub type ProcessId = u32;
/// Version indicated by the tuple of (major, minor, patch).
pub type SemVer = (u8, u8, u8);

@ -1,380 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
/// Represents the kinds of capabilities available.
pub use crate::request::RequestKind as CapabilityKind;
/// Set of supported capabilities for a server
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[serde(transparent)]
pub struct Capabilities(#[into_iterator(owned, ref)] HashSet<Capability>);
impl Capabilities {
/// Return set of capabilities encompassing all possible capabilities
pub fn all() -> Self {
Self(CapabilityKind::iter().map(Capability::from).collect())
}
/// Return empty set of capabilities
pub fn none() -> Self {
Self(HashSet::new())
}
/// Returns true if the capability with described kind is included
pub fn contains(&self, kind: impl AsRef<str>) -> bool {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.contains(&cap)
}
/// Adds the specified capability to the set of capabilities
///
/// * If the set did not have this capability, returns `true`
/// * If the set did have this capability, returns `false`
pub fn insert(&mut self, cap: impl Into<Capability>) -> bool {
self.0.insert(cap.into())
}
/// Removes the capability with the described kind, returning the capability
pub fn take(&mut self, kind: impl AsRef<str>) -> Option<Capability> {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.take(&cap)
}
/// Removes the capability with the described kind, returning true if it existed
pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
let cap = Capability {
kind: kind.as_ref().to_string(),
description: String::new(),
};
self.0.remove(&cap)
}
/// Converts into vec of capabilities sorted by kind
pub fn into_sorted_vec(self) -> Vec<Capability> {
let mut this = self.0.into_iter().collect::<Vec<_>>();
this.sort_unstable();
this
}
}
impl AsRef<HashSet<Capability>> for Capabilities {
fn as_ref(&self) -> &HashSet<Capability> {
&self.0
}
}
impl AsMut<HashSet<Capability>> for Capabilities {
fn as_mut(&mut self) -> &mut HashSet<Capability> {
&mut self.0
}
}
impl Deref for Capabilities {
type Target = HashSet<Capability>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl DerefMut for Capabilities {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl BitAnd for &Capabilities {
type Output = Capabilities;
fn bitand(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitand(&rhs.0))
}
}
impl BitOr for &Capabilities {
type Output = Capabilities;
fn bitor(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitor(&rhs.0))
}
}
impl BitOr<Capability> for &Capabilities {
type Output = Capabilities;
fn bitor(self, rhs: Capability) -> Self::Output {
let mut other = Capabilities::none();
other.0.insert(rhs);
self.bitor(&other)
}
}
impl BitXor for &Capabilities {
type Output = Capabilities;
fn bitxor(self, rhs: Self) -> Self::Output {
Capabilities(self.0.bitxor(&rhs.0))
}
}
impl FromIterator<Capability> for Capabilities {
fn from_iter<I: IntoIterator<Item = Capability>>(iter: I) -> Self {
let mut this = Capabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// Capability tied to a server. A capability is equivalent based on its kind and not description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Capability {
/// Label describing the kind of capability
pub kind: String,
/// Information about the capability
pub description: String,
}
impl Capability {
/// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible,
/// returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<CapabilityKind> {
CapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for Capability {
fn eq(&self, other: &Self) -> bool {
self.kind.eq_ignore_ascii_case(&other.kind)
}
}
impl Eq for Capability {}
impl PartialOrd for Capability {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for Capability {
fn cmp(&self, other: &Self) -> Ordering {
self.kind
.to_ascii_lowercase()
.cmp(&other.kind.to_ascii_lowercase())
}
}
impl Hash for Capability {
fn hash<H: Hasher>(&self, state: &mut H) {
self.kind.to_ascii_lowercase().hash(state);
}
}
impl From<CapabilityKind> for Capability {
/// Creates a new capability using the kind's default message
fn from(kind: CapabilityKind) -> Self {
Self {
kind: kind.to_string(),
description: kind
.get_message()
.map(ToString::to_string)
.unwrap_or_default(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
mod capabilities {
use super::*;
#[test]
fn should_be_able_to_serialize_to_json() {
let capabilities: Capabilities = [Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect();
let value = serde_json::to_value(capabilities).unwrap();
assert_eq!(
value,
serde_json::json!([
{
"kind": "some kind",
"description": "some description",
}
])
);
}
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!([
{
"kind": "some kind",
"description": "some description",
}
]);
let capabilities: Capabilities = serde_json::from_value(value).unwrap();
assert_eq!(
capabilities,
[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect()
);
}
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let capabilities: Capabilities = [Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect();
// NOTE: We don't actually check the output here because it's an implementation detail
// and could change as we change how serialization is done. This is merely to verify
// that we can serialize since there are times when serde fails to serialize at
// runtime.
let _ = rmp_serde::encode::to_vec_named(&capabilities).unwrap();
}
#[test]
fn should_be_able_to_deserialize_from_msgpack() {
// NOTE: It may seem odd that we are serializing just to deserialize, but this is to
// verify that we are not corrupting or preventing issues when serializing on a
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(
&[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect::<Capabilities>(),
)
.unwrap();
let capabilities: Capabilities = rmp_serde::decode::from_slice(&buf).unwrap();
assert_eq!(
capabilities,
[Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}]
.into_iter()
.collect()
);
}
}
mod capability {
use super::*;
#[test]
fn should_be_able_to_serialize_to_json() {
let capability = Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
};
let value = serde_json::to_value(capability).unwrap();
assert_eq!(
value,
serde_json::json!({
"kind": "some kind",
"description": "some description",
})
);
}
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"kind": "some kind",
"description": "some description",
});
let capability: Capability = serde_json::from_value(value).unwrap();
assert_eq!(
capability,
Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}
);
}
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let capability = Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
};
// NOTE: We don't actually check the output here because it's an implementation detail
// and could change as we change how serialization is done. This is merely to verify
// that we can serialize since there are times when serde fails to serialize at
// runtime.
let _ = rmp_serde::encode::to_vec_named(&capability).unwrap();
}
#[test]
fn should_be_able_to_deserialize_from_msgpack() {
// NOTE: It may seem odd that we are serializing just to deserialize, but this is to
// verify that we are not corrupting or causing issues when serializing on a
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
})
.unwrap();
let capability: Capability = rmp_serde::decode::from_slice(&buf).unwrap();
assert_eq!(
capability,
Capability {
kind: "some kind".to_string(),
description: "some description".to_string(),
}
);
}
}
}

@ -10,35 +10,47 @@ use derive_more::{Deref, DerefMut, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumString, EnumVariantNames, VariantNames};
/// Change to one or more paths on the filesystem.
/// Change to a path on the filesystem.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Change {
/// Unix timestamp (in seconds) when the server was notified of this change (not when the
/// change occurred)
#[serde(rename = "ts")]
pub timestamp: u64,
/// Label describing the kind of change
pub kind: ChangeKind,
/// Paths that were changed
pub paths: Vec<PathBuf>,
/// Path that was changed
pub path: PathBuf,
/// Additional details associated with the change
#[serde(default, skip_serializing_if = "ChangeDetails::is_empty")]
pub details: ChangeDetails,
}
/// Details about a change
/// Optional details about a change.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default, rename_all = "snake_case", deny_unknown_fields)]
pub struct ChangeDetails {
/// Clarity on type of attribute changes that have occurred (for kind == attribute)
#[serde(skip_serializing_if = "Vec::is_empty")]
pub attributes: Vec<ChangeDetailsAttributes>,
/// Clarity on type of attribute change that occurred (for kind == attribute).
#[serde(skip_serializing_if = "Option::is_none")]
pub attribute: Option<ChangeDetailsAttribute>,
/// When event is renaming, this will be populated with the resulting name
/// when we know both the old and new names (for kind == rename)
#[serde(skip_serializing_if = "Option::is_none")]
pub renamed: Option<PathBuf>,
/// Unix timestamps (in seconds) related to the change. For other platforms, their timestamps
/// are converted into a Unix timestamp format.
///
/// * For create events, this represents the `ctime` field from stat (or equivalent on other platforms).
/// * For modify events, this represents the `mtime` field from stat (or equivalent on other platforms).
#[serde(skip_serializing_if = "Option::is_none")]
pub timestamp: Option<u64>,
/// Optional information about the change that is typically platform-specific
/// Optional information about the change that is typically platform-specific.
#[serde(skip_serializing_if = "Option::is_none")]
pub extra: Option<String>,
}
@ -46,14 +58,18 @@ pub struct ChangeDetails {
impl ChangeDetails {
/// Returns true if no details are contained within.
pub fn is_empty(&self) -> bool {
self.attributes.is_empty() && self.extra.is_none()
self.attribute.is_none()
&& self.renamed.is_none()
&& self.timestamp.is_none()
&& self.extra.is_none()
}
}
/// Specific details about modification
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub enum ChangeDetailsAttributes {
pub enum ChangeDetailsAttribute {
Ownership,
Permissions,
Timestamp,
}

@ -12,22 +12,6 @@ impl Cmd {
pub fn new(cmd: impl Into<String>) -> Self {
Self(cmd.into())
}
/// Returns reference to the program portion of the command
pub fn program(&self) -> &str {
match self.0.split_once(' ') {
Some((program, _)) => program.trim(),
None => self.0.trim(),
}
}
/// Returns reference to the arguments portion of the command
pub fn arguments(&self) -> &str {
match self.0.split_once(' ') {
Some((_, arguments)) => arguments.trim(),
None => "",
}
}
}
impl Deref for Cmd {

@ -4,7 +4,6 @@ use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use crate::common::FileType;
use crate::utils::{deserialize_u128_option, serialize_u128_option};
/// Represents metadata about some path on a remote machine.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
@ -23,41 +22,20 @@ pub struct Metadata {
/// Whether or not the file/directory/symlink is marked as unwriteable.
pub readonly: bool,
/// Represents the last time (in milliseconds) when the file/directory/symlink was accessed;
/// Represents the last time (in seconds) when the file/directory/symlink was accessed;
/// can be optional as certain systems don't support this.
///
/// Note that this is represented as a string and not a number when serialized!
#[serde(
default,
skip_serializing_if = "Option::is_none",
serialize_with = "serialize_u128_option",
deserialize_with = "deserialize_u128_option"
)]
pub accessed: Option<u128>,
/// Represents when (in milliseconds) the file/directory/symlink was created;
#[serde(default, skip_serializing_if = "Option::is_none")]
pub accessed: Option<u64>,
/// Represents when (in seconds) the file/directory/symlink was created;
/// can be optional as certain systems don't support this.
///
/// Note that this is represented as a string and not a number when serialized!
#[serde(
default,
skip_serializing_if = "Option::is_none",
serialize_with = "serialize_u128_option",
deserialize_with = "deserialize_u128_option"
)]
pub created: Option<u128>,
/// Represents the last time (in milliseconds) when the file/directory/symlink was modified;
#[serde(default, skip_serializing_if = "Option::is_none")]
pub created: Option<u64>,
/// Represents the last time (in seconds) when the file/directory/symlink was modified;
/// can be optional as certain systems don't support this.
///
/// Note that this is represented as a string and not a number when serialized!
#[serde(
default,
skip_serializing_if = "Option::is_none",
serialize_with = "serialize_u128_option",
deserialize_with = "deserialize_u128_option"
)]
pub modified: Option<u128>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub modified: Option<u64>,
/// Represents metadata that is specific to a unix remote machine.
#[serde(default, skip_serializing_if = "Option::is_none")]
@ -369,9 +347,9 @@ mod tests {
file_type: FileType::Dir,
len: 999,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -402,10 +380,6 @@ mod tests {
}),
};
// NOTE: These values are too big to normally serialize, so we have to convert them to
// a string type, which is why the value here also needs to be a string.
let max_u128_str = u128::MAX.to_string();
let value = serde_json::to_value(metadata).unwrap();
assert_eq!(
value,
@ -414,9 +388,9 @@ mod tests {
"file_type": "dir",
"len": 999,
"readonly": true,
"accessed": max_u128_str,
"created": max_u128_str,
"modified": max_u128_str,
"accessed": u64::MAX,
"created": u64::MAX,
"modified": u64::MAX,
"unix": {
"owner_read": true,
"owner_write": false,
@ -476,18 +450,14 @@ mod tests {
#[test]
fn should_be_able_to_deserialize_full_metadata_from_json() {
// NOTE: These values are too big to normally serialize, so we have to convert them to
// a string type, which is why the value here also needs to be a string.
let max_u128_str = u128::MAX.to_string();
let value = serde_json::json!({
"canonicalized_path": "test-dir",
"file_type": "dir",
"len": 999,
"readonly": true,
"accessed": max_u128_str,
"created": max_u128_str,
"modified": max_u128_str,
"accessed": u64::MAX,
"created": u64::MAX,
"modified": u64::MAX,
"unix": {
"owner_read": true,
"owner_write": false,
@ -526,9 +496,9 @@ mod tests {
file_type: FileType::Dir,
len: 999,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -589,9 +559,9 @@ mod tests {
file_type: FileType::Dir,
len: 999,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -676,9 +646,9 @@ mod tests {
file_type: FileType::Dir,
len: 999,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -718,9 +688,9 @@ mod tests {
file_type: FileType::Dir,
len: 999,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,

@ -296,7 +296,7 @@ impl Permissions {
/// Converts a Unix `mode` into the permission set.
pub fn from_unix_mode(mode: u32) -> Self {
let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
let flags = UnixFilePermissionFlags::from_bits_truncate(mode & 0o777);
Self {
owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)),
owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)),
@ -426,15 +426,15 @@ impl From<Permissions> for std::fs::Permissions {
bitflags! {
struct UnixFilePermissionFlags: u32 {
const OWNER_READ = 0o400;
const OWNER_WRITE = 0o200;
const OWNER_EXEC = 0o100;
const GROUP_READ = 0o40;
const GROUP_WRITE = 0o20;
const GROUP_EXEC = 0o10;
const OTHER_READ = 0o4;
const OTHER_WRITE = 0o2;
const OTHER_EXEC = 0o1;
const OWNER_READ = 0o400;
const OWNER_WRITE = 0o200;
const OWNER_EXEC = 0o100;
const GROUP_READ = 0o040;
const GROUP_WRITE = 0o020;
const GROUP_EXEC = 0o010;
const OTHER_READ = 0o004;
const OTHER_WRITE = 0o002;
const OTHER_EXEC = 0o001;
}
}
@ -442,6 +442,364 @@ bitflags! {
mod tests {
use super::*;
#[test]
fn should_properly_parse_unix_mode_into_permissions() {
let permissions = Permissions::from_unix_mode(0o400);
assert_eq!(
permissions,
Permissions {
owner_read: Some(true),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o200);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(true),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o100);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(true),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o040);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(true),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o020);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(true),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o010);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(true),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o004);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(true),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o002);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(true),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o001);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(true),
}
);
let permissions = Permissions::from_unix_mode(0o000);
assert_eq!(
permissions,
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
);
let permissions = Permissions::from_unix_mode(0o777);
assert_eq!(
permissions,
Permissions {
owner_read: Some(true),
owner_write: Some(true),
owner_exec: Some(true),
group_read: Some(true),
group_write: Some(true),
group_exec: Some(true),
other_read: Some(true),
other_write: Some(true),
other_exec: Some(true),
}
);
}
#[test]
fn should_properly_convert_into_unix_mode() {
assert_eq!(
Permissions {
owner_read: Some(true),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o400
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(true),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o200
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(true),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o100
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(true),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o040
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(true),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o020
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(true),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o010
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(true),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o004
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(true),
other_exec: Some(false),
}
.to_unix_mode(),
0o002
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(true),
}
.to_unix_mode(),
0o001
);
assert_eq!(
Permissions {
owner_read: Some(false),
owner_write: Some(false),
owner_exec: Some(false),
group_read: Some(false),
group_write: Some(false),
group_exec: Some(false),
other_read: Some(false),
other_write: Some(false),
other_exec: Some(false),
}
.to_unix_mode(),
0o000
);
assert_eq!(
Permissions {
owner_read: Some(true),
owner_write: Some(true),
owner_exec: Some(true),
group_read: Some(true),
group_write: Some(true),
group_exec: Some(true),
other_read: Some(true),
other_write: Some(true),
other_exec: Some(true),
}
.to_unix_mode(),
0o777
);
}
#[test]
fn should_be_able_to_serialize_minimal_permissions_to_json() {
let permissions = Permissions {

@ -230,6 +230,35 @@ pub struct SearchQueryOptions {
/// include the remaining results even if less than pagination request.
#[serde(skip_serializing_if = "Option::is_none")]
pub pagination: Option<u64>,
/// If true, will skip searching hidden files.
#[serde(skip_serializing_if = "utils::is_false")]
pub ignore_hidden: bool,
/// If true, will read `.ignore` files that are used by `ripgrep` and `The Silver Searcher`
/// to determine which files and directories to not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_ignore_files: bool,
/// If true, will read `.ignore` files from parent directories that are used by `ripgrep` and
/// `The Silver Searcher` to determine which files and directories to not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_parent_ignore_files: bool,
/// If true, will read `.gitignore` files to determine which files and directories to not
/// search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_git_ignore_files: bool,
/// If true, will read global `.gitignore` files to determine which files and directories to
/// not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_global_git_ignore_files: bool,
/// If true, will read `.git/info/exclude` files to determine which files and directories to
/// not search.
#[serde(skip_serializing_if = "utils::is_false")]
pub use_git_exclude_files: bool,
}
/// Represents a match for a search query
@ -929,6 +958,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
};
let value = serde_json::to_value(options).unwrap();
@ -950,6 +985,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
};
let value = serde_json::to_value(options).unwrap();
@ -970,6 +1011,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
})
);
}
@ -990,6 +1037,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
}
);
}
@ -1011,6 +1064,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
});
let options: SearchQueryOptions = serde_json::from_value(value).unwrap();
@ -1029,6 +1088,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
}
);
}
@ -1044,6 +1109,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -1068,6 +1139,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -1092,6 +1169,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
})
.unwrap();
@ -1107,6 +1190,12 @@ mod tests {
limit: None,
max_depth: None,
pagination: None,
ignore_hidden: false,
use_ignore_files: false,
use_parent_ignore_files: false,
use_git_ignore_files: false,
use_global_git_ignore_files: false,
use_git_exclude_files: false,
}
);
}
@ -1130,6 +1219,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
})
.unwrap();
@ -1149,6 +1244,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
}
);
}

@ -1,48 +1,80 @@
use serde::{Deserialize, Serialize};
use crate::common::{Capabilities, SemVer};
use crate::semver;
/// Represents version information.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
/// General version of server (arbitrary format)
pub server_version: String,
/// Server version.
pub server_version: semver::Version,
/// Protocol version
pub protocol_version: SemVer,
/// Protocol version.
pub protocol_version: semver::Version,
/// Capabilities of the server
pub capabilities: Capabilities,
/// Additional features available.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<String>,
}
impl Version {
    /// Supports executing processes.
    pub const CAP_EXEC: &'static str = "exec";

    /// Supports reading and writing via filesystem IO.
    pub const CAP_FS_IO: &'static str = "fs_io";

    /// Supports modifying permissions of filesystem.
    pub const CAP_FS_PERM: &'static str = "fs_perm";

    /// Supports searching filesystem.
    pub const CAP_FS_SEARCH: &'static str = "fs_search";

    /// Supports watching filesystem for changes.
    pub const CAP_FS_WATCH: &'static str = "fs_watch";

    // Not yet implemented. These are kept as regular `//` comments rather than
    // `///` doc comments: doc comments separated only by line comments would
    // otherwise attach to the next real item (`CAP_SYS_INFO`) and document it
    // incorrectly.
    //
    // Supports TCP tunneling.
    // pub const CAP_TCP_TUNNEL: &'static str = "tcp_tunnel";
    //
    // Supports TCP reverse tunneling.
    // pub const CAP_TCP_REV_TUNNEL: &'static str = "tcp_rev_tunnel";

    /// Supports retrieving system information.
    pub const CAP_SYS_INFO: &'static str = "sys_info";

    /// Returns the full, fixed set of capability strings known to this build.
    pub const fn capabilities() -> &'static [&'static str] {
        &[
            Self::CAP_EXEC,
            Self::CAP_FS_IO,
            Self::CAP_FS_PERM,
            Self::CAP_FS_SEARCH,
            Self::CAP_FS_WATCH,
            /* Self::CAP_TCP_TUNNEL,
            Self::CAP_TCP_REV_TUNNEL, */
            Self::CAP_SYS_INFO,
        ]
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::common::Capability;
use semver::Version as SemVer;
#[test]
fn should_be_able_to_serialize_to_json() {
let version = Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
};
let value = serde_json::to_value(version).unwrap();
assert_eq!(
value,
serde_json::json!({
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}]
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"]
})
);
}
@ -50,26 +82,18 @@ mod tests {
#[test]
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}]
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"]
});
let version: Version = serde_json::from_value(value).unwrap();
assert_eq!(
version,
Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}
);
}
@ -77,14 +101,9 @@ mod tests {
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let version = Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
};
// NOTE: We don't actually check the output here because it's an implementation detail
@ -101,14 +120,9 @@ mod tests {
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
.unwrap();
@ -116,14 +130,9 @@ mod tests {
assert_eq!(
version,
Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}
);
}

@ -14,10 +14,95 @@ pub use common::*;
pub use msg::*;
pub use request::*;
pub use response::*;
pub use semver;
/// Protocol version indicated by the tuple of (major, minor, patch).
/// Protocol version of major/minor/patch.
///
/// This is different from the crate version, which matches that of the complete suite of distant
/// crates. Rather, this version is used to provide stability indicators when the protocol itself
/// changes across crate versions.
pub const PROTOCOL_VERSION: SemVer = (0, 1, 0);
/// This should match the version of this crate such that any significant change to the crate
/// version will also be reflected in this constant that can be used to verify compatibility across
/// the wire.
pub const PROTOCOL_VERSION: semver::Version = semver::Version::new(
    // Major/minor/patch are parsed at compile time from this crate's own
    // Cargo.toml version (via `const_str::parse!` over the `CARGO_PKG_VERSION_*`
    // env vars), keeping the protocol version in lockstep with the crate version.
    const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64),
    const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64),
);
/// Comparators used to indicate the [lower, upper) bounds of supported protocol versions.
///
/// Together these express the semver range `>=MAJOR.MINOR.PATCH, <UPPER`, where `UPPER`
/// is the next major version — or the next minor version while major is still 0, since
/// pre-1.0 semver treats minor bumps as breaking.
const PROTOCOL_VERSION_COMPAT: (semver::Comparator, semver::Comparator) = (
    // Lower bound (inclusive): this crate's own major.minor.patch version.
    semver::Comparator {
        op: semver::Op::GreaterEq,
        major: const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64),
        minor: Some(const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64)),
        patch: Some(const_str::parse!(env!("CARGO_PKG_VERSION_PATCH"), u64)),
        pre: semver::Prerelease::EMPTY,
    },
    // Upper bound (exclusive): the next breaking version per semver rules.
    semver::Comparator {
        op: semver::Op::Less,
        major: {
            let major = const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64);

            // If we have a version like 0.20, then the upper bound is 0.21,
            // otherwise if we have a version like 1.2, then the upper bound is 2.0
            //
            // So only increment the major if it is greater than 0
            if major > 0 {
                major + 1
            } else {
                major
            }
        },
        minor: {
            let major = const_str::parse!(env!("CARGO_PKG_VERSION_MAJOR"), u64);
            let minor = const_str::parse!(env!("CARGO_PKG_VERSION_MINOR"), u64);

            // If we have a version like 0.20, then the upper bound is 0.21,
            // otherwise if we have a version like 1.2, then the upper bound is 2.0
            //
            // So only increment the minor if major is 0
            if major > 0 {
                None
            } else {
                Some(minor + 1)
            }
        },
        // No patch constraint on the upper bound: any patch below the computed
        // major/minor ceiling is acceptable.
        patch: None,
        pre: semver::Prerelease::EMPTY,
    },
);
/// Returns true if the provided version is compatible with the protocol version.
///
/// ```
/// use distant_protocol::{is_compatible_with, PROTOCOL_VERSION};
/// use distant_protocol::semver::Version;
///
/// // The current protocol version tied to this crate is always compatible
/// assert!(is_compatible_with(&PROTOCOL_VERSION));
///
/// // Major bumps in distant's protocol version are always considered incompatible
/// assert!(!is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major + 1,
/// PROTOCOL_VERSION.minor,
/// PROTOCOL_VERSION.patch,
/// )));
///
/// // While distant's protocol is being stabilized, minor version bumps
/// // are also considered incompatible!
/// assert!(!is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major,
/// PROTOCOL_VERSION.minor + 1,
/// PROTOCOL_VERSION.patch,
/// )));
///
/// // Patch bumps in distant's protocol are always considered compatible
/// assert!(is_compatible_with(&Version::new(
/// PROTOCOL_VERSION.major,
/// PROTOCOL_VERSION.minor,
/// PROTOCOL_VERSION.patch + 1,
/// )));
/// ```
pub fn is_compatible_with(version: &semver::Version) -> bool {
    // Compatible means the version satisfies both comparators, i.e. it falls
    // within the [lower, upper) range described by PROTOCOL_VERSION_COMPAT.
    let (at_least, strictly_below) = PROTOCOL_VERSION_COMPAT;
    at_least.matches(version) && strictly_below.matches(version)
}

@ -3,7 +3,6 @@ use std::path::PathBuf;
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
use crate::common::{
ChangeKind, Cmd, Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions,
@ -14,26 +13,10 @@ use crate::utils;
pub type Environment = HashMap<String, String>;
/// Represents the payload of a request to be performed on the remote machine
#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[strum_discriminants(name(RequestKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[derive(Clone, Debug, PartialEq, Eq, IsVariant, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum Request {
/// Reads a file from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading binary file"))]
FileRead {
/// The path to the file on the remote machine
path: PathBuf,
@ -41,7 +24,6 @@ pub enum Request {
/// Reads a file from the specified path on the remote machine
/// and treats the contents as text
#[strum_discriminants(strum(message = "Supports reading text file"))]
FileReadText {
/// The path to the file on the remote machine
path: PathBuf,
@ -49,7 +31,6 @@ pub enum Request {
/// Writes a file, creating it if it does not exist, and overwriting any existing content
/// on the remote machine
#[strum_discriminants(strum(message = "Supports writing binary file"))]
FileWrite {
/// The path to the file on the remote machine
path: PathBuf,
@ -61,7 +42,6 @@ pub enum Request {
/// Writes a file using text instead of bytes, creating it if it does not exist,
/// and overwriting any existing content on the remote machine
#[strum_discriminants(strum(message = "Supports writing text file"))]
FileWriteText {
/// The path to the file on the remote machine
path: PathBuf,
@ -71,7 +51,6 @@ pub enum Request {
},
/// Appends to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to binary file"))]
FileAppend {
/// The path to the file on the remote machine
path: PathBuf,
@ -82,7 +61,6 @@ pub enum Request {
},
/// Appends text to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to text file"))]
FileAppendText {
/// The path to the file on the remote machine
path: PathBuf,
@ -92,7 +70,6 @@ pub enum Request {
},
/// Reads a directory from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading directory"))]
DirRead {
/// The path to the directory on the remote machine
path: PathBuf,
@ -126,7 +103,6 @@ pub enum Request {
},
/// Creates a directory on the remote machine
#[strum_discriminants(strum(message = "Supports creating directory"))]
DirCreate {
/// The path to the directory on the remote machine
path: PathBuf,
@ -137,7 +113,6 @@ pub enum Request {
},
/// Removes a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))]
Remove {
/// The path to the file or directory on the remote machine
path: PathBuf,
@ -149,7 +124,6 @@ pub enum Request {
},
/// Copies a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))]
Copy {
/// The path to the file or directory on the remote machine
src: PathBuf,
@ -159,7 +133,6 @@ pub enum Request {
},
/// Moves/renames a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))]
Rename {
/// The path to the file or directory on the remote machine
src: PathBuf,
@ -169,7 +142,6 @@ pub enum Request {
},
/// Watches a path for changes
#[strum_discriminants(strum(message = "Supports watching filesystem for changes"))]
Watch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -189,23 +161,18 @@ pub enum Request {
},
/// Unwatches a path for changes, meaning no additional changes will be reported
#[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))]
Unwatch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
},
/// Checks whether the given path exists
#[strum_discriminants(strum(message = "Supports checking if a path exists"))]
Exists {
/// The path to the file or directory on the remote machine
path: PathBuf,
},
/// Retrieves filesystem metadata for the specified path on the remote machine
#[strum_discriminants(strum(
message = "Supports retrieving metadata about a file, directory, or symlink"
))]
Metadata {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -222,9 +189,6 @@ pub enum Request {
},
/// Sets permissions on a file, directory, or symlink on the remote machine
#[strum_discriminants(strum(
message = "Supports setting permissions on a file, directory, or symlink"
))]
SetPermissions {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
@ -238,23 +202,18 @@ pub enum Request {
},
/// Searches filesystem using the provided query
#[strum_discriminants(strum(message = "Supports searching filesystem using queries"))]
Search {
/// Query to perform against the filesystem
query: SearchQuery,
},
/// Cancels an active search being run against the filesystem
#[strum_discriminants(strum(
message = "Supports canceling an active search against the filesystem"
))]
CancelSearch {
/// Id of the search to cancel
id: SearchId,
},
/// Spawns a new process on the remote machine
#[strum_discriminants(strum(message = "Supports spawning a process"))]
ProcSpawn {
/// The full command to run including arguments
cmd: Cmd,
@ -273,14 +232,12 @@ pub enum Request {
},
/// Kills a process running on the remote machine
#[strum_discriminants(strum(message = "Supports killing a spawned process"))]
ProcKill {
/// Id of the actively-running process
id: ProcessId,
},
/// Sends additional data to stdin of running process
#[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))]
ProcStdin {
/// Id of the actively-running process to send stdin data
id: ProcessId,
@ -291,7 +248,6 @@ pub enum Request {
},
/// Resize pty of remote process
#[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))]
ProcResizePty {
/// Id of the actively-running process whose pty to resize
id: ProcessId,
@ -301,11 +257,9 @@ pub enum Request {
},
/// Retrieve information about the server and the system it is on
#[strum_discriminants(strum(message = "Supports retrieving system information"))]
SystemInfo {},
/// Retrieve information about the server's protocol version
#[strum_discriminants(strum(message = "Supports retrieving version"))]
Version {},
}
@ -2114,6 +2068,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
};
@ -2145,6 +2105,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
},
},
})
@ -2205,6 +2171,12 @@ mod tests {
"limit": u64::MAX,
"max_depth": u64::MAX,
"pagination": u64::MAX,
"ignore_hidden": true,
"use_ignore_files": true,
"use_parent_ignore_files": true,
"use_git_ignore_files": true,
"use_global_git_ignore_files": true,
"use_git_exclude_files": true,
},
},
});
@ -2230,6 +2202,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
}
@ -2274,6 +2252,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
};
@ -2339,6 +2323,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
})
@ -2365,6 +2355,12 @@ mod tests {
limit: Some(u64::MAX),
max_depth: Some(u64::MAX),
pagination: Some(u64::MAX),
ignore_hidden: true,
use_ignore_files: true,
use_parent_ignore_files: true,
use_git_ignore_files: true,
use_global_git_ignore_files: true,
use_git_exclude_files: true,
},
},
}

@ -615,14 +615,14 @@ mod tests {
use std::path::PathBuf;
use super::*;
use crate::common::{ChangeDetails, ChangeDetailsAttributes, ChangeKind};
use crate::common::{ChangeDetails, ChangeDetailsAttribute, ChangeKind};
#[test]
fn should_be_able_to_serialize_minimal_payload_to_json() {
let payload = Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails::default(),
});
@ -631,9 +631,9 @@ mod tests {
value,
serde_json::json!({
"type": "changed",
"ts": u64::MAX,
"timestamp": u64::MAX,
"kind": "access",
"paths": ["path"],
"path": "path",
})
);
}
@ -643,9 +643,11 @@ mod tests {
let payload = Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails {
attributes: vec![ChangeDetailsAttributes::Permissions],
attribute: Some(ChangeDetailsAttribute::Permissions),
renamed: Some(PathBuf::from("renamed")),
timestamp: Some(u64::MAX),
extra: Some(String::from("info")),
},
});
@ -655,11 +657,13 @@ mod tests {
value,
serde_json::json!({
"type": "changed",
"ts": u64::MAX,
"timestamp": u64::MAX,
"kind": "access",
"paths": ["path"],
"path": "path",
"details": {
"attributes": ["permissions"],
"attribute": "permissions",
"renamed": "renamed",
"timestamp": u64::MAX,
"extra": "info",
},
})
@ -670,9 +674,9 @@ mod tests {
fn should_be_able_to_deserialize_minimal_payload_from_json() {
let value = serde_json::json!({
"type": "changed",
"ts": u64::MAX,
"timestamp": u64::MAX,
"kind": "access",
"paths": ["path"],
"path": "path",
});
let payload: Response = serde_json::from_value(value).unwrap();
@ -681,7 +685,7 @@ mod tests {
Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails::default(),
})
);
@ -691,11 +695,13 @@ mod tests {
fn should_be_able_to_deserialize_full_payload_from_json() {
let value = serde_json::json!({
"type": "changed",
"ts": u64::MAX,
"timestamp": u64::MAX,
"kind": "access",
"paths": ["path"],
"path": "path",
"details": {
"attributes": ["permissions"],
"attribute": "permissions",
"renamed": "renamed",
"timestamp": u64::MAX,
"extra": "info",
},
});
@ -706,9 +712,11 @@ mod tests {
Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails {
attributes: vec![ChangeDetailsAttributes::Permissions],
attribute: Some(ChangeDetailsAttribute::Permissions),
renamed: Some(PathBuf::from("renamed")),
timestamp: Some(u64::MAX),
extra: Some(String::from("info")),
},
})
@ -720,7 +728,7 @@ mod tests {
let payload = Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails::default(),
});
@ -736,9 +744,11 @@ mod tests {
let payload = Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails {
attributes: vec![ChangeDetailsAttributes::Permissions],
attribute: Some(ChangeDetailsAttribute::Permissions),
renamed: Some(PathBuf::from("renamed")),
timestamp: Some(u64::MAX),
extra: Some(String::from("info")),
},
});
@ -759,7 +769,7 @@ mod tests {
let buf = rmp_serde::encode::to_vec_named(&Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails::default(),
}))
.unwrap();
@ -770,7 +780,7 @@ mod tests {
Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails::default(),
})
);
@ -785,9 +795,11 @@ mod tests {
let buf = rmp_serde::encode::to_vec_named(&Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails {
attributes: vec![ChangeDetailsAttributes::Permissions],
attribute: Some(ChangeDetailsAttribute::Permissions),
renamed: Some(PathBuf::from("renamed")),
timestamp: Some(u64::MAX),
extra: Some(String::from("info")),
},
}))
@ -799,9 +811,11 @@ mod tests {
Response::Changed(Change {
timestamp: u64::MAX,
kind: ChangeKind::Access,
paths: vec![PathBuf::from("path")],
path: PathBuf::from("path"),
details: ChangeDetails {
attributes: vec![ChangeDetailsAttributes::Permissions],
attribute: Some(ChangeDetailsAttribute::Permissions),
renamed: Some(PathBuf::from("renamed")),
timestamp: Some(u64::MAX),
extra: Some(String::from("info")),
},
})
@ -900,9 +914,9 @@ mod tests {
file_type: FileType::File,
len: u64::MAX,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -933,10 +947,6 @@ mod tests {
}),
});
// NOTE: These values are too big to normally serialize, so we have to convert them to
// a string type, which is why the value here also needs to be a string.
let u128_max_str = u128::MAX.to_string();
let value = serde_json::to_value(payload).unwrap();
assert_eq!(
value,
@ -946,9 +956,9 @@ mod tests {
"file_type": "file",
"len": u64::MAX,
"readonly": true,
"accessed": u128_max_str,
"created": u128_max_str,
"modified": u128_max_str,
"accessed": u64::MAX,
"created": u64::MAX,
"modified": u64::MAX,
"unix": {
"owner_read": true,
"owner_write": false,
@ -1009,16 +1019,15 @@ mod tests {
#[test]
fn should_be_able_to_deserialize_full_payload_from_json() {
let u128_max_str = u128::MAX.to_string();
let value = serde_json::json!({
"type": "metadata",
"canonicalized_path": "path",
"file_type": "file",
"len": u64::MAX,
"readonly": true,
"accessed": u128_max_str,
"created": u128_max_str,
"modified": u128_max_str,
"accessed": u64::MAX,
"created": u64::MAX,
"modified": u64::MAX,
"unix": {
"owner_read": true,
"owner_write": false,
@ -1057,9 +1066,9 @@ mod tests {
file_type: FileType::File,
len: u64::MAX,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -1120,9 +1129,9 @@ mod tests {
file_type: FileType::File,
len: u64::MAX,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -1207,9 +1216,9 @@ mod tests {
file_type: FileType::File,
len: u64::MAX,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -1249,9 +1258,9 @@ mod tests {
file_type: FileType::File,
len: u64::MAX,
readonly: true,
accessed: Some(u128::MAX),
created: Some(u128::MAX),
modified: Some(u128::MAX),
accessed: Some(u64::MAX),
created: Some(u64::MAX),
modified: Some(u64::MAX),
unix: Some(UnixMetadata {
owner_read: true,
owner_write: false,
@ -2004,19 +2013,14 @@ mod tests {
mod version {
use super::*;
use crate::common::{Capabilities, Capability};
use crate::semver::Version as SemVer;
#[test]
fn should_be_able_to_serialize_to_json() {
let payload = Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: [Capability {
kind: String::from("some kind"),
description: String::from("some description"),
}]
.into_iter()
.collect(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
});
let value = serde_json::to_value(payload).unwrap();
@ -2024,12 +2028,9 @@ mod tests {
value,
serde_json::json!({
"type": "version",
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": [{
"kind": "some kind",
"description": "some description",
}],
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"],
})
);
}
@ -2038,18 +2039,18 @@ mod tests {
fn should_be_able_to_deserialize_from_json() {
let value = serde_json::json!({
"type": "version",
"server_version": "some version",
"protocol_version": [1, 2, 3],
"capabilities": Capabilities::all(),
"server_version": "123.456.789-rc+build",
"protocol_version": "1.2.3",
"capabilities": ["cap"],
});
let payload: Response = serde_json::from_value(value).unwrap();
assert_eq!(
payload,
Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
);
}
@ -2057,9 +2058,9 @@ mod tests {
#[test]
fn should_be_able_to_serialize_to_msgpack() {
let payload = Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
});
// NOTE: We don't actually check the output here because it's an implementation detail
@ -2076,9 +2077,9 @@ mod tests {
// client/server and then trying to deserialize on the other side. This has happened
// enough times with minor changes that we need tests to verify.
let buf = rmp_serde::encode::to_vec_named(&Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
}))
.unwrap();
@ -2086,9 +2087,9 @@ mod tests {
assert_eq!(
payload,
Response::Version(Version {
server_version: String::from("some version"),
protocol_version: (1, 2, 3),
capabilities: Capabilities::all(),
server_version: "123.456.789-rc+build".parse().unwrap(),
protocol_version: SemVer::new(1, 2, 3),
capabilities: vec![String::from("cap")],
})
);
}

@ -1,5 +1,3 @@
use serde::{Deserialize, Serialize};
/// Used purely for skipping serialization of values that are false by default.
#[inline]
pub const fn is_false(value: &bool) -> bool {
@ -17,28 +15,3 @@ pub const fn is_one(value: &usize) -> bool {
/// Returns the constant value `1`; companion to `is_one`, presumably used as a
/// serde field default — confirm against the field attributes that reference it.
pub const fn one() -> usize {
    1
}
pub fn deserialize_u128_option<'de, D>(deserializer: D) -> Result<Option<u128>, D::Error>
where
D: serde::Deserializer<'de>,
{
match Option::<String>::deserialize(deserializer)? {
Some(s) => match s.parse::<u128>() {
Ok(value) => Ok(Some(value)),
Err(error) => Err(serde::de::Error::custom(format!(
"Cannot convert to u128 with error: {error:?}"
))),
},
None => Ok(None),
}
}
/// Serializes an optional `u128` by writing the value as a string — per the
/// sibling deserializer, the value is too large for typical serde formats —
/// or a unit for `None`.
pub fn serialize_u128_option<S: serde::Serializer>(
    val: &Option<u128>,
    s: S,
) -> Result<S::Ok, S::Error> {
    if let Some(v) = val {
        v.to_string().serialize(s)
    } else {
        s.serialize_unit()
    }
}

@ -2,7 +2,7 @@
name = "distant-ssh2"
description = "Library to enable native ssh-2 protocol for use with distant sessions"
categories = ["network-programming"]
version = "0.20.0-alpha.8"
version = "0.20.0"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
@ -20,7 +20,7 @@ async-compat = "0.2.1"
async-once-cell = "0.5.2"
async-trait = "0.1.68"
derive_more = { version = "0.99.17", default-features = false, features = ["display", "error"] }
distant-core = { version = "=0.20.0-alpha.8", path = "../distant-core" }
distant-core = { version = "=0.20.0", path = "../distant-core" }
futures = "0.3.28"
hex = "0.4.3"
log = "0.4.18"

@ -1,13 +1,13 @@
# distant ssh2
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.68.0][distant_rustc_img]][distant_rustc_lnk]
[![Crates.io][distant_crates_img]][distant_crates_lnk] [![Docs.rs][distant_doc_img]][distant_doc_lnk] [![Rustc 1.70.0][distant_rustc_img]][distant_rustc_lnk]
[distant_crates_img]: https://img.shields.io/crates/v/distant-ssh2.svg
[distant_crates_lnk]: https://crates.io/crates/distant-ssh2
[distant_doc_img]: https://docs.rs/distant-ssh2/badge.svg
[distant_doc_lnk]: https://docs.rs/distant-ssh2
[distant_rustc_img]: https://img.shields.io/badge/distant_ssh2-rustc_1.68+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/03/09/Rust-1.68.0.html
[distant_rustc_img]: https://img.shields.io/badge/distant_ssh2-rustc_1.70+-lightgray.svg
[distant_rustc_lnk]: https://blog.rust-lang.org/2023/06/01/Rust-1.70.0.html
Library provides native ssh integration into the
[`distant`](https://github.com/chipsenkbeil/distant) binary.

@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::{HashMap, VecDeque};
use std::io;
use std::path::PathBuf;
use std::sync::{Arc, Weak};
@ -7,10 +7,10 @@ use std::time::Duration;
use async_compat::CompatExt;
use async_once_cell::OnceCell;
use async_trait::async_trait;
use distant_core::net::server::ConnectionCtx;
use distant_core::protocol::semver;
use distant_core::protocol::{
Capabilities, CapabilityKind, DirEntry, Environment, FileType, Metadata, Permissions,
ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION,
DirEntry, Environment, FileType, Metadata, Permissions, ProcessId, PtySize,
SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION,
};
use distant_core::{DistantApi, DistantCtx};
use log::*;
@ -25,16 +25,6 @@ use crate::utils::{self, to_other_error};
/// Time after copy completes to wait for stdout/stderr to close
const COPY_COMPLETE_TIMEOUT: Duration = Duration::from_secs(1);
#[derive(Default)]
pub struct ConnectionState {
/// List of process ids that will be killed when the connection terminates
processes: Arc<RwLock<HashSet<ProcessId>>>,
/// Internal reference to global process list for removals
/// NOTE: Initialized during `on_accept` of [`DistantApi`]
global_processes: Weak<RwLock<HashMap<ProcessId, Process>>>,
}
struct Process {
stdin_tx: mpsc::Sender<Vec<u8>>,
kill_tx: mpsc::Sender<()>,
@ -72,18 +62,7 @@ impl SshDistantApi {
#[async_trait]
impl DistantApi for SshDistantApi {
type LocalData = ConnectionState;
async fn on_accept(&self, ctx: ConnectionCtx<'_, Self::LocalData>) -> io::Result<()> {
ctx.local_data.global_processes = Arc::downgrade(&self.processes);
Ok(())
}
async fn read_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<Vec<u8>> {
async fn read_file(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<Vec<u8>> {
debug!(
"[Conn {}] Reading bytes from file {:?}",
ctx.connection_id, path
@ -103,11 +82,7 @@ impl DistantApi for SshDistantApi {
Ok(contents.into_bytes())
}
async fn read_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
) -> io::Result<String> {
async fn read_file_text(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<String> {
debug!(
"[Conn {}] Reading text from file {:?}",
ctx.connection_id, path
@ -127,12 +102,7 @@ impl DistantApi for SshDistantApi {
Ok(contents)
}
async fn write_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn write_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
debug!(
"[Conn {}] Writing bytes to file {:?}",
ctx.connection_id, path
@ -154,7 +124,7 @@ impl DistantApi for SshDistantApi {
async fn write_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -177,12 +147,7 @@ impl DistantApi for SshDistantApi {
Ok(())
}
async fn append_file(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
data: Vec<u8>,
) -> io::Result<()> {
async fn append_file(&self, ctx: DistantCtx, path: PathBuf, data: Vec<u8>) -> io::Result<()> {
debug!(
"[Conn {}] Appending bytes to file {:?}",
ctx.connection_id, path
@ -213,7 +178,7 @@ impl DistantApi for SshDistantApi {
async fn append_file_text(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
data: String,
) -> io::Result<()> {
@ -247,7 +212,7 @@ impl DistantApi for SshDistantApi {
async fn read_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
depth: usize,
absolute: bool,
@ -375,12 +340,7 @@ impl DistantApi for SshDistantApi {
Ok((entries, errors))
}
async fn create_dir(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
all: bool,
) -> io::Result<()> {
async fn create_dir(&self, ctx: DistantCtx, path: PathBuf, all: bool) -> io::Result<()> {
debug!(
"[Conn {}] Creating directory {:?} {{all: {}}}",
ctx.connection_id, path, all
@ -436,12 +396,7 @@ impl DistantApi for SshDistantApi {
Ok(())
}
async fn remove(
&self,
ctx: DistantCtx<Self::LocalData>,
path: PathBuf,
force: bool,
) -> io::Result<()> {
async fn remove(&self, ctx: DistantCtx, path: PathBuf, force: bool) -> io::Result<()> {
debug!(
"[Conn {}] Removing {:?} {{force: {}}}",
ctx.connection_id, path, force
@ -526,12 +481,7 @@ impl DistantApi for SshDistantApi {
Ok(())
}
async fn copy(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn copy(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
debug!(
"[Conn {}] Copying {:?} to {:?}",
ctx.connection_id, src, dst
@ -573,12 +523,7 @@ impl DistantApi for SshDistantApi {
}
}
async fn rename(
&self,
ctx: DistantCtx<Self::LocalData>,
src: PathBuf,
dst: PathBuf,
) -> io::Result<()> {
async fn rename(&self, ctx: DistantCtx, src: PathBuf, dst: PathBuf) -> io::Result<()> {
debug!(
"[Conn {}] Renaming {:?} to {:?}",
ctx.connection_id, src, dst
@ -594,7 +539,7 @@ impl DistantApi for SshDistantApi {
Ok(())
}
async fn exists(&self, ctx: DistantCtx<Self::LocalData>, path: PathBuf) -> io::Result<bool> {
async fn exists(&self, ctx: DistantCtx, path: PathBuf) -> io::Result<bool> {
debug!("[Conn {}] Checking if {:?} exists", ctx.connection_id, path);
// NOTE: SFTP does not provide a means to check if a path exists that can be performed
@ -612,7 +557,7 @@ impl DistantApi for SshDistantApi {
async fn metadata(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
canonicalize: bool,
resolve_file_type: bool,
@ -655,8 +600,8 @@ impl DistantApi for SshDistantApi {
.permissions
.map(|x| !x.owner_write && !x.group_write && !x.other_write)
.unwrap_or(true),
accessed: metadata.accessed.map(u128::from),
modified: metadata.modified.map(u128::from),
accessed: metadata.accessed,
modified: metadata.modified,
created: None,
unix: metadata.permissions.as_ref().map(|p| UnixMetadata {
owner_read: p.owner_read,
@ -676,7 +621,7 @@ impl DistantApi for SshDistantApi {
#[allow(unreachable_code)]
async fn set_permissions(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
path: PathBuf,
permissions: Permissions,
options: SetPermissionsOptions,
@ -805,7 +750,7 @@ impl DistantApi for SshDistantApi {
async fn proc_spawn(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
cmd: String,
environment: Environment,
current_dir: Option<PathBuf>,
@ -817,14 +762,10 @@ impl DistantApi for SshDistantApi {
);
let global_processes = Arc::downgrade(&self.processes);
let local_processes = Arc::downgrade(&ctx.local_data.processes);
let cleanup = |id: ProcessId| async move {
if let Some(processes) = Weak::upgrade(&global_processes) {
processes.write().await.remove(&id);
}
if let Some(processes) = Weak::upgrade(&local_processes) {
processes.write().await.remove(&id);
}
};
let SpawnResult {
@ -874,7 +815,7 @@ impl DistantApi for SshDistantApi {
Ok(id)
}
async fn proc_kill(&self, ctx: DistantCtx<Self::LocalData>, id: ProcessId) -> io::Result<()> {
async fn proc_kill(&self, ctx: DistantCtx, id: ProcessId) -> io::Result<()> {
debug!("[Conn {}] Killing process {}", ctx.connection_id, id);
if let Some(process) = self.processes.read().await.get(&id) {
@ -892,12 +833,7 @@ impl DistantApi for SshDistantApi {
))
}
async fn proc_stdin(
&self,
ctx: DistantCtx<Self::LocalData>,
id: ProcessId,
data: Vec<u8>,
) -> io::Result<()> {
async fn proc_stdin(&self, ctx: DistantCtx, id: ProcessId, data: Vec<u8>) -> io::Result<()> {
debug!(
"[Conn {}] Sending stdin to process {}",
ctx.connection_id, id
@ -920,7 +856,7 @@ impl DistantApi for SshDistantApi {
async fn proc_resize_pty(
&self,
ctx: DistantCtx<Self::LocalData>,
ctx: DistantCtx,
id: ProcessId,
size: PtySize,
) -> io::Result<()> {
@ -944,7 +880,7 @@ impl DistantApi for SshDistantApi {
))
}
async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> {
async fn system_info(&self, ctx: DistantCtx) -> io::Result<SystemInfo> {
// We cache each of these requested values since they should not change for the
// lifetime of the ssh connection
static CURRENT_DIR: OnceCell<PathBuf> = OnceCell::new();
@ -998,21 +934,36 @@ impl DistantApi for SshDistantApi {
})
}
async fn version(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Version> {
async fn version(&self, ctx: DistantCtx) -> io::Result<Version> {
debug!("[Conn {}] Querying capabilities", ctx.connection_id);
let mut capabilities = Capabilities::all();
// Searching is not supported by ssh implementation
// TODO: Could we have external search using ripgrep's JSON lines API?
capabilities.take(CapabilityKind::Search);
capabilities.take(CapabilityKind::CancelSearch);
// Broken via wezterm-ssh, so not supported right now
capabilities.take(CapabilityKind::SetPermissions);
let capabilities = vec![
Version::CAP_EXEC.to_string(),
Version::CAP_FS_IO.to_string(),
Version::CAP_SYS_INFO.to_string(),
];
// Parse our server's version
let mut server_version: semver::Version = env!("CARGO_PKG_VERSION")
.parse()
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
// Add the package name to the version information
if server_version.build.is_empty() {
server_version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME"))
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
} else {
let raw_build_str = format!(
"{}.{}",
server_version.build.as_str(),
env!("CARGO_PKG_NAME")
);
server_version.build = semver::BuildMetadata::new(&raw_build_str)
.map_err(|x| io::Error::new(io::ErrorKind::Other, x))?;
}
Ok(Version {
server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
server_version,
protocol_version: PROTOCOL_VERSION,
capabilities,
})

@ -16,15 +16,16 @@ use std::str::FromStr;
use std::time::Duration;
use async_compat::CompatExt;
use async_once_cell::OnceCell;
use async_trait::async_trait;
use distant_core::net::auth::{AuthHandlerMap, DummyAuthHandler, Verifier};
use distant_core::net::client::{Client, ClientConfig};
use distant_core::net::common::{Host, InmemoryTransport, OneshotListener};
use distant_core::net::common::{Host, InmemoryTransport, OneshotListener, Version};
use distant_core::net::server::{Server, ServerRef};
use distant_core::protocol::PROTOCOL_VERSION;
use distant_core::{DistantApiServerHandler, DistantClient, DistantSingleKeyCredentials};
use log::*;
use smol::channel::Receiver as SmolReceiver;
use tokio::sync::Mutex;
use wezterm_ssh::{
ChildKiller, Config as WezConfig, MasterPty, PtySize, Session as WezSession,
SessionEvent as WezSessionEvent,
@ -325,17 +326,20 @@ impl SshAuthHandler for LocalSshAuthHandler {
}
}
/// Represents an ssh2 client
/// Represents an ssh2 client.
pub struct Ssh {
session: WezSession,
events: SmolReceiver<WezSessionEvent>,
host: String,
port: u16,
authenticated: bool,
/// Cached copy of the family representing the remote machine.
cached_family: Mutex<Option<SshFamily>>,
}
impl Ssh {
/// Connect to a remote TCP server using SSH
/// Connect to a remote TCP server using SSH.
pub fn connect(host: impl AsRef<str>, opts: SshOpts) -> io::Result<Self> {
debug!(
"Establishing ssh connection to {} using {:?}",
@ -416,15 +420,16 @@ impl Ssh {
host: host.as_ref().to_string(),
port,
authenticated: false,
cached_family: Mutex::new(None),
})
}
/// Host this client is connected to
/// Host this client is connected to.
pub fn host(&self) -> &str {
&self.host
}
/// Port this client is connected to on remote host
/// Port this client is connected to on remote host.
pub fn port(&self) -> u16 {
self.port
}
@ -434,7 +439,7 @@ impl Ssh {
self.authenticated
}
/// Authenticates the [`Ssh`] if not already authenticated
/// Authenticates the [`Ssh`] if not already authenticated.
pub async fn authenticate(&mut self, handler: impl SshAuthHandler) -> io::Result<()> {
// If already authenticated, exit
if self.authenticated {
@ -499,10 +504,10 @@ impl Ssh {
Ok(())
}
/// Detects the family of operating system on the remote machine
/// Detects the family of operating system on the remote machine.
///
/// Caches the result such that subsequent checks will return the same family.
pub async fn detect_family(&self) -> io::Result<SshFamily> {
static INSTANCE: OnceCell<SshFamily> = OnceCell::new();
// Exit early if not authenticated as this is a requirement
if !self.authenticated {
return Err(io::Error::new(
@ -511,18 +516,23 @@ impl Ssh {
));
}
INSTANCE
.get_or_try_init(async move {
let is_windows = utils::is_windows(&self.session).await?;
let mut family = self.cached_family.lock().await;
Ok(if is_windows {
SshFamily::Windows
} else {
SshFamily::Unix
})
})
.await
.copied()
// Family value is not present, so we retrieve it now and populate our cache
if family.is_none() {
// Check if we are windows, otherwise assume unix, returning an error if encountered,
// which will also drop our lock on the cache
let is_windows = utils::is_windows(&self.session).await?;
*family = Some(if is_windows {
SshFamily::Windows
} else {
SshFamily::Unix
});
}
// Cache should always be Some(...) by this point
Ok(family.unwrap())
}
/// Consume [`Ssh`] and produce a [`DistantClient`] that is connected to a remote
@ -579,6 +589,11 @@ impl Ssh {
match Client::tcp(addr)
.auth_handler(AuthHandlerMap::new().with_static_key(key.clone()))
.connect_timeout(timeout)
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.connect()
.await
{
@ -722,7 +737,7 @@ impl Ssh {
}
/// Consumes [`Ssh`] and produces a [`DistantClient`] and [`ServerRef`] pair.
pub async fn into_distant_pair(self) -> io::Result<(DistantClient, Box<dyn ServerRef>)> {
pub async fn into_distant_pair(self) -> io::Result<(DistantClient, ServerRef)> {
// Exit early if not authenticated as this is a requirement
if !self.authenticated {
return Err(io::Error::new(

@ -216,7 +216,7 @@ fn spawn_blocking_stdout_task(
id,
data: buf[..n].to_vec(),
};
if reply.blocking_send(payload).is_err() {
if reply.send(payload).is_err() {
error!("[Ssh | Proc {}] Stdout channel closed", id);
break;
}
@ -247,7 +247,7 @@ fn spawn_nonblocking_stdout_task(
id,
data: buf[..n].to_vec(),
};
if reply.send(payload).await.is_err() {
if reply.send(payload).is_err() {
error!("[Ssh | Proc {}] Stdout channel closed", id);
break;
}
@ -281,7 +281,7 @@ fn spawn_nonblocking_stderr_task(
id,
data: buf[..n].to_vec(),
};
if reply.send(payload).await.is_err() {
if reply.send(payload).is_err() {
error!("[Ssh | Proc {}] Stderr channel closed", id);
break;
}
@ -423,7 +423,7 @@ where
code: if success { Some(0) } else { None },
};
if reply.send(payload).await.is_err() {
if reply.send(payload).is_err() {
error!("[Ssh | Proc {}] Failed to send done", id,);
}
})

@ -1,6 +1,6 @@
use std::ffi::OsString;
use crate::options::DistantSubcommand;
use crate::options::{DistantSubcommand, OptionsError};
use crate::{CliResult, Options};
mod commands;
@ -13,17 +13,17 @@ pub(crate) use common::{Cache, Client, Manager};
/// Represents the primary CLI entrypoint
#[derive(Debug)]
pub struct Cli {
options: Options,
pub options: Options,
}
impl Cli {
/// Creates a new CLI instance by parsing command-line arguments
pub fn initialize() -> anyhow::Result<Self> {
pub fn initialize() -> Result<Self, OptionsError> {
Self::initialize_from(std::env::args_os())
}
/// Creates a new CLI instance by parsing providing arguments
pub fn initialize_from<I, T>(args: I) -> anyhow::Result<Self>
pub fn initialize_from<I, T>(args: I) -> Result<Self, OptionsError>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,

@ -1,14 +1,16 @@
use std::collections::HashMap;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::time::Duration;
use anyhow::Context;
use distant_core::net::common::{ConnectionId, Host, Map, Request, Response};
use distant_core::net::manager::ManagerClient;
use distant_core::protocol::semver;
use distant_core::protocol::{
self, Capabilities, ChangeKindSet, FileType, Permissions, SearchQuery, SetPermissionsOptions,
SystemInfo,
self, ChangeKind, ChangeKindSet, FileType, Permissions, SearchQuery, SearchQueryContentsMatch,
SearchQueryMatch, SearchQueryPathMatch, SetPermissionsOptions, SystemInfo, Version,
};
use distant_core::{DistantChannel, DistantChannelExt, RemoteCommand, Searcher, Watcher};
use log::*;
@ -23,7 +25,10 @@ use crate::cli::common::{
Cache, Client, JsonAuthHandler, MsgReceiver, MsgSender, PromptAuthHandler,
};
use crate::constants::MAX_PIPE_CHUNK_SIZE;
use crate::options::{ClientFileSystemSubcommand, ClientSubcommand, Format, NetworkSettings};
use crate::options::{
ClientFileSystemSubcommand, ClientSubcommand, Format, NetworkSettings, ParseShellError,
Shell as ShellOption,
};
use crate::{CliError, CliResult};
mod lsp;
@ -32,7 +37,7 @@ mod shell;
use lsp::Lsp;
use shell::Shell;
use super::common::{Formatter, RemoteProcessLink};
use super::common::RemoteProcessLink;
const SLEEP_DURATION: Duration = Duration::from_millis(1);
@ -362,10 +367,12 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
cache,
connection,
cmd,
cmd_str,
current_dir,
environment,
lsp,
pty,
shell,
network,
} => {
debug!("Connecting to manager");
@ -380,20 +387,55 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let channel = client
let mut channel: DistantChannel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?;
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?
.into_client()
.into_channel();
// Convert cmd into string
let cmd = cmd.join(" ");
let cmd = cmd_str.unwrap_or_else(|| cmd.join(" "));
// Check if we should attempt to run the command in a shell
let cmd = match shell {
None => cmd,
// Use default shell, which we need to figure out
Some(None) => {
let system_info = channel
.system_info()
.await
.context("Failed to detect remote operating system")?;
// If system reports a default shell, use it, otherwise pick a default based on the
// operating system being windows or non-windows
let shell: ShellOption = if !system_info.shell.is_empty() {
system_info.shell.parse()
} else if system_info.family.eq_ignore_ascii_case("windows") {
"cmd.exe".parse()
} else {
"/bin/sh".parse()
}
.map_err(|x: ParseShellError| anyhow::anyhow!(x))?;
shell
.make_cmd_string(&cmd)
.map_err(|x| anyhow::anyhow!(x))?
}
// Use explicit shell
Some(Some(shell)) => shell
.make_cmd_string(&cmd)
.map_err(|x| anyhow::anyhow!(x))?,
};
if let Some(scheme) = lsp {
debug!(
"Spawning LSP server (pty = {}, cwd = {:?}): {}",
pty, current_dir, cmd
);
Lsp::new(channel.into_client().into_channel())
Lsp::new(channel)
.spawn(cmd, current_dir, scheme, pty, MAX_PIPE_CHUNK_SIZE)
.await?;
} else if pty {
@ -401,7 +443,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
"Spawning pty process (environment = {:?}, cwd = {:?}): {}",
environment, current_dir, cmd
);
Shell::new(channel.into_client().into_channel())
Shell::new(channel)
.spawn(
cmd,
environment.into_map(),
@ -418,7 +460,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.environment(environment.into_map())
.current_dir(current_dir)
.pty(None)
.spawn(channel.into_client().into_channel(), &cmd)
.spawn(channel, &cmd)
.await
.with_context(|| format!("Failed to spawn {cmd}"))?;
@ -539,32 +581,51 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
match format {
Format::Shell => {
let (major, minor, patch) = distant_core::protocol::PROTOCOL_VERSION;
let mut client_version: semver::Version = env!("CARGO_PKG_VERSION")
.parse()
.context("Failed to parse client version")?;
// Add the package name to the version information
if client_version.build.is_empty() {
client_version.build = semver::BuildMetadata::new(env!("CARGO_PKG_NAME"))
.context("Failed to define client build metadata")?;
} else {
let raw_build_str = format!(
"{}.{}",
client_version.build.as_str(),
env!("CARGO_PKG_NAME")
);
client_version.build = semver::BuildMetadata::new(&raw_build_str)
.context("Failed to define client build metadata")?;
}
println!(
"Client: {} {} (Protocol {major}.{minor}.{patch})",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION")
"Client: {client_version} (Protocol {})",
distant_core::protocol::PROTOCOL_VERSION
);
let (major, minor, patch) = version.protocol_version;
println!(
"Server: {} (Protocol {major}.{minor}.{patch})",
version.server_version
"Server: {} (Protocol {})",
version.server_version, version.protocol_version
);
// Build a complete set of capabilities to show which ones we support
let client_capabilities = Capabilities::all();
let server_capabilities = version.capabilities;
let mut capabilities: Vec<String> = client_capabilities
.union(server_capabilities.as_ref())
.map(|cap| {
let kind = &cap.kind;
if client_capabilities.contains(kind)
&& server_capabilities.contains(kind)
{
format!("+{kind}")
let mut capabilities: HashMap<String, u8> = Version::capabilities()
.iter()
.map(|cap| (cap.to_string(), 1))
.collect();
for cap in version.capabilities {
*capabilities.entry(cap).or_default() += 1;
}
let mut capabilities: Vec<String> = capabilities
.into_iter()
.map(|(cap, cnt)| {
if cnt > 1 {
format!("+{cap}")
} else {
format!("-{kind}")
format!("-{cap}")
}
})
.collect();
@ -1049,7 +1110,6 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?;
let mut formatter = Formatter::shell();
let query = SearchQuery {
target: target.into(),
condition,
@ -1062,17 +1122,60 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.context("Failed to start search")?;
// Continue to receive and process matches
let mut last_searched_path: Option<PathBuf> = None;
while let Some(m) = searcher.next().await {
// TODO: Provide a cleaner way to print just a match
let res = Response::new(
"".to_string(),
protocol::Msg::Single(protocol::Response::SearchResults {
id: 0,
matches: vec![m],
}),
);
let mut files: HashMap<_, Vec<String>> = HashMap::new();
let mut is_targeting_paths = false;
match m {
SearchQueryMatch::Path(SearchQueryPathMatch { path, .. }) => {
// Create the entry with no lines called out
files.entry(path).or_default();
is_targeting_paths = true;
}
SearchQueryMatch::Contents(SearchQueryContentsMatch {
path,
lines,
line_number,
..
}) => {
let file_matches = files.entry(path).or_default();
file_matches.push(format!(
"{line_number}:{}",
lines.to_string_lossy().trim_end()
));
}
}
let mut output = String::new();
for (path, lines) in files {
use std::fmt::Write;
// If we are seeing a new path, print it out
if last_searched_path.as_deref() != Some(path.as_path()) {
// If we have already seen some path before, we would have printed it, and
// we want to add a space between it and the current path, but only if we are
// printing out file content matches and not paths
if last_searched_path.is_some() && !is_targeting_paths {
writeln!(&mut output).unwrap();
}
formatter.print(res).context("Failed to print match")?;
writeln!(&mut output, "{}", path.to_string_lossy()).unwrap();
}
for line in lines {
writeln!(&mut output, "{line}").unwrap();
}
// Update our last seen path
last_searched_path = Some(path);
}
if !output.is_empty() {
print!("{}", output);
}
}
}
ClientSubcommand::FileSystem(ClientFileSystemSubcommand::SetPermissions {
@ -1084,6 +1187,25 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
mode,
path,
}) => {
debug!("Connecting to manager");
let mut client = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let mut channel: DistantChannel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?
.into_client()
.into_channel();
debug!("Parsing {mode:?} into a proper set of permissions");
let permissions = {
if mode.trim().eq_ignore_ascii_case("readonly") {
@ -1093,37 +1215,61 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
} else {
// Attempt to parse an octal number (chmod absolute), falling back to
// parsing the mode string similar to chmod's symbolic mode
let mode = match u32::from_str_radix(&mode, 8) {
Ok(absolute) => file_mode::Mode::from(absolute),
match u32::from_str_radix(&mode, 8) {
Ok(absolute) => {
Permissions::from_unix_mode(file_mode::Mode::from(absolute).mode())
}
Err(_) => {
let mut new_mode = file_mode::Mode::empty();
new_mode
// The way parsing works, we need to parse and apply to two different
// situations
//
// 1. A mode that is all 1s so we can see if the mask would remove
// permission to some of the bits
// 2. A mode that is all 0s so we can see if the mask would add
// permission to some of the bits
let mut removals = file_mode::Mode::from(0o777);
removals
.set_str(&mode)
.context("Failed to parse mode string")?;
let removals_mask = !removals.mode();
let mut additions = file_mode::Mode::empty();
additions
.set_str(&mode)
.context("Failed to parse mode string")?;
new_mode
let additions_mask = additions.mode();
macro_rules! get_mode {
($mask:expr) => {{
let is_false = removals_mask & $mask > 0;
let is_true = additions_mask & $mask > 0;
match (is_true, is_false) {
(true, false) => Some(true),
(false, true) => Some(false),
(false, false) => None,
(true, true) => {
unreachable!("Mask cannot be adding and removing")
}
}
}};
}
Permissions {
owner_read: get_mode!(0o400),
owner_write: get_mode!(0o200),
owner_exec: get_mode!(0o100),
group_read: get_mode!(0o040),
group_write: get_mode!(0o020),
group_exec: get_mode!(0o010),
other_read: get_mode!(0o004),
other_write: get_mode!(0o002),
other_exec: get_mode!(0o001),
}
}
};
Permissions::from_unix_mode(mode.mode())
}
}
};
debug!("Connecting to manager");
let mut client = Client::new(network)
.using_prompt_auth_handler()
.connect()
.await
.context("Failed to connect to manager")?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening channel to connection {}", connection_id);
let channel = client
.open_raw_channel(connection_id)
.await
.with_context(|| format!("Failed to open channel to connection {connection_id}"))?;
let options = SetPermissionsOptions {
recursive,
follow_symlinks,
@ -1131,8 +1277,6 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
};
debug!("Setting permissions for {path:?} as (permissions = {permissions:?}, options = {options:?})");
channel
.into_client()
.into_channel()
.set_permissions(path.as_path(), permissions, options)
.await
.with_context(|| {
@ -1179,15 +1323,19 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.with_context(|| format!("Failed to watch {path:?}"))?;
// Continue to receive and process changes
let mut formatter = Formatter::shell();
while let Some(change) = watcher.next().await {
// TODO: Provide a cleaner way to print just a change
let res = Response::new(
"".to_string(),
protocol::Msg::Single(protocol::Response::Changed(change)),
println!(
"{} {}",
match change.kind {
ChangeKind::Create => "(Created)",
ChangeKind::Delete => "(Removed)",
x if x.is_access() => "(Accessed)",
x if x.is_modify() => "(Modified)",
x if x.is_rename() => "(Renamed)",
_ => "(Affected)",
},
change.path.to_string_lossy()
);
formatter.print(res).context("Failed to print change")?;
}
}
ClientSubcommand::FileSystem(ClientFileSystemSubcommand::Write {

@ -1,8 +1,6 @@
mod buf;
mod format;
mod link;
pub mod stdin;
pub use buf::*;
pub use format::*;
pub use link::*;

@ -1,404 +0,0 @@
use std::collections::HashMap;
use std::io::{self, Write};
use std::path::PathBuf;
use distant_core::net::common::Response;
use distant_core::protocol::{
self, ChangeKind, Error, FileType, Metadata, SearchQueryContentsMatch, SearchQueryMatch,
SearchQueryPathMatch, SystemInfo,
};
use log::*;
use tabled::settings::object::Rows;
use tabled::settings::style::Style;
use tabled::settings::{Alignment, Disable, Modify};
use tabled::{Table, Tabled};
use crate::options::Format;
#[derive(Default)]
struct FormatterState {
/// Last seen path during search
pub last_searched_path: Option<PathBuf>,
}
pub struct Formatter {
format: Format,
state: FormatterState,
}
impl Formatter {
/// Create a new output message for the given response based on the specified format
pub fn new(format: Format) -> Self {
Self {
format,
state: Default::default(),
}
}
/// Creates a new [`Formatter`] using [`Format`] of `Format::Shell`
pub fn shell() -> Self {
Self::new(Format::Shell)
}
/// Consumes the output message, printing it based on its configuration
pub fn print(&mut self, res: Response<protocol::Msg<protocol::Response>>) -> io::Result<()> {
let output = match self.format {
Format::Json => Output::StdoutLine(
serde_json::to_vec(&res)
.map_err(|x| io::Error::new(io::ErrorKind::InvalidData, x))?,
),
// NOTE: For shell, we assume a singular entry in the response's payload
Format::Shell if res.payload.is_batch() => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"Shell does not support batch responses",
))
}
Format::Shell => format_shell(&mut self.state, res.payload.into_single().unwrap()),
};
match output {
Output::Stdout(x) => {
// NOTE: Because we are not including a newline in the output,
// it is not guaranteed to be written out. In the case of
// LSP protocol, the JSON content is not followed by a
// newline and was not picked up when the response was
// sent back to the client; so, we need to manually flush
if let Err(x) = io::stdout().lock().write_all(&x) {
error!("Failed to write stdout: {}", x);
}
if let Err(x) = io::stdout().lock().flush() {
error!("Failed to flush stdout: {}", x);
}
}
Output::StdoutLine(x) => {
if let Err(x) = io::stdout().lock().write_all(&x) {
error!("Failed to write stdout: {}", x);
}
if let Err(x) = io::stdout().lock().write(b"\n") {
error!("Failed to write stdout newline: {}", x);
}
}
Output::Stderr(x) => {
// NOTE: Because we are not including a newline in the output,
// it is not guaranteed to be written out. In the case of
// LSP protocol, the JSON content is not followed by a
// newline and was not picked up when the response was
// sent back to the client; so, we need to manually flush
if let Err(x) = io::stderr().lock().write_all(&x) {
error!("Failed to write stderr: {}", x);
}
if let Err(x) = io::stderr().lock().flush() {
error!("Failed to flush stderr: {}", x);
}
}
Output::StderrLine(x) => {
if let Err(x) = io::stderr().lock().write_all(&x) {
error!("Failed to write stderr: {}", x);
}
if let Err(x) = io::stderr().lock().write(b"\n") {
error!("Failed to write stderr newline: {}", x);
}
}
Output::None => {}
}
Ok(())
}
}
/// Represents the output content and destination
enum Output {
Stdout(Vec<u8>),
StdoutLine(Vec<u8>),
Stderr(Vec<u8>),
StderrLine(Vec<u8>),
None,
}
/// Converts a single protocol response into shell-formatted [`Output`].
///
/// Each `protocol::Response` variant is mapped to a byte payload plus a
/// destination (stdout/stderr, with or without a trailing newline).
/// `state` persists across calls so multi-response streams (e.g. search
/// results) can avoid reprinting the same file path.
fn format_shell(state: &mut FormatterState, data: protocol::Response) -> Output {
    match data {
        // Plain acknowledgement: nothing to show the user
        protocol::Response::Ok => Output::None,
        protocol::Response::Error(Error { description, .. }) => {
            Output::StderrLine(description.into_bytes())
        }
        protocol::Response::Blob { data } => Output::StdoutLine(data),
        protocol::Response::Text { data } => Output::StdoutLine(data.into_bytes()),
        protocol::Response::DirEntries { entries, .. } => {
            // Row model for the table-rendered directory listing
            #[derive(Tabled)]
            struct EntryRow {
                ty: String,
                path: String,
            }

            let table = Table::new(entries.into_iter().map(|entry| EntryRow {
                ty: String::from(match entry.file_type {
                    FileType::Dir => "<DIR>",
                    FileType::File => "",
                    FileType::Symlink => "<SYMLINK>",
                }),
                path: entry.path.to_string_lossy().to_string(),
            }))
            .with(Style::blank())
            // Drop the header row so only the entries themselves are printed
            .with(Disable::row(Rows::new(..1)))
            .with(Modify::new(Rows::new(..)).with(Alignment::left()))
            .to_string()
            .into_bytes();

            Output::Stdout(table)
        }
        protocol::Response::Changed(change) => Output::StdoutLine(
            format!(
                "{}{}",
                // Pick a human-readable heading for the change kind; the
                // guards cover grouped kinds (access/modify/rename families)
                match change.kind {
                    ChangeKind::Create => "Following paths were created:\n",
                    ChangeKind::Delete => "Following paths were removed:\n",
                    x if x.is_access() => "Following paths were accessed:\n",
                    x if x.is_modify() => "Following paths were modified:\n",
                    x if x.is_rename() => "Following paths were renamed:\n",
                    _ => "Following paths were affected:\n",
                },
                // Bulleted list of affected paths, one per line
                change
                    .paths
                    .into_iter()
                    .map(|p| format!("* {}", p.to_string_lossy()))
                    .collect::<Vec<String>>()
                    .join("\n")
            )
            .into_bytes(),
        ),
        protocol::Response::Exists { value: exists } => {
            if exists {
                Output::StdoutLine(b"true".to_vec())
            } else {
                Output::StdoutLine(b"false".to_vec())
            }
        }
        protocol::Response::Metadata(Metadata {
            canonicalized_path,
            file_type,
            len,
            readonly,
            accessed,
            created,
            modified,
            unix,
            windows,
        }) => Output::StdoutLine(
            format!(
                concat!(
                    "{}",
                    "Type: {}\n",
                    "Len: {}\n",
                    "Readonly: {}\n",
                    "Created: {}\n",
                    "Last Accessed: {}\n",
                    "Last Modified: {}\n",
                    "{}",
                    "{}",
                    "{}",
                ),
                // Canonicalized path is optional; omit the line entirely when absent
                canonicalized_path
                    .map(|p| format!("Canonicalized Path: {p:?}\n"))
                    .unwrap_or_default(),
                file_type.as_ref(),
                len,
                readonly,
                created.unwrap_or_default(),
                accessed.unwrap_or_default(),
                modified.unwrap_or_default(),
                // Unix permission bits, only present on unix metadata
                unix.map(|u| format!(
                    concat!(
                        "Owner Read: {}\n",
                        "Owner Write: {}\n",
                        "Owner Exec: {}\n",
                        "Group Read: {}\n",
                        "Group Write: {}\n",
                        "Group Exec: {}\n",
                        "Other Read: {}\n",
                        "Other Write: {}\n",
                        "Other Exec: {}",
                    ),
                    u.owner_read,
                    u.owner_write,
                    u.owner_exec,
                    u.group_read,
                    u.group_write,
                    u.group_exec,
                    u.other_read,
                    u.other_write,
                    u.other_exec
                ))
                .unwrap_or_default(),
                // Windows file attributes, only present on windows metadata
                windows
                    .map(|w| format!(
                        concat!(
                            "Archive: {}\n",
                            "Compressed: {}\n",
                            "Encrypted: {}\n",
                            "Hidden: {}\n",
                            "Integrity Stream: {}\n",
                            "Normal: {}\n",
                            "Not Content Indexed: {}\n",
                            "No Scrub Data: {}\n",
                            "Offline: {}\n",
                            "Recall on Data Access: {}\n",
                            "Recall on Open: {}\n",
                            "Reparse Point: {}\n",
                            "Sparse File: {}\n",
                            "System: {}\n",
                            "Temporary: {}",
                        ),
                        w.archive,
                        w.compressed,
                        w.encrypted,
                        w.hidden,
                        w.integrity_stream,
                        w.normal,
                        w.not_content_indexed,
                        w.no_scrub_data,
                        w.offline,
                        w.recall_on_data_access,
                        w.recall_on_open,
                        w.reparse_point,
                        w.sparse_file,
                        w.system,
                        w.temporary,
                    ))
                    .unwrap_or_default(),
                // Neither platform block ends with a newline, so add one when
                // both are missing to keep the output newline-terminated
                if unix.is_none() && windows.is_none() {
                    String::from("\n")
                } else {
                    String::new()
                }
            )
            .into_bytes(),
        ),
        protocol::Response::SearchStarted { id } => {
            Output::StdoutLine(format!("Query {id} started").into_bytes())
        }
        protocol::Response::SearchDone { .. } => Output::None,
        protocol::Response::SearchResults { matches, .. } => {
            // Group matches by file path; path-only matches get an empty line list
            let mut files: HashMap<_, Vec<String>> = HashMap::new();
            let mut is_targeting_paths = false;

            for m in matches {
                match m {
                    SearchQueryMatch::Path(SearchQueryPathMatch { path, .. }) => {
                        // Create the entry with no lines called out
                        files.entry(path).or_default();
                        is_targeting_paths = true;
                    }
                    SearchQueryMatch::Contents(SearchQueryContentsMatch {
                        path,
                        lines,
                        line_number,
                        ..
                    }) => {
                        let file_matches = files.entry(path).or_default();
                        file_matches.push(format!(
                            "{line_number}:{}",
                            lines.to_string_lossy().trim_end()
                        ));
                    }
                }
            }

            let mut output = String::new();
            for (path, lines) in files {
                use std::fmt::Write;

                // If we are seeing a new path, print it out
                if state.last_searched_path.as_deref() != Some(path.as_path()) {
                    // If we have already seen some path before, we would have printed it, and
                    // we want to add a space between it and the current path, but only if we are
                    // printing out file content matches and not paths
                    if state.last_searched_path.is_some() && !is_targeting_paths {
                        writeln!(&mut output).unwrap();
                    }

                    writeln!(&mut output, "{}", path.to_string_lossy()).unwrap();
                }

                for line in lines {
                    writeln!(&mut output, "{line}").unwrap();
                }

                // Update our last seen path
                state.last_searched_path = Some(path);
            }

            if !output.is_empty() {
                Output::Stdout(output.into_bytes())
            } else {
                Output::None
            }
        }
        protocol::Response::ProcSpawned { .. } => Output::None,
        protocol::Response::ProcStdout { data, .. } => Output::Stdout(data),
        protocol::Response::ProcStderr { data, .. } => Output::Stderr(data),
        protocol::Response::ProcDone { id, success, code } => {
            // Successful completion is silent; failures report to stderr,
            // including the exit code when one is available
            if success {
                Output::None
            } else if let Some(code) = code {
                Output::StderrLine(format!("Proc {id} failed with code {code}").into_bytes())
            } else {
                Output::StderrLine(format!("Proc {id} failed").into_bytes())
            }
        }
        protocol::Response::SystemInfo(SystemInfo {
            family,
            os,
            arch,
            current_dir,
            main_separator,
            username,
            shell,
        }) => Output::StdoutLine(
            format!(
                concat!(
                    "Family: {:?}\n",
                    "Operating System: {:?}\n",
                    "Arch: {:?}\n",
                    "Cwd: {:?}\n",
                    "Path Sep: {:?}\n",
                    "Username: {:?}\n",
                    "Shell: {:?}"
                ),
                family, os, arch, current_dir, main_separator, username, shell
            )
            .into_bytes(),
        ),
        protocol::Response::Version(version) => {
            // Row model for the capability table
            #[derive(Tabled)]
            struct EntryRow {
                kind: String,
                description: String,
            }

            let table = Table::new(
                version
                    .capabilities
                    .into_sorted_vec()
                    .into_iter()
                    .map(|cap| EntryRow {
                        kind: cap.kind,
                        description: cap.description,
                    }),
            )
            .with(Style::ascii())
            .with(Modify::new(Rows::new(..)).with(Alignment::left()))
            .to_string()
            .into_bytes();

            Output::StdoutLine(table)
        }
    }
}

@ -14,15 +14,18 @@ pub fn run(cmd: GenerateSubcommand) -> CliResult {
async fn async_run(cmd: GenerateSubcommand) -> CliResult {
match cmd {
GenerateSubcommand::Config { file } => tokio::fs::write(file, Config::default_raw_str())
.await
.context("Failed to write default config to {file:?}")?,
GenerateSubcommand::Config { output } => match output {
Some(path) => tokio::fs::write(path, Config::default_raw_str())
.await
.context("Failed to write default config to {path:?}")?,
None => println!("{}", Config::default_raw_str()),
},
GenerateSubcommand::Completion { file, shell } => {
GenerateSubcommand::Completion { output, shell } => {
let name = "distant";
let mut cmd = Options::command();
if let Some(path) = file {
if let Some(path) = output {
clap_generate(
shell,
&mut cmd,

@ -185,7 +185,7 @@ async fn async_run(cmd: ManagerSubcommand) -> CliResult {
"global".to_string()
}
);
let manager_ref = Manager {
let manager = Manager {
access,
config: NetManagerConfig {
user,
@ -223,50 +223,29 @@ async fn async_run(cmd: ManagerSubcommand) -> CliResult {
.context("Failed to start manager")?;
// Let our server run to completion
manager_ref
.as_ref()
.polling_wait()
.await
.context("Failed to wait on manager")?;
manager.await.context("Failed to wait on manager")?;
info!("Manager is shutting down");
Ok(())
}
ManagerSubcommand::Capabilities { format, network } => {
ManagerSubcommand::Version { format, network } => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
debug!("Getting list of capabilities");
let caps = client
.capabilities()
.await
.context("Failed to get list of capabilities")?;
debug!("Got capabilities: {caps:?}");
debug!("Getting version");
let version = client.version().await.context("Failed to get version")?;
debug!("Got version: {version}");
match format {
Format::Json => {
println!(
"{}",
serde_json::to_string(&caps)
.context("Failed to format capabilities as json")?
serde_json::to_string(&serde_json::json!({ "version": version }))
.context("Failed to format version as json")?
);
}
Format::Shell => {
#[derive(Tabled)]
struct CapabilityRow {
kind: String,
description: String,
}
println!(
"{}",
Table::new(caps.into_sorted_vec().into_iter().map(|cap| {
CapabilityRow {
kind: cap.kind,
description: cap.description,
}
}))
);
println!("{version}");
}
}

@ -11,12 +11,13 @@ use distant_core::net::auth::{
StaticKeyAuthMethodHandler,
};
use distant_core::net::client::{Client, ClientConfig, ReconnectStrategy, UntypedClient};
use distant_core::net::common::{Destination, Map, SecretKey32};
use distant_core::net::common::{Destination, Map, SecretKey32, Version};
use distant_core::net::manager::{ConnectHandler, LaunchHandler};
use distant_core::protocol::PROTOCOL_VERSION;
use log::*;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use tokio::process::Command;
use tokio::sync::{watch, Mutex};
use crate::options::{BindAddress, ClientLaunchConfig};
@ -32,15 +33,28 @@ fn invalid(label: &str) -> io::Error {
/// Supports launching locally through the manager as defined by `manager://...`
pub struct ManagerLaunchHandler {
    // Spawned server child processes
    // NOTE(review): nearby comments suggest tracking children here may be
    // superseded by the shutdown watch tasks — confirm this field is still read
    servers: Mutex<Vec<Child>>,
    // Watch channel sender; flipped to `true` to stop tasks waiting on
    // spawned servers (see `shutdown`)
    shutdown: watch::Sender<bool>,
}
impl ManagerLaunchHandler {
    /// Creates a handler with no tracked servers and a fresh, un-triggered
    /// shutdown signal (initial watch value is `false`).
    pub fn new() -> Self {
        let (shutdown, _) = watch::channel(false);
        Self {
            servers: Mutex::new(Vec::new()),
            shutdown,
        }
    }

    /// Triggers shutdown of any tasks still checking that spawned servers have terminated.
    pub fn shutdown(&self) {
        // Ignore send failures: no receivers just means no tasks remain to stop
        let _ = self.shutdown.send(true);
    }
}
impl Drop for ManagerLaunchHandler {
    /// Terminates waiting for any servers spawned by this handler, which in turn should
    /// shut them down.
    fn drop(&mut self) {
        // Best-effort: `shutdown` ignores send errors when no tasks are listening
        self.shutdown();
    }
}
#[async_trait]
@ -137,9 +151,34 @@ impl LaunchHandler for ManagerLaunchHandler {
match stdout.read_line(&mut line).await {
Ok(n) if n > 0 => {
if let Ok(destination) = line[..n].trim().parse::<Destination>() {
// Store a reference to the server so we can terminate them
// when this handler is dropped
self.servers.lock().await.push(child);
let mut rx = self.shutdown.subscribe();
// Wait for the process to complete in a task. We have to do this
// to properly check the exit status, otherwise if the server
// self-terminates then we get a ZOMBIE process! Oh no!
//
// This also replaces the need to store the children within the
// handler itself and instead uses a watch update to kill the
// task in advance in the case where the child hasn't terminated.
tokio::spawn(async move {
// We don't actually care about the result, just that we're done
loop {
tokio::select! {
result = rx.changed() => {
if result.is_err() {
break;
}
if *rx.borrow_and_update() {
break;
}
}
_ = child.wait() => {
break;
}
}
}
});
break Ok(destination);
} else {
@ -247,6 +286,11 @@ impl DistantConnectHandler {
..Default::default()
})
.connect_timeout(Duration::from_secs(180))
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.connect_untyped()
.await
{

@ -2,8 +2,9 @@ use std::io::{self, Read, Write};
use anyhow::Context;
use distant_core::net::auth::Verifier;
use distant_core::net::common::{Host, SecretKey32};
use distant_core::net::server::{Server, ServerConfig as NetServerConfig, ServerRef};
use distant_core::net::common::{Host, SecretKey32, Version};
use distant_core::net::server::{Server, ServerConfig as NetServerConfig};
use distant_core::protocol::PROTOCOL_VERSION;
use distant_core::DistantSingleKeyCredentials;
use distant_local::{Config as LocalConfig, WatchConfig as LocalWatchConfig};
use log::*;
@ -159,6 +160,11 @@ async fn async_run(cmd: ServerSubcommand, _is_forked: bool) -> CliResult {
})
.handler(handler)
.verifier(Verifier::static_key(key.clone()))
.version(Version::new(
PROTOCOL_VERSION.major,
PROTOCOL_VERSION.minor,
PROTOCOL_VERSION.patch,
))
.start(addr, port)
.await
.with_context(|| format!("Failed to start server @ {addr} with {port}"))?;
@ -212,7 +218,7 @@ async fn async_run(cmd: ServerSubcommand, _is_forked: bool) -> CliResult {
}
// Let our server run to completion
server.wait().await.context("Failed to wait on server")?;
server.await.context("Failed to wait on server")?;
info!("Server is shutting down");
}
}

@ -7,7 +7,7 @@ use distant_core::net::auth::{
AuthHandler, AuthMethodHandler, PromptAuthMethodHandler, SingleAuthHandler,
};
use distant_core::net::client::{Client as NetClient, ClientConfig, ReconnectStrategy};
use distant_core::net::manager::ManagerClient;
use distant_core::net::manager::{ManagerClient, PROTOCOL_VERSION};
use log::*;
use crate::cli::common::{MsgReceiver, MsgSender};
@ -71,6 +71,7 @@ impl<T: AuthHandler + Clone> Client<T> {
},
..Default::default()
})
.version(PROTOCOL_VERSION)
.connect()
.await
{
@ -113,6 +114,7 @@ impl<T: AuthHandler + Clone> Client<T> {
},
..Default::default()
})
.version(PROTOCOL_VERSION)
.connect()
.await
{

@ -1,6 +1,6 @@
use anyhow::Context;
use distant_core::net::auth::Verifier;
use distant_core::net::manager::{Config as ManagerConfig, ManagerServer};
use distant_core::net::manager::{Config as ManagerConfig, ManagerServer, PROTOCOL_VERSION};
use distant_core::net::server::ServerRef;
use log::*;
@ -15,9 +15,12 @@ pub struct Manager {
impl Manager {
/// Begin listening on the network interface specified within [`NetworkConfig`]
pub async fn listen(self) -> anyhow::Result<Box<dyn ServerRef>> {
pub async fn listen(self) -> anyhow::Result<ServerRef> {
let user = self.config.user;
// Version we'll use to report compatibility in talking to the manager
let version = PROTOCOL_VERSION;
#[cfg(unix)]
{
use distant_core::net::common::UnixSocketListener;
@ -28,6 +31,7 @@ impl Manager {
global_paths::UNIX_SOCKET_PATH.as_path()
}
});
debug!("Manager wants to use unix socket @ {:?}", socket_path);
// Ensure that the path to the socket exists
if let Some(parent) = socket_path.parent() {
@ -36,8 +40,9 @@ impl Manager {
.with_context(|| format!("Failed to create socket directory {parent:?}"))?;
}
let boxed_ref = ManagerServer::new(self.config)
let server = ManagerServer::new(self.config)
.verifier(Verifier::none())
.version(version)
.start(
UnixSocketListener::bind_with_permissions(socket_path, self.access.into_mode())
.await?,
@ -45,7 +50,7 @@ impl Manager {
.with_context(|| format!("Failed to start manager at socket {socket_path:?}"))?;
info!("Manager listening using unix socket @ {:?}", socket_path);
Ok(boxed_ref)
Ok(server)
}
#[cfg(windows)]
@ -56,14 +61,16 @@ impl Manager {
} else {
global_paths::WINDOWS_PIPE_NAME.as_str()
});
debug!("Manager wants to use windows pipe @ {:?}", pipe_name);
let boxed_ref = ManagerServer::new(self.config)
let server = ManagerServer::new(self.config)
.verifier(Verifier::none())
.version(version)
.start(WindowsPipeListener::bind_local(pipe_name)?)
.with_context(|| format!("Failed to start manager at pipe {pipe_name:?}"))?;
info!("Manager listening using windows pipe @ {:?}", pipe_name);
Ok(boxed_ref)
Ok(server)
}
}
}

@ -6,6 +6,7 @@ pub struct ReadmeDoctests;
use std::process::{ExitCode, Termination};
use clap::error::ErrorKind;
use derive_more::{Display, Error, From};
mod cli;
@ -16,30 +17,83 @@ mod options;
pub mod win_service;
pub use cli::Cli;
pub use options::Options;
pub use options::{Format, Options, OptionsError};
/// Wrapper around a [`CliResult`] that provides [`Termination`] support
pub struct MainResult(CliResult);
/// Wrapper around a [`CliResult`] that provides [`Termination`] support and [`Format`]ing.
pub struct MainResult {
inner: CliResult,
format: Format,
}
impl MainResult {
pub const OK: MainResult = MainResult(Ok(()));
pub const OK: MainResult = MainResult {
inner: Ok(()),
format: Format::Shell,
};
/// Creates a new result that performs general shell formatting.
pub fn new(inner: CliResult) -> Self {
Self {
inner,
format: Format::Shell,
}
}
/// Converts to shell formatting for errors.
pub fn shell(self) -> Self {
Self {
inner: self.inner,
format: Format::Shell,
}
}
/// Converts to a JSON formatting for errors.
pub fn json(self) -> Self {
Self {
inner: self.inner,
format: Format::Json,
}
}
}
impl From<CliResult> for MainResult {
fn from(res: CliResult) -> Self {
Self(res)
Self::new(res)
}
}
impl From<OptionsError> for MainResult {
fn from(x: OptionsError) -> Self {
Self::new(match x {
OptionsError::Config(x) => Err(CliError::Error(x)),
OptionsError::Options(x) => match x.kind() {
// --help and --version should not actually exit with an error and instead display
// their related information while succeeding
ErrorKind::DisplayHelp | ErrorKind::DisplayVersion => {
// NOTE: We're causing a side effect here in constructing the main result,
// but seems cleaner than returning an error with an exit code of 0
// and a message to try to print. Plus, we leverage automatic color
// handling in this approach.
let _ = x.print();
Ok(())
}
// Everything else is an actual error and should fail
_ => Err(CliError::Error(anyhow::anyhow!(x))),
},
})
}
}
impl From<anyhow::Error> for MainResult {
fn from(x: anyhow::Error) -> Self {
Self(Err(CliError::Error(x)))
Self::new(Err(CliError::Error(x)))
}
}
impl From<anyhow::Result<()>> for MainResult {
fn from(res: anyhow::Result<()>) -> Self {
Self(res.map_err(CliError::Error))
Self::new(res.map_err(CliError::Error))
}
}
@ -62,14 +116,33 @@ impl CliError {
impl Termination for MainResult {
fn report(self) -> ExitCode {
match self.0 {
match self.inner {
Ok(_) => ExitCode::SUCCESS,
Err(x) => match x {
CliError::Exit(code) => ExitCode::from(code),
CliError::Error(x) => {
eprintln!("{x:?}");
match self.format {
// For anyhow, we want to print with debug information, which includes the
// full stack of information that anyhow collects; otherwise, we would only
// include the top-level context.
Format::Shell => eprintln!("{x:?}"),
Format::Json => println!(
"{}",
serde_json::to_string(&serde_json::json!({
"type": "error",
"msg": format!("{x:?}"),
}),)
.expect("Failed to format error to JSON")
),
}
// For anyhow, we want to log with debug information, which includes the full
// stack of information that anyhow collects; otherwise, we would only include
// the top-level context.
::log::error!("{x:?}");
::log::logger().flush();
ExitCode::FAILURE
}
},

@ -1,4 +1,4 @@
use distant::{Cli, MainResult};
use distant::{Cli, Format, MainResult};
#[cfg(unix)]
fn main() -> MainResult {
@ -7,7 +7,13 @@ fn main() -> MainResult {
Err(x) => return MainResult::from(x),
};
let _logger = cli.init_logger();
MainResult::from(cli.run())
let format = cli.options.command.format();
let result = MainResult::from(cli.run());
match format {
Format::Shell => result.shell(),
Format::Json => result.json(),
}
}
#[cfg(windows)]
@ -17,6 +23,7 @@ fn main() -> MainResult {
Err(x) => return MainResult::from(x),
};
let _logger = cli.init_logger();
let format = cli.options.command.format();
// If we are trying to listen as a manager, try as a service first
if cli.is_manager_listen_command() {
@ -35,5 +42,9 @@ fn main() -> MainResult {
}
// Otherwise, execute as a non-service CLI
MainResult::from(cli.run())
let result = MainResult::from(cli.run());
match format {
Format::Shell => result.shell(),
Format::Json => result.json(),
}
}

@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};
use clap::builder::TypedValueParser as _;
use clap::{Args, Parser, Subcommand, ValueEnum, ValueHint};
use clap_complete::Shell as ClapCompleteShell;
use derive_more::IsVariant;
use derive_more::{Display, Error, From, IsVariant};
use distant_core::net::common::{ConnectionId, Destination, Map, PortRange};
use distant_core::net::server::Shutdown;
use distant_core::protocol::ChangeKind;
@ -29,21 +29,31 @@ pub struct Options {
pub logging: LoggingSettings,
/// Configuration file to load instead of the default paths
#[clap(short = 'c', long = "config", global = true, value_parser)]
#[clap(long = "config", global = true, value_parser)]
config_path: Option<PathBuf>,
#[clap(subcommand)]
pub command: DistantSubcommand,
}
/// Represents an error associated with parsing options.
#[derive(Debug, Display, From, Error)]
pub enum OptionsError {
    /// When the configuration file fails to load
    Config(#[error(not(source))] anyhow::Error),
    /// When parsing options fails (or is something like --version or --help)
    Options(#[error(not(source))] clap::Error),
}
impl Options {
/// Creates a new CLI instance by parsing command-line arguments
pub fn load() -> anyhow::Result<Self> {
pub fn load() -> Result<Self, OptionsError> {
Self::load_from(std::env::args_os())
}
/// Creates a new CLI instance by parsing providing arguments
pub fn load_from<I, T>(args: I) -> anyhow::Result<Self>
pub fn load_from<I, T>(args: I) -> Result<Self, OptionsError>
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
@ -161,7 +171,7 @@ impl Options {
DistantSubcommand::Manager(cmd) => {
update_logging!(manager);
match cmd {
ManagerSubcommand::Capabilities { network, .. } => {
ManagerSubcommand::Version { network, .. } => {
network.merge(config.manager.network);
}
ManagerSubcommand::Info { network, .. } => {
@ -271,6 +281,19 @@ pub enum DistantSubcommand {
Generate(GenerateSubcommand),
}
impl DistantSubcommand {
    /// Format used by the subcommand.
    ///
    /// Delegates to the wrapped subcommand so output formatting follows
    /// whichever CLI branch was invoked.
    #[inline]
    pub fn format(&self) -> Format {
        match self {
            Self::Client(x) => x.format(),
            Self::Manager(x) => x.format(),
            Self::Server(x) => x.format(),
            Self::Generate(x) => x.format(),
        }
    }
}
/// Subcommands for `distant client`.
#[derive(Debug, PartialEq, Subcommand, IsVariant)]
pub enum ClientSubcommand {
@ -440,6 +463,11 @@ pub enum ClientSubcommand {
#[clap(long)]
pty: bool,
/// If specified, will spawn the process in the specified shell, defaulting to the
/// user-configured shell.
#[clap(long, name = "SHELL")]
shell: Option<Option<Shell>>,
/// Alternative current directory for the remote process
#[clap(long)]
current_dir: Option<PathBuf>,
@ -448,8 +476,17 @@ pub enum ClientSubcommand {
#[clap(long, default_value_t)]
environment: Map,
/// If present, commands are read from the provided string
#[clap(short = 'c', long = "cmd", conflicts_with = "CMD")]
cmd_str: Option<String>,
/// Command to run
#[clap(name = "CMD", num_args = 1.., last = true)]
#[clap(
name = "CMD",
num_args = 1..,
last = true,
conflicts_with = "cmd_str"
)]
cmd: Vec<String>,
},
@ -520,6 +557,21 @@ impl ClientSubcommand {
Self::Version { network, .. } => network,
}
}
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
match self {
Self::Api { .. } => Format::Json,
Self::Connect { format, .. } => *format,
Self::FileSystem(fs) => fs.format(),
Self::Launch { format, .. } => *format,
Self::Shell { .. } => Format::Shell,
Self::Spawn { .. } => Format::Shell,
Self::SystemInfo { .. } => Format::Shell,
Self::Version { format, .. } => *format,
}
}
}
/// Subcommands for `distant fs`.
@ -917,6 +969,12 @@ impl ClientFileSystemSubcommand {
Self::Write { network, .. } => network,
}
}
/// Format used by the subcommand.
#[inline]
pub fn format(&self) -> Format {
Format::Shell
}
}
/// Subcommands for `distant generate`.
@ -924,15 +982,16 @@ impl ClientFileSystemSubcommand {
pub enum GenerateSubcommand {
/// Generate configuration file with base settings
Config {
/// Path to where the configuration file should be created
file: PathBuf,
/// Write output to a file instead of stdout
#[clap(short, long, value_name = "FILE")]
output: Option<PathBuf>,
},
// Generate completion info for CLI
Completion {
/// If specified, will output to the file at the given path instead of stdout
#[clap(long)]
file: Option<PathBuf>,
/// Write output to a file instead of stdout
#[clap(long, value_name = "FILE")]
output: Option<PathBuf>,
/// Specific shell to target for the generated output
#[clap(value_enum, value_parser)]
@ -940,6 +999,14 @@ pub enum GenerateSubcommand {
},
}
impl GenerateSubcommand {
    /// Format used by the subcommand.
    ///
    /// Generate commands always emit human-readable (shell) output.
    #[inline]
    pub fn format(&self) -> Format {
        Format::Shell
    }
}
/// Subcommands for `distant manager`.
#[derive(Debug, PartialEq, Eq, Subcommand, IsVariant)]
pub enum ManagerSubcommand {
@ -987,7 +1054,7 @@ pub enum ManagerSubcommand {
},
/// Retrieve a list of capabilities that the manager supports
Capabilities {
Version {
#[clap(short, long, default_value_t, value_enum)]
format: Format,
@ -1036,6 +1103,22 @@ pub enum ManagerSubcommand {
},
}
impl ManagerSubcommand {
    /// Format used by the subcommand.
    ///
    /// Subcommands carrying an explicit `--format` flag report it; the
    /// remaining variants (service management, listen) are shell-only.
    #[inline]
    pub fn format(&self) -> Format {
        match self {
            Self::Select { format, .. } => *format,
            Self::Service(_) => Format::Shell,
            Self::Listen { .. } => Format::Shell,
            Self::Version { format, .. } => *format,
            Self::Info { format, .. } => *format,
            Self::List { format, .. } => *format,
            Self::Kill { format, .. } => *format,
        }
    }
}
/// Subcommands for `distant manager service`.
#[derive(Debug, PartialEq, Eq, Subcommand, IsVariant)]
pub enum ManagerServiceSubcommand {
@ -1152,6 +1235,14 @@ pub enum ServerSubcommand {
},
}
impl ServerSubcommand {
    /// Format used by the subcommand.
    ///
    /// Server commands always emit human-readable (shell) output.
    #[inline]
    pub fn format(&self) -> Format {
        Format::Shell
    }
}
#[derive(Args, Debug, PartialEq)]
pub struct ServerListenWatchOptions {
/// If specified, will use the polling-based watcher for filesystem changes
@ -1852,7 +1943,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
};
@ -1890,7 +1983,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
}
@ -1915,7 +2010,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
};
@ -1953,7 +2050,9 @@ mod tests {
current_dir: None,
environment: map!(),
lsp: Some(None),
shell: Some(None),
pty: true,
cmd_str: None,
cmd: vec![String::from("cmd")],
}),
}
@ -3327,7 +3426,7 @@ mod tests {
log_level: None,
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
};
@ -3351,7 +3450,7 @@ mod tests {
log_level: Some(LogLevel::Trace),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
}
@ -3367,7 +3466,7 @@ mod tests {
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
};
@ -3391,7 +3490,7 @@ mod tests {
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Generate(GenerateSubcommand::Completion {
file: None,
output: None,
shell: ClapCompleteShell::Bash,
}),
}
@ -3406,7 +3505,7 @@ mod tests {
log_file: None,
log_level: None,
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: None,
@ -3438,7 +3537,7 @@ mod tests {
log_file: Some(PathBuf::from("config-log-file")),
log_level: Some(LogLevel::Trace),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("config-unix-socket")),
@ -3457,7 +3556,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("cli-unix-socket")),
@ -3489,7 +3588,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info),
},
command: DistantSubcommand::Manager(ManagerSubcommand::Capabilities {
command: DistantSubcommand::Manager(ManagerSubcommand::Version {
format: Format::Json,
network: NetworkSettings {
unix_socket: Some(PathBuf::from("cli-unix-socket")),

@ -3,6 +3,7 @@ mod cmd;
mod logging;
mod network;
mod search;
mod shell;
mod time;
mod value;
@ -11,5 +12,6 @@ pub use cmd::*;
pub use logging::*;
pub use network::*;
pub use search::*;
pub use shell::*;
pub use time::*;
pub use value::*;

@ -55,6 +55,35 @@ pub struct CliSearchQueryOptions {
/// include the remaining results even if less than pagination request
#[clap(long)]
pub pagination: Option<u64>,
/// If true, will skip searching hidden files.
#[clap(long)]
pub ignore_hidden: bool,
/// If true, will read `.ignore` files that are used by `ripgrep` and `The Silver Searcher`
/// to determine which files and directories to not search.
#[clap(long)]
pub use_ignore_files: bool,
/// If true, will read `.ignore` files from parent directories that are used by `ripgrep` and
/// `The Silver Searcher` to determine which files and directories to not search.
#[clap(long)]
pub use_parent_ignore_files: bool,
/// If true, will read `.gitignore` files to determine which files and directories to not
/// search.
#[clap(long)]
pub use_git_ignore_files: bool,
/// If true, will read global `.gitignore` files to determine which files and directories to
/// not search.
#[clap(long)]
pub use_global_git_ignore_files: bool,
/// If true, will read `.git/info/exclude` files to determine which files and directories to
/// not search.
#[clap(long)]
pub use_git_exclude_files: bool,
}
impl From<CliSearchQueryOptions> for SearchQueryOptions {
@ -68,6 +97,12 @@ impl From<CliSearchQueryOptions> for SearchQueryOptions {
limit: x.limit,
max_depth: x.max_depth,
pagination: x.pagination,
ignore_hidden: x.ignore_hidden,
use_ignore_files: x.use_ignore_files,
use_parent_ignore_files: x.use_parent_ignore_files,
use_git_ignore_files: x.use_git_ignore_files,
use_global_git_ignore_files: x.use_global_git_ignore_files,
use_git_exclude_files: x.use_git_exclude_files,
}
}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save