Refactor into protocol crate & change capabilities -> version (#189)

pull/191/head
Chip Senkbeil 12 months ago committed by GitHub
parent 95c0d0c0d1
commit 76dc7cf1fa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- New `set_permissions` method available `DistantApi` and implemented by local - New `set_permissions` method available `DistantApi` and implemented by local
server (ssh unavailable due to https://github.com/wez/wezterm/issues/3784) server (ssh unavailable due to https://github.com/wez/wezterm/issues/3784)
- Implementation of `DistantChannelExt::set_permissions` - Implementation of `DistantChannelExt::set_permissions`
- `distant version` to display information about connected server
### Changed ### Changed
@ -22,6 +23,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
`distant_net::common::Keychain` `distant_net::common::Keychain`
- Moved `distant_net::common::transport::framed::codec::encryption::SecretKey` - Moved `distant_net::common::transport::framed::codec::encryption::SecretKey`
and similar to `distant_net::common::SecretKey` and similar to `distant_net::common::SecretKey`
- Search matches reported with `match` key are now inlined as either a byte
array or a string and no longer an object with a `type` and `value` field
- Unset options and values are now omitted from `JSON` serialization instead
  of being returned as explicit `null` values
- `Capabilities` message type has been changed to `Version` with new struct to
report the version information that includes a server version string,
protocol version tuple, and capabilities
### Removed
- `distant capabilities` has been removed in favor of `distant version`
## [0.20.0-alpha.6] ## [0.20.0-alpha.6]

57
Cargo.lock generated

@ -872,6 +872,7 @@ dependencies = [
"bytes", "bytes",
"derive_more", "derive_more",
"distant-net", "distant-net",
"distant-protocol",
"env_logger", "env_logger",
"futures", "futures",
"grep", "grep",
@ -887,7 +888,6 @@ dependencies = [
"rand", "rand",
"regex", "regex",
"rstest", "rstest",
"schemars",
"serde", "serde",
"serde_bytes", "serde_bytes",
"serde_json", "serde_json",
@ -920,7 +920,6 @@ dependencies = [
"paste", "paste",
"rand", "rand",
"rmp-serde", "rmp-serde",
"schemars",
"serde", "serde",
"serde_bytes", "serde_bytes",
"serde_json", "serde_json",
@ -931,6 +930,21 @@ dependencies = [
"tokio", "tokio",
] ]
[[package]]
name = "distant-protocol"
version = "0.20.0-alpha.7"
dependencies = [
"bitflags 2.3.1",
"derive_more",
"regex",
"rmp",
"rmp-serde",
"serde",
"serde_bytes",
"serde_json",
"strum",
]
[[package]] [[package]]
name = "distant-ssh2" name = "distant-ssh2"
version = "0.20.0-alpha.7" version = "0.20.0-alpha.7"
@ -1934,9 +1948,9 @@ checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
[[package]] [[package]]
name = "notify" name = "notify"
version = "5.2.0" version = "6.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "729f63e1ca555a43fe3efa4f3efdf4801c479da85b432242a7b726f353c88486" checksum = "4d9ba6c734de18ca27c8cef5cd7058aa4ac9f63596131e4c7e41e579319032a2"
dependencies = [ dependencies = [
"bitflags 1.3.2", "bitflags 1.3.2",
"crossbeam-channel", "crossbeam-channel",
@ -2693,30 +2707,6 @@ dependencies = [
"winapi-util", "winapi-util",
] ]
[[package]]
name = "schemars"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c613288622e5f0c3fdc5dbd4db1c5fbe752746b1d1a56a0630b78fd00de44f"
dependencies = [
"dyn-clone",
"schemars_derive",
"serde",
"serde_json",
]
[[package]]
name = "schemars_derive"
version = "0.8.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "109da1e6b197438deb6db99952990c7f959572794b80ff93707d55a232545e7c"
dependencies = [
"proc-macro2",
"quote",
"serde_derive_internals",
"syn 1.0.109",
]
[[package]] [[package]]
name = "scopeguard" name = "scopeguard"
version = "1.1.0" version = "1.1.0"
@ -2790,17 +2780,6 @@ dependencies = [
"syn 2.0.16", "syn 2.0.16",
] ]
[[package]]
name = "serde_derive_internals"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]] [[package]]
name = "serde_json" name = "serde_json"
version = "1.0.96" version = "1.0.96"

@ -12,7 +12,7 @@ readme = "README.md"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
[workspace] [workspace]
members = ["distant-auth", "distant-core", "distant-net", "distant-ssh2"] members = ["distant-auth", "distant-core", "distant-net", "distant-protocol", "distant-ssh2"]
[profile.release] [profile.release]
opt-level = 'z' opt-level = 'z'
@ -32,7 +32,7 @@ clap_complete = "4.2.0"
config = { version = "0.13.3", default-features = false, features = ["toml"] } config = { version = "0.13.3", default-features = false, features = ["toml"] }
derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] } derive_more = { version = "0.99.17", default-features = false, features = ["display", "from", "error", "is_variant"] }
dialoguer = { version = "0.10.3", default-features = false } dialoguer = { version = "0.10.3", default-features = false }
distant-core = { version = "=0.20.0-alpha.7", path = "distant-core", features = ["schemars"] } distant-core = { version = "=0.20.0-alpha.7", path = "distant-core" }
directories = "5.0.0" directories = "5.0.0"
file-mode = "0.1.2" file-mode = "0.1.2"
flexi_logger = "0.25.3" flexi_logger = "0.25.3"

@ -64,13 +64,13 @@ the available features and which backend supports each feature:
| Feature | distant | ssh | | Feature | distant | ssh |
| --------------------- | --------| ----| | --------------------- | --------| ----|
| Capabilities | ✅ | ✅ |
| Filesystem I/O | ✅ | ✅ | | Filesystem I/O | ✅ | ✅ |
| Filesystem Watching | ✅ | ✅ | | Filesystem Watching | ✅ | ✅ |
| Process Execution | ✅ | ✅ | | Process Execution | ✅ | ✅ |
| Reconnect | ✅ | ❌ | | Reconnect | ✅ | ❌ |
| Search | ✅ | ❌ | | Search | ✅ | ❌ |
| System Information | ✅ | ⚠ | | System Information | ✅ | ⚠ |
| Version | ✅ | ✅ |
* ✅ means full support * ✅ means full support
* ⚠ means partial support * ⚠ means partial support
@ -78,7 +78,6 @@ the available features and which backend supports each feature:
### Feature Details ### Feature Details
* `Capabilities` - able to report back what it is capable of performing
* `Filesystem I/O` - able to read from and write to the filesystem * `Filesystem I/O` - able to read from and write to the filesystem
* `Filesystem Watching` - able to receive notifications when changes to the * `Filesystem Watching` - able to receive notifications when changes to the
filesystem occur filesystem occur
@ -86,6 +85,7 @@ the available features and which backend supports each feature:
* `Reconnect` - able to reconnect after network outages * `Reconnect` - able to reconnect after network outages
* `Search` - able to search the filesystem * `Search` - able to search the filesystem
* `System Information` - able to retrieve information about the system * `System Information` - able to retrieve information about the system
* `Version` - able to report back version information
## Example ## Example

@ -11,21 +11,19 @@ repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md" readme = "README.md"
license = "MIT OR Apache-2.0" license = "MIT OR Apache-2.0"
[features]
schemars = ["dep:schemars", "distant-net/schemars"]
[dependencies] [dependencies]
async-trait = "0.1.68" async-trait = "0.1.68"
bitflags = "2.0.2" bitflags = "2.0.2"
bytes = "1.4.0" bytes = "1.4.0"
derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] } derive_more = { version = "0.99.17", default-features = false, features = ["as_mut", "as_ref", "deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant", "try_into"] }
distant-net = { version = "=0.20.0-alpha.7", path = "../distant-net" } distant-net = { version = "=0.20.0-alpha.7", path = "../distant-net" }
distant-protocol = { version = "=0.20.0-alpha.7", path = "../distant-protocol" }
futures = "0.3.28" futures = "0.3.28"
grep = "0.2.11" grep = "0.2.11"
hex = "0.4.3" hex = "0.4.3"
ignore = "0.4.20" ignore = "0.4.20"
log = "0.4.17" log = "0.4.17"
notify = { version = "5.1.0", features = ["serde"] } notify = { version = "6.0.0", features = ["serde"] }
num_cpus = "1.15.0" num_cpus = "1.15.0"
once_cell = "1.17.1" once_cell = "1.17.1"
portable-pty = "0.8.1" portable-pty = "0.8.1"
@ -42,9 +40,6 @@ walkdir = "2.3.3"
whoami = "1.4.0" whoami = "1.4.0"
winsplit = "0.1.0" winsplit = "0.1.0"
# Optional dependencies based on features
schemars = { version = "0.8.12", optional = true }
[dev-dependencies] [dev-dependencies]
assert_fs = "1.0.12" assert_fs = "1.0.12"
env_logger = "0.10.0" env_logger = "0.10.0"

@ -28,18 +28,9 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml ```toml
[dependencies] [dependencies]
distant-core = "0.19" distant-core = "0.20"
``` ```
## Features
Currently, the library supports the following features:
- `schemars`: derives the `schemars::JsonSchema` interface on
`DistantMsg`, `DistantRequestData`, and `DistantResponseData` data types
By default, no features are enabled on the library.
## Examples ## Examples
Below is an example of connecting to a distant server over TCP without any Below is an example of connecting to a distant server over TCP without any

@ -8,8 +8,8 @@ use distant_net::server::{ConnectionCtx, Reply, ServerCtx, ServerHandler};
use log::*; use log::*;
use crate::protocol::{ use crate::protocol::{
self, Capabilities, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, self, ChangeKind, DirEntry, Environment, Error, Metadata, Permissions, ProcessId, PtySize,
PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
}; };
mod local; mod local;
@ -76,8 +76,8 @@ pub trait DistantApi {
/// ///
/// *Override this, otherwise it will return "unsupported" as an error.* /// *Override this, otherwise it will return "unsupported" as an error.*
#[allow(unused_variables)] #[allow(unused_variables)]
async fn capabilities(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Capabilities> { async fn version(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Version> {
unsupported("capabilities") unsupported("version")
} }
/// Reads bytes from a file. /// Reads bytes from a file.
@ -536,11 +536,11 @@ where
D: Send + Sync, D: Send + Sync,
{ {
match request { match request {
protocol::Request::Capabilities {} => server protocol::Request::Version {} => server
.api .api
.capabilities(ctx) .version(ctx)
.await .await
.map(|supported| protocol::Response::Capabilities { supported }) .map(protocol::Response::Version)
.unwrap_or_else(protocol::Response::from), .unwrap_or_else(protocol::Response::from),
protocol::Request::FileRead { path } => server protocol::Request::FileRead { path } => server
.api .api

@ -1,5 +1,6 @@
use std::io;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::time::SystemTime;
use std::{env, io};
use async_trait::async_trait; use async_trait::async_trait;
use ignore::{DirEntry as WalkDirEntry, WalkBuilder}; use ignore::{DirEntry as WalkDirEntry, WalkBuilder};
@ -10,6 +11,7 @@ use walkdir::WalkDir;
use crate::protocol::{ use crate::protocol::{
Capabilities, ChangeKind, ChangeKindSet, DirEntry, Environment, FileType, Metadata, Capabilities, ChangeKind, ChangeKindSet, DirEntry, Environment, FileType, Metadata,
Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Permissions, ProcessId, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo,
Version, PROTOCOL_VERSION,
}; };
use crate::{DistantApi, DistantCtx}; use crate::{DistantApi, DistantCtx};
@ -39,12 +41,6 @@ impl LocalDistantApi {
impl DistantApi for LocalDistantApi { impl DistantApi for LocalDistantApi {
type LocalData = (); type LocalData = ();
async fn capabilities(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Capabilities> {
debug!("[Conn {}] Querying capabilities", ctx.connection_id);
Ok(Capabilities::all())
}
async fn read_file( async fn read_file(
&self, &self,
ctx: DistantCtx<Self::LocalData>, ctx: DistantCtx<Self::LocalData>,
@ -409,7 +405,66 @@ impl DistantApi for LocalDistantApi {
"[Conn {}] Reading metadata for {:?} {{canonicalize: {}, resolve_file_type: {}}}", "[Conn {}] Reading metadata for {:?} {{canonicalize: {}, resolve_file_type: {}}}",
ctx.connection_id, path, canonicalize, resolve_file_type ctx.connection_id, path, canonicalize, resolve_file_type
); );
Metadata::read(path, canonicalize, resolve_file_type).await let metadata = tokio::fs::symlink_metadata(path.as_path()).await?;
let canonicalized_path = if canonicalize {
Some(tokio::fs::canonicalize(path.as_path()).await?)
} else {
None
};
// If asking for resolved file type and current type is symlink, then we want to refresh
// our metadata to get the filetype for the resolved link
let file_type = if resolve_file_type && metadata.file_type().is_symlink() {
tokio::fs::metadata(path).await?.file_type()
} else {
metadata.file_type()
};
Ok(Metadata {
canonicalized_path,
accessed: metadata
.accessed()
.ok()
.and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
.map(|d| d.as_millis()),
created: metadata
.created()
.ok()
.and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
.map(|d| d.as_millis()),
modified: metadata
.modified()
.ok()
.and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
.map(|d| d.as_millis()),
len: metadata.len(),
readonly: metadata.permissions().readonly(),
file_type: if file_type.is_dir() {
FileType::Dir
} else if file_type.is_file() {
FileType::File
} else {
FileType::Symlink
},
#[cfg(unix)]
unix: Some({
use std::os::unix::prelude::*;
let mode = metadata.mode();
crate::protocol::UnixMetadata::from(mode)
}),
#[cfg(not(unix))]
unix: None,
#[cfg(windows)]
windows: Some({
use std::os::windows::prelude::*;
let attributes = metadata.file_attributes();
crate::protocol::WindowsMetadata::from(attributes)
}),
#[cfg(not(windows))]
windows: None,
})
} }
async fn set_permissions( async fn set_permissions(
@ -615,7 +670,29 @@ impl DistantApi for LocalDistantApi {
async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> { async fn system_info(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<SystemInfo> {
debug!("[Conn {}] Reading system information", ctx.connection_id); debug!("[Conn {}] Reading system information", ctx.connection_id);
Ok(SystemInfo::default()) Ok(SystemInfo {
family: env::consts::FAMILY.to_string(),
os: env::consts::OS.to_string(),
arch: env::consts::ARCH.to_string(),
current_dir: env::current_dir().unwrap_or_default(),
main_separator: std::path::MAIN_SEPARATOR,
username: whoami::username(),
shell: if cfg!(windows) {
env::var("ComSpec").unwrap_or_else(|_| String::from("cmd.exe"))
} else {
env::var("SHELL").unwrap_or_else(|_| String::from("/bin/sh"))
},
})
}
async fn version(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Version> {
debug!("[Conn {}] Querying version", ctx.connection_id);
Ok(Version {
server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
protocol_version: PROTOCOL_VERSION,
capabilities: Capabilities::all(),
})
} }
} }

@ -5,12 +5,13 @@ use std::path::{Path, PathBuf};
use distant_net::common::ConnectionId; use distant_net::common::ConnectionId;
use log::*; use log::*;
use notify::event::{AccessKind, AccessMode, ModifyKind};
use notify::{ use notify::{
Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind, Config as WatcherConfig, Error as WatcherError, ErrorKind as WatcherErrorKind,
Event as WatcherEvent, PollWatcher, RecursiveMode, Watcher, Event as WatcherEvent, EventKind, PollWatcher, RecursiveMode, Watcher,
}; };
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError; use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{self};
use tokio::sync::oneshot; use tokio::sync::oneshot;
use tokio::task::JoinHandle; use tokio::task::JoinHandle;
@ -256,7 +257,20 @@ async fn watcher_task(mut watcher: impl Watcher, mut rx: mpsc::Receiver<InnerWat
} }
} }
InnerWatcherMsg::Event { ev } => { InnerWatcherMsg::Event { ev } => {
let kind = ChangeKind::from(ev.kind); let kind = match ev.kind {
EventKind::Access(AccessKind::Read) => ChangeKind::Access,
EventKind::Modify(ModifyKind::Metadata(_)) => ChangeKind::Attribute,
EventKind::Access(AccessKind::Close(AccessMode::Write)) => {
ChangeKind::CloseWrite
}
EventKind::Access(AccessKind::Close(_)) => ChangeKind::CloseNoWrite,
EventKind::Create(_) => ChangeKind::Create,
EventKind::Remove(_) => ChangeKind::Delete,
EventKind::Modify(ModifyKind::Data(_)) => ChangeKind::Modify,
EventKind::Access(AccessKind::Open(_)) => ChangeKind::Open,
EventKind::Modify(ModifyKind::Name(_)) => ChangeKind::Rename,
_ => ChangeKind::Unknown,
};
for registered_path in registered_paths.iter() { for registered_path in registered_paths.iter() {
match registered_path.filter_and_send(kind, &ev.paths).await { match registered_path.filter_and_send(kind, &ev.paths).await {

@ -11,8 +11,8 @@ use crate::client::{
Watcher, Watcher,
}; };
use crate::protocol::{ use crate::protocol::{
self, Capabilities, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, self, ChangeKindSet, DirEntry, Environment, Error as Failure, Metadata, Permissions, PtySize,
Permissions, PtySize, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, SearchId, SearchQuery, SetPermissionsOptions, SystemInfo, Version,
}; };
pub type AsyncReturn<'a, T, E = io::Error> = pub type AsyncReturn<'a, T, E = io::Error> =
@ -38,9 +38,6 @@ pub trait DistantChannelExt {
data: impl Into<String>, data: impl Into<String>,
) -> AsyncReturn<'_, ()>; ) -> AsyncReturn<'_, ()>;
/// Retrieves server capabilities
fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities>;
/// Copies a remote file or directory from src to dst /// Copies a remote file or directory from src to dst
fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()>; fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()>;
@ -136,6 +133,9 @@ pub trait DistantChannelExt {
/// Retrieves information about the remote system /// Retrieves information about the remote system
fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo>; fn system_info(&mut self) -> AsyncReturn<'_, SystemInfo>;
/// Retrieves server version information
fn version(&mut self) -> AsyncReturn<'_, Version>;
/// Writes a remote file with the data from a collection of bytes /// Writes a remote file with the data from a collection of bytes
fn write_file( fn write_file(
&mut self, &mut self,
@ -204,18 +204,6 @@ impl DistantChannelExt
) )
} }
fn capabilities(&mut self) -> AsyncReturn<'_, Capabilities> {
make_body!(
self,
protocol::Request::Capabilities {},
|data| match data {
protocol::Response::Capabilities { supported } => Ok(supported),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
}
)
}
fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()> { fn copy(&mut self, src: impl Into<PathBuf>, dst: impl Into<PathBuf>) -> AsyncReturn<'_, ()> {
make_body!( make_body!(
self, self,
@ -457,6 +445,14 @@ impl DistantChannelExt
}) })
} }
fn version(&mut self) -> AsyncReturn<'_, Version> {
make_body!(self, protocol::Request::Version {}, |data| match data {
protocol::Response::Version(x) => Ok(x),
protocol::Response::Error(x) => Err(io::Error::from(x)),
_ => Err(mismatched_response()),
})
}
fn write_file( fn write_file(
&mut self, &mut self,
path: impl Into<PathBuf>, path: impl Into<PathBuf>,

@ -267,7 +267,7 @@ mod tests {
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
protocol::Response::Changed(Change { protocol::Response::Changed(Change {
kind: ChangeKind::Content, kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
], ],
@ -289,7 +289,7 @@ mod tests {
assert_eq!( assert_eq!(
change, change,
Change { Change {
kind: ChangeKind::Content, kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()] paths: vec![test_path.to_path_buf()]
} }
); );
@ -342,7 +342,7 @@ mod tests {
.write_frame_for(&Response::new( .write_frame_for(&Response::new(
req.id.clone() + "1", req.id.clone() + "1",
protocol::Response::Changed(Change { protocol::Response::Changed(Change {
kind: ChangeKind::Content, kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
)) ))
@ -354,7 +354,7 @@ mod tests {
.write_frame_for(&Response::new( .write_frame_for(&Response::new(
req.id, req.id,
protocol::Response::Changed(Change { protocol::Response::Changed(Change {
kind: ChangeKind::Remove, kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
)) ))
@ -375,7 +375,7 @@ mod tests {
assert_eq!( assert_eq!(
change, change,
Change { Change {
kind: ChangeKind::Remove, kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()] paths: vec![test_path.to_path_buf()]
} }
); );
@ -418,11 +418,11 @@ mod tests {
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
protocol::Response::Changed(Change { protocol::Response::Changed(Change {
kind: ChangeKind::Content, kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
protocol::Response::Changed(Change { protocol::Response::Changed(Change {
kind: ChangeKind::Remove, kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()], paths: vec![test_path.to_path_buf()],
}), }),
], ],
@ -482,14 +482,14 @@ mod tests {
assert_eq!( assert_eq!(
watcher.lock().await.next().await, watcher.lock().await.next().await,
Some(Change { Some(Change {
kind: ChangeKind::Content, kind: ChangeKind::Modify,
paths: vec![test_path.to_path_buf()] paths: vec![test_path.to_path_buf()]
}) })
); );
assert_eq!( assert_eq!(
watcher.lock().await.next().await, watcher.lock().await.next().await,
Some(Change { Some(Change {
kind: ChangeKind::Remove, kind: ChangeKind::Delete,
paths: vec![test_path.to_path_buf()] paths: vec![test_path.to_path_buf()]
}) })
); );

@ -7,10 +7,11 @@ pub use client::*;
mod credentials; mod credentials;
pub use credentials::*; pub use credentials::*;
pub mod protocol;
mod constants; mod constants;
mod serde_str; mod serde_str;
/// Re-export of `distant-net` as `net` /// Network functionality.
pub use distant_net as net; pub use distant_net as net;
/// Protocol structures.
pub use distant_protocol as protocol;

@ -1,572 +0,0 @@
use std::io;
use std::path::PathBuf;
use derive_more::{From, IsVariant};
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, EnumDiscriminants, EnumIter, EnumMessage, EnumString};
// Protocol submodules; each public module is re-exported at this level so
// consumers can reference types directly (e.g. `protocol::ChangeKind`).
mod capabilities;
pub use capabilities::*;
mod change;
pub use change::*;
mod cmd;
pub use cmd::*;
mod error;
pub use error::*;
mod filesystem;
pub use filesystem::*;
mod metadata;
pub use metadata::*;
mod permissions;
pub use permissions::*;
mod pty;
pub use pty::*;
mod search;
pub use search::*;
mod system;
pub use system::*;
// Internal helpers (e.g. serde default functions); crate-visible only, not
// part of the public API.
mod utils;
pub(crate) use utils::*;
/// Id for a remote process
// NOTE(review): presumably mirrors the OS-level pid reported by the server —
// confirm against the process-spawning implementation.
pub type ProcessId = u32;

/// Mapping of environment variables
pub type Environment = distant_net::common::Map;
/// Represents a wrapper around a distant message, supporting single and batch requests
///
/// Serialized with `#[serde(untagged)]`, so on the wire a single payload and a
/// batch (array of payloads) are distinguished purely by their shape.
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(untagged)]
pub enum Msg<T> {
    /// A single message payload
    Single(T),
    /// A batch of message payloads delivered together
    Batch(Vec<T>),
}
impl<T> Msg<T> {
    /// Returns true if msg has a single payload
    pub fn is_single(&self) -> bool {
        matches!(self, Self::Single(_))
    }

    /// Returns reference to single value if msg is single variant
    pub fn as_single(&self) -> Option<&T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to single value if msg is single variant
    //
    // FIX: previously declared as `-> Option<&T>`, returning an immutable
    // reference despite the `mut` name and `&mut self` receiver; now returns
    // `Option<&mut T>`, consistent with `as_mut_batch`.
    pub fn as_mut_single(&mut self) -> Option<&mut T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the single value if msg is single variant
    pub fn into_single(self) -> Option<T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns true if msg has a batch of payloads
    pub fn is_batch(&self) -> bool {
        matches!(self, Self::Batch(_))
    }

    /// Returns reference to batch value if msg is batch variant
    pub fn as_batch(&self) -> Option<&[T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to batch value if msg is batch variant
    pub fn as_mut_batch(&mut self) -> Option<&mut [T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the batch value if msg is batch variant
    pub fn into_batch(self) -> Option<Vec<T>> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Convert into a collection of payload data; a single payload becomes a
    /// one-element vector
    pub fn into_vec(self) -> Vec<T> {
        match self {
            Self::Single(x) => vec![x],
            Self::Batch(x) => x,
        }
    }
}
// Only compiled when the `schemars` feature is enabled.
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Msg<T> {
    /// Generates the JSON schema describing `Msg<T>` (including `T`'s schema)
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Msg<T>)
    }
}
/// Represents the payload of a request to be performed on the remote machine
#[derive(Clone, Debug, PartialEq, Eq, EnumDiscriminants, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[strum_discriminants(derive(
AsRefStr,
strum::Display,
EnumIter,
EnumMessage,
EnumString,
Hash,
PartialOrd,
Ord,
IsVariant,
Serialize,
Deserialize
))]
#[cfg_attr(
feature = "schemars",
strum_discriminants(derive(schemars::JsonSchema))
)]
#[strum_discriminants(name(CapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum Request {
/// Retrieve information about the server's capabilities
#[strum_discriminants(strum(message = "Supports retrieving capabilities"))]
Capabilities {},
/// Reads a file from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading binary file"))]
FileRead {
/// The path to the file on the remote machine
path: PathBuf,
},
/// Reads a file from the specified path on the remote machine
/// and treats the contents as text
#[strum_discriminants(strum(message = "Supports reading text file"))]
FileReadText {
/// The path to the file on the remote machine
path: PathBuf,
},
/// Writes a file, creating it if it does not exist, and overwriting any existing content
/// on the remote machine
#[strum_discriminants(strum(message = "Supports writing binary file"))]
FileWrite {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Writes a file using text instead of bytes, creating it if it does not exist,
/// and overwriting any existing content on the remote machine
#[strum_discriminants(strum(message = "Supports writing text file"))]
FileWriteText {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
text: String,
},
/// Appends to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to binary file"))]
FileAppend {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Appends text to a file, creating it if it does not exist, on the remote machine
#[strum_discriminants(strum(message = "Supports appending to text file"))]
FileAppendText {
/// The path to the file on the remote machine
path: PathBuf,
/// Data for server-side writing of content
text: String,
},
/// Reads a directory from the specified path on the remote machine
#[strum_discriminants(strum(message = "Supports reading directory"))]
DirRead {
/// The path to the directory on the remote machine
path: PathBuf,
/// Maximum depth to traverse with 0 indicating there is no maximum
/// depth and 1 indicating the most immediate children within the
/// directory
#[serde(default = "one")]
depth: usize,
/// Whether or not to return absolute or relative paths
#[serde(default)]
absolute: bool,
/// Whether or not to canonicalize the resulting paths, meaning
/// returning the canonical, absolute form of a path with all
/// intermediate components normalized and symbolic links resolved
///
/// Note that the flag absolute must be true to have absolute paths
/// returned, even if canonicalize is flagged as true
#[serde(default)]
canonicalize: bool,
/// Whether or not to include the root directory in the retrieved
/// entries
///
/// If included, the root directory will also be a canonicalized,
/// absolute path and will not follow any of the other flags
#[serde(default)]
include_root: bool,
},
/// Creates a directory on the remote machine
#[strum_discriminants(strum(message = "Supports creating directory"))]
DirCreate {
/// The path to the directory on the remote machine
path: PathBuf,
/// Whether or not to create all parent directories
#[serde(default)]
all: bool,
},
/// Removes a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports removing files, directories, and symlinks"))]
Remove {
/// The path to the file or directory on the remote machine
path: PathBuf,
/// Whether or not to remove all contents within directory if is a directory.
/// Does nothing different for files
#[serde(default)]
force: bool,
},
/// Copies a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports copying files, directories, and symlinks"))]
Copy {
/// The path to the file or directory on the remote machine
src: PathBuf,
/// New location on the remote machine for copy of file or directory
dst: PathBuf,
},
/// Moves/renames a file or directory on the remote machine
#[strum_discriminants(strum(message = "Supports renaming files, directories, and symlinks"))]
Rename {
/// The path to the file or directory on the remote machine
src: PathBuf,
/// New location on the remote machine for the file or directory
dst: PathBuf,
},
/// Watches a path for changes
#[strum_discriminants(strum(message = "Supports watching filesystem for changes"))]
Watch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
/// If true, will recursively watch for changes within directories, otherwise
/// will only watch for changes immediately within directories
#[serde(default)]
recursive: bool,
/// Filter to only report back specified changes
#[serde(default)]
only: Vec<ChangeKind>,
/// Filter to report back changes except these specified changes
#[serde(default)]
except: Vec<ChangeKind>,
},
/// Unwatches a path for changes, meaning no additional changes will be reported
#[strum_discriminants(strum(message = "Supports unwatching filesystem for changes"))]
Unwatch {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
},
/// Checks whether the given path exists
#[strum_discriminants(strum(message = "Supports checking if a path exists"))]
Exists {
/// The path to the file or directory on the remote machine
path: PathBuf,
},
/// Retrieves filesystem metadata for the specified path on the remote machine
#[strum_discriminants(strum(
message = "Supports retrieving metadata about a file, directory, or symlink"
))]
Metadata {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
/// Whether or not to include a canonicalized version of the path, meaning
/// returning the canonical, absolute form of a path with all
/// intermediate components normalized and symbolic links resolved
#[serde(default)]
canonicalize: bool,
/// Whether or not to follow symlinks to determine absolute file type (dir/file)
#[serde(default)]
resolve_file_type: bool,
},
/// Sets permissions on a file, directory, or symlink on the remote machine
#[strum_discriminants(strum(
message = "Supports setting permissions on a file, directory, or symlink"
))]
SetPermissions {
/// The path to the file, directory, or symlink on the remote machine
path: PathBuf,
/// New permissions to apply to the file, directory, or symlink
permissions: Permissions,
/// Additional options to supply when setting permissions
#[serde(default)]
options: SetPermissionsOptions,
},
/// Searches filesystem using the provided query
#[strum_discriminants(strum(message = "Supports searching filesystem using queries"))]
Search {
/// Query to perform against the filesystem
query: SearchQuery,
},
/// Cancels an active search being run against the filesystem
#[strum_discriminants(strum(
message = "Supports canceling an active search against the filesystem"
))]
CancelSearch {
/// Id of the search to cancel
id: SearchId,
},
/// Spawns a new process on the remote machine
#[strum_discriminants(strum(message = "Supports spawning a process"))]
ProcSpawn {
/// The full command to run including arguments
cmd: Cmd,
/// Environment to provide to the remote process
#[serde(default)]
environment: Environment,
/// Alternative current directory for the remote process
#[serde(default)]
current_dir: Option<PathBuf>,
/// If provided, will spawn process in a pty, otherwise spawns directly
#[serde(default)]
pty: Option<PtySize>,
},
/// Kills a process running on the remote machine
#[strum_discriminants(strum(message = "Supports killing a spawned process"))]
ProcKill {
/// Id of the actively-running process
id: ProcessId,
},
/// Sends additional data to stdin of running process
#[strum_discriminants(strum(message = "Supports sending stdin to a spawned process"))]
ProcStdin {
/// Id of the actively-running process to send stdin data
id: ProcessId,
/// Data to send to a process's stdin pipe
#[serde(with = "serde_bytes")]
#[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
data: Vec<u8>,
},
/// Resize pty of remote process
#[strum_discriminants(strum(message = "Supports resizing the pty of a spawned process"))]
ProcResizePty {
/// Id of the actively-running process whose pty to resize
id: ProcessId,
/// The new pty dimensions
size: PtySize,
},
/// Retrieve information about the server and the system it is on
#[strum_discriminants(strum(message = "Supports retrieving system information"))]
SystemInfo {},
}
#[cfg(feature = "schemars")]
impl Request {
    /// Produces the JSON schema describing every [`Request`] variant.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Request)
    }
}
/// Represents the payload of a successful response
///
/// Serialized with an internal `type` tag holding the snake_case variant
/// name; unknown fields are rejected during deserialization.
#[derive(Clone, Debug, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
#[strum(serialize_all = "snake_case")]
pub enum Response {
    /// General okay with no extra data, returned in cases like
    /// creating or removing a directory, copying a file, or renaming
    /// a file
    Ok,
    /// General-purpose failure that occurred from some request
    Error(Error),
    /// Response containing some arbitrary, binary data
    Blob {
        /// Binary data associated with the response
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Response containing some arbitrary, text data
    Text {
        /// Text data associated with the response
        data: String,
    },
    /// Response to reading a directory
    DirEntries {
        /// Entries contained within the requested directory
        entries: Vec<DirEntry>,
        /// Errors encountered while scanning for entries
        errors: Vec<Error>,
    },
    /// Response to a filesystem change for some watched file, directory, or symlink
    Changed(Change),
    /// Response to checking if a path exists
    Exists { value: bool },
    /// Represents metadata about some filesystem object (file, directory, symlink) on remote machine
    Metadata(Metadata),
    /// Represents a search being started
    SearchStarted {
        /// Arbitrary id associated with search
        id: SearchId,
    },
    /// Represents some subset of results for a search query (may not be all of them)
    SearchResults {
        /// Arbitrary id associated with search
        id: SearchId,
        /// Collection of matches from performing a query
        matches: Vec<SearchQueryMatch>,
    },
    /// Represents a search being completed
    SearchDone {
        /// Arbitrary id associated with search
        id: SearchId,
    },
    /// Response to starting a new process
    ProcSpawned {
        /// Arbitrary id associated with running process
        id: ProcessId,
    },
    /// Actively-transmitted stdout as part of running process
    ProcStdout {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Data read from a process' stdout pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Actively-transmitted stderr as part of running process
    ProcStderr {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Data read from a process' stderr pipe
        #[serde(with = "serde_bytes")]
        #[cfg_attr(feature = "schemars", schemars(with = "Vec<u8>"))]
        data: Vec<u8>,
    },
    /// Response to a process finishing
    ProcDone {
        /// Arbitrary id associated with running process
        id: ProcessId,
        /// Whether or not termination was successful
        success: bool,
        /// Exit code associated with termination, will be missing if terminated by signal
        code: Option<i32>,
    },
    /// Response to retrieving information about the server and the system it is on
    SystemInfo(SystemInfo),
    /// Response to retrieving information about the server's capabilities
    Capabilities { supported: Capabilities },
}
#[cfg(feature = "schemars")]
impl Response {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Response)
}
}
impl From<io::Error> for Response {
fn from(x: io::Error) -> Self {
Self::Error(Error::from(x))
}
}
/// Used to provide a default serde value of 1
///
/// serde's `default = "..."` attribute requires a function path, so this
/// `const fn` stands in for the literal `1`.
const fn one() -> usize {
    1
}

@ -1,207 +0,0 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
use super::CapabilityKind;
/// Set of supported capabilities for a server
///
/// Backed by a `HashSet` whose elements compare by capability kind only
/// (case-insensitively); serialized transparently as the underlying set.
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(transparent)]
pub struct Capabilities(#[into_iterator(owned, ref)] HashSet<Capability>);
impl Capabilities {
    /// Return set of capabilities encompassing all possible capabilities
    pub fn all() -> Self {
        Self(CapabilityKind::iter().map(Capability::from).collect())
    }

    /// Return empty set of capabilities
    pub fn none() -> Self {
        Self(HashSet::new())
    }

    /// Builds a lookup key for the described kind.
    ///
    /// Equality and hashing of [`Capability`] ignore the description, so an
    /// empty description is sufficient for set queries.
    fn lookup_key(kind: impl AsRef<str>) -> Capability {
        Capability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        }
    }

    /// Returns true if the capability with described kind is included
    pub fn contains(&self, kind: impl AsRef<str>) -> bool {
        self.0.contains(&Self::lookup_key(kind))
    }

    /// Adds the specified capability to the set of capabilities
    ///
    /// * If the set did not have this capability, returns `true`
    /// * If the set did have this capability, returns `false`
    pub fn insert(&mut self, cap: impl Into<Capability>) -> bool {
        self.0.insert(cap.into())
    }

    /// Removes the capability with the described kind, returning the capability
    pub fn take(&mut self, kind: impl AsRef<str>) -> Option<Capability> {
        self.0.take(&Self::lookup_key(kind))
    }

    /// Removes the capability with the described kind, returning true if it existed
    pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
        self.0.remove(&Self::lookup_key(kind))
    }

    /// Converts into vec of capabilities sorted by kind
    pub fn into_sorted_vec(self) -> Vec<Capability> {
        let mut this = self.0.into_iter().collect::<Vec<_>>();
        this.sort_unstable();
        this
    }
}

#[cfg(feature = "schemars")]
impl Capabilities {
    /// Produces the JSON schema describing a [`Capabilities`] set.
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Capabilities)
    }
}
impl BitAnd for &Capabilities {
    type Output = Capabilities;

    /// Intersection of two capability sets.
    fn bitand(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 & &rhs.0)
    }
}

impl BitOr for &Capabilities {
    type Output = Capabilities;

    /// Union of two capability sets.
    fn bitor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 | &rhs.0)
    }
}

impl BitOr<Capability> for &Capabilities {
    type Output = Capabilities;

    /// Union of a capability set with a single extra capability; if the kind
    /// is already present, the existing entry (and its description) is kept.
    fn bitor(self, rhs: Capability) -> Self::Output {
        let mut result = Capabilities(self.0.clone());
        result.0.insert(rhs);
        result
    }
}

impl BitXor for &Capabilities {
    type Output = Capabilities;

    /// Symmetric difference of two capability sets.
    fn bitxor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 ^ &rhs.0)
    }
}
impl FromIterator<Capability> for Capabilities {
fn from_iter<I: IntoIterator<Item = Capability>>(iter: I) -> Self {
let mut this = Capabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// Capability tied to a server. A capability is equivalent based on its kind and not description.
///
/// See the `PartialEq`/`Ord`/`Hash` impls: comparisons use the lowercased
/// `kind` only and never the `description`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Capability {
    /// Label describing the kind of capability
    pub kind: String,
    /// Information about the capability
    pub description: String,
}
impl Capability {
/// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible,
/// returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<CapabilityKind> {
CapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for Capability {
    /// Capabilities compare equal when their kinds match case-insensitively;
    /// the description is intentionally ignored.
    fn eq(&self, other: &Self) -> bool {
        self.kind.eq_ignore_ascii_case(&other.kind)
    }
}
impl Eq for Capability {}
impl PartialOrd for Capability {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Capability {
    /// Orders by lowercased kind, consistent with the case-insensitive `Eq`.
    fn cmp(&self, other: &Self) -> Ordering {
        self.kind
            .to_ascii_lowercase()
            .cmp(&other.kind.to_ascii_lowercase())
    }
}
impl Hash for Capability {
    /// Hashes the lowercased kind only, keeping `Hash` consistent with the
    /// case-insensitive `Eq` impl (required for `HashSet` correctness).
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.kind.to_ascii_lowercase().hash(state);
    }
}
impl From<CapabilityKind> for Capability {
    /// Creates a new capability using the kind's default message
    fn from(kind: CapabilityKind) -> Self {
        // Missing strum message yields an empty description
        let description = kind.get_message().unwrap_or_default().to_string();
        Self {
            kind: kind.to_string(),
            description,
        }
    }
}
#[cfg(feature = "schemars")]
impl Capability {
    /// Produces the JSON schema describing a [`Capability`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Capability)
    }
}
#[cfg(feature = "schemars")]
impl CapabilityKind {
    /// Produces the JSON schema describing a [`CapabilityKind`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(CapabilityKind)
    }
}

@ -1,516 +0,0 @@
use std::collections::HashSet;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::ops::{BitOr, Sub};
use std::path::PathBuf;
use std::str::FromStr;
use derive_more::{Deref, DerefMut, IntoIterator};
use notify::event::Event as NotifyEvent;
use notify::EventKind as NotifyEventKind;
use serde::{Deserialize, Serialize};
use strum::{EnumString, EnumVariantNames, VariantNames};
/// Change to one or more paths on the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Change {
    /// Label describing the kind of change
    pub kind: ChangeKind,
    /// Paths that were changed
    pub paths: Vec<PathBuf>,
}
#[cfg(feature = "schemars")]
impl Change {
    /// Produces the JSON schema describing a [`Change`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Change)
    }
}
impl From<NotifyEvent> for Change {
fn from(x: NotifyEvent) -> Self {
Self {
kind: x.kind.into(),
paths: x.paths,
}
}
}
/// Kind of filesystem change for a watched path
///
/// A flattened form of `notify`'s nested event kinds (see the
/// `From<NotifyEventKind>` conversion in this file).
#[derive(
    Copy,
    Clone,
    Debug,
    strum::Display,
    EnumString,
    EnumVariantNames,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum ChangeKind {
    /// Something about a file or directory was accessed, but
    /// no specific details were known
    Access,
    /// A file was closed for executing
    AccessCloseExecute,
    /// A file was closed for reading
    AccessCloseRead,
    /// A file was closed for writing
    AccessCloseWrite,
    /// A file was opened for executing
    AccessOpenExecute,
    /// A file was opened for reading
    AccessOpenRead,
    /// A file was opened for writing
    AccessOpenWrite,
    /// A file or directory was read
    AccessRead,
    /// The access time of a file or directory was changed
    AccessTime,
    /// A file, directory, or something else was created
    Create,
    /// The content of a file or directory changed
    Content,
    /// The data of a file or directory was modified, but
    /// no specific details were known
    Data,
    /// The metadata of a file or directory was modified, but
    /// no specific details were known
    Metadata,
    /// Something about a file or directory was modified, but
    /// no specific details were known
    Modify,
    /// A file, directory, or something else was removed
    Remove,
    /// A file or directory was renamed, but no specific details were known
    Rename,
    /// A file or directory was renamed, and the provided paths
    /// are the source and target in that order (from, to)
    RenameBoth,
    /// A file or directory was renamed, and the provided path
    /// is the origin of the rename (before being renamed)
    RenameFrom,
    /// A file or directory was renamed, and the provided path
    /// is the result of the rename
    RenameTo,
    /// A file's size changed
    Size,
    /// The ownership of a file or directory was changed
    Ownership,
    /// The permissions of a file or directory was changed
    Permissions,
    /// The write or modify time of a file or directory was changed
    WriteTime,
    /// Catchall in case we have no insight as to the type of change
    Unknown,
}
impl ChangeKind {
    /// Returns a list of all variants as str names
    pub const fn variants() -> &'static [&'static str] {
        Self::VARIANTS
    }

    /// Returns a list of all variants as a vec
    pub fn all() -> Vec<ChangeKind> {
        ChangeKindSet::all().into_sorted_vec()
    }

    /// Returns true if the change is a kind of access
    pub fn is_access_kind(&self) -> bool {
        matches!(self, Self::Access | Self::AccessRead)
            || self.is_open_access_kind()
            || self.is_close_access_kind()
    }

    /// Returns true if the change is a kind of open access
    pub fn is_open_access_kind(&self) -> bool {
        matches!(
            self,
            Self::AccessOpenRead | Self::AccessOpenWrite | Self::AccessOpenExecute
        )
    }

    /// Returns true if the change is a kind of close access
    pub fn is_close_access_kind(&self) -> bool {
        matches!(
            self,
            Self::AccessCloseRead | Self::AccessCloseWrite | Self::AccessCloseExecute
        )
    }

    /// Returns true if the change is a kind of creation
    pub fn is_create_kind(&self) -> bool {
        *self == Self::Create
    }

    /// Returns true if the change is a kind of modification
    pub fn is_modify_kind(&self) -> bool {
        *self == Self::Modify || self.is_data_modify_kind() || self.is_metadata_modify_kind()
    }

    /// Returns true if the change is a kind of data modification
    pub fn is_data_modify_kind(&self) -> bool {
        matches!(self, Self::Content | Self::Data | Self::Size)
    }

    /// Returns true if the change is a kind of metadata modification
    pub fn is_metadata_modify_kind(&self) -> bool {
        matches!(
            self,
            Self::AccessTime
                | Self::Metadata
                | Self::Ownership
                | Self::Permissions
                | Self::WriteTime
        )
    }

    /// Returns true if the change is a kind of rename
    pub fn is_rename_kind(&self) -> bool {
        matches!(
            self,
            Self::Rename | Self::RenameBoth | Self::RenameFrom | Self::RenameTo
        )
    }

    /// Returns true if the change is a kind of removal
    pub fn is_remove_kind(&self) -> bool {
        *self == Self::Remove
    }

    /// Returns true if the change kind is unknown
    pub fn is_unknown_kind(&self) -> bool {
        *self == Self::Unknown
    }
}
#[cfg(feature = "schemars")]
impl ChangeKind {
    /// Produces the JSON schema describing a [`ChangeKind`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKind)
    }
}
impl BitOr for ChangeKind {
    type Output = ChangeKindSet;

    /// Combines two kinds into a change set (one element if both are equal).
    fn bitor(self, rhs: Self) -> Self::Output {
        ChangeKindSet::from(self) | rhs
    }
}
impl From<NotifyEventKind> for ChangeKind {
    /// Flattens `notify`'s nested event-kind hierarchy into the protocol's
    /// flat [`ChangeKind`]: unrecognized sub-kinds fall back to the closest
    /// general variant (`Access`, `Rename`, `Data`, `Metadata`, `Modify`),
    /// and `Any`/`Other` map to [`ChangeKind::Unknown`].
    fn from(x: NotifyEventKind) -> Self {
        use notify::event::{
            AccessKind, AccessMode, DataChange, MetadataKind, ModifyKind, RenameMode,
        };
        // NOTE: within each group the specific patterns must precede the
        //       catch-all (`_`) pattern for that group
        match x {
            // File/directory access events
            NotifyEventKind::Access(AccessKind::Read) => Self::AccessRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Execute)) => {
                Self::AccessOpenExecute
            }
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Read)) => Self::AccessOpenRead,
            NotifyEventKind::Access(AccessKind::Open(AccessMode::Write)) => Self::AccessOpenWrite,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Execute)) => {
                Self::AccessCloseExecute
            }
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Read)) => Self::AccessCloseRead,
            NotifyEventKind::Access(AccessKind::Close(AccessMode::Write)) => Self::AccessCloseWrite,
            NotifyEventKind::Access(_) => Self::Access,
            // File/directory creation events
            NotifyEventKind::Create(_) => Self::Create,
            // Rename-oriented events
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::Both)) => Self::RenameBoth,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::From)) => Self::RenameFrom,
            NotifyEventKind::Modify(ModifyKind::Name(RenameMode::To)) => Self::RenameTo,
            NotifyEventKind::Modify(ModifyKind::Name(_)) => Self::Rename,
            // Data-modification events
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Content)) => Self::Content,
            NotifyEventKind::Modify(ModifyKind::Data(DataChange::Size)) => Self::Size,
            NotifyEventKind::Modify(ModifyKind::Data(_)) => Self::Data,
            // Metadata-modification events
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::AccessTime)) => {
                Self::AccessTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::WriteTime)) => {
                Self::WriteTime
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Permissions)) => {
                Self::Permissions
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(MetadataKind::Ownership)) => {
                Self::Ownership
            }
            NotifyEventKind::Modify(ModifyKind::Metadata(_)) => Self::Metadata,
            // General modification events
            NotifyEventKind::Modify(_) => Self::Modify,
            // File/directory removal events
            NotifyEventKind::Remove(_) => Self::Remove,
            // Catch-all for other events
            NotifyEventKind::Any | NotifyEventKind::Other => Self::Unknown,
        }
    }
}
/// Represents a distinct set of different change kinds
///
/// Dereferences to the underlying `HashSet<ChangeKind>`; equality and
/// hashing are order-independent (see the `PartialEq`/`Hash` impls).
#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct ChangeKindSet(HashSet<ChangeKind>);
impl ChangeKindSet {
/// Produces an empty set of [`ChangeKind`]
pub fn empty() -> Self {
Self(HashSet::new())
}
/// Produces a set of all [`ChangeKind`]
pub fn all() -> Self {
vec![
ChangeKind::Access,
ChangeKind::AccessCloseExecute,
ChangeKind::AccessCloseRead,
ChangeKind::AccessCloseWrite,
ChangeKind::AccessOpenExecute,
ChangeKind::AccessOpenRead,
ChangeKind::AccessOpenWrite,
ChangeKind::AccessRead,
ChangeKind::AccessTime,
ChangeKind::Create,
ChangeKind::Content,
ChangeKind::Data,
ChangeKind::Metadata,
ChangeKind::Modify,
ChangeKind::Remove,
ChangeKind::Rename,
ChangeKind::RenameBoth,
ChangeKind::RenameFrom,
ChangeKind::RenameTo,
ChangeKind::Size,
ChangeKind::Ownership,
ChangeKind::Permissions,
ChangeKind::WriteTime,
ChangeKind::Unknown,
]
.into_iter()
.collect()
}
/// Produces a changeset containing all of the access kinds
pub fn access_set() -> Self {
Self::access_open_set()
| Self::access_close_set()
| ChangeKind::AccessRead
| ChangeKind::Access
}
/// Produces a changeset containing all of the open access kinds
pub fn access_open_set() -> Self {
ChangeKind::AccessOpenExecute | ChangeKind::AccessOpenRead | ChangeKind::AccessOpenWrite
}
/// Produces a changeset containing all of the close access kinds
pub fn access_close_set() -> Self {
ChangeKind::AccessCloseExecute | ChangeKind::AccessCloseRead | ChangeKind::AccessCloseWrite
}
// Produces a changeset containing all of the modification kinds
pub fn modify_set() -> Self {
Self::modify_data_set() | Self::modify_metadata_set() | ChangeKind::Modify
}
/// Produces a changeset containing all of the data modification kinds
pub fn modify_data_set() -> Self {
ChangeKind::Content | ChangeKind::Data | ChangeKind::Size
}
/// Produces a changeset containing all of the metadata modification kinds
pub fn modify_metadata_set() -> Self {
ChangeKind::AccessTime
| ChangeKind::Metadata
| ChangeKind::Ownership
| ChangeKind::Permissions
| ChangeKind::WriteTime
}
/// Produces a changeset containing all of the rename kinds
pub fn rename_set() -> Self {
ChangeKind::Rename | ChangeKind::RenameBoth | ChangeKind::RenameFrom | ChangeKind::RenameTo
}
/// Consumes set and returns a sorted vec of the kinds of changes
pub fn into_sorted_vec(self) -> Vec<ChangeKind> {
let mut v = self.0.into_iter().collect::<Vec<_>>();
v.sort();
v
}
}
#[cfg(feature = "schemars")]
impl ChangeKindSet {
    /// Produces the JSON schema describing a [`ChangeKindSet`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(ChangeKindSet)
    }
}
impl fmt::Display for ChangeKindSet {
    /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted
    /// such that this will always be consistent output
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut names: Vec<String> = self.0.iter().map(ToString::to_string).collect();
        names.sort_unstable();
        f.write_str(&names.join(","))
    }
}
impl PartialEq for ChangeKindSet {
    /// Two sets are equal when they contain exactly the same kinds.
    ///
    /// Equivalent to the former string-based comparison (every kind has a
    /// unique display name, so equal strings meant equal sets), but compares
    /// the underlying `HashSet`s directly without allocating and sorting
    /// two strings per comparison.
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl Eq for ChangeKindSet {}
impl Hash for ChangeKindSet {
    /// Hashes based on the output of [`fmt::Display`]
    ///
    /// Kept string-based so equal sets hash identically regardless of
    /// `HashSet` iteration order, preserving the `Eq`/`Hash` contract.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.to_string().hash(state);
    }
}
impl BitOr<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Union of two change sets, consuming both.
    fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output {
        self.0.extend(rhs.0);
        self
    }
}

impl BitOr<ChangeKind> for ChangeKindSet {
    type Output = Self;

    /// Union of a change set with one additional kind.
    fn bitor(mut self, rhs: ChangeKind) -> Self::Output {
        self.0.insert(rhs);
        self
    }
}

impl BitOr<ChangeKindSet> for ChangeKind {
    type Output = ChangeKindSet;

    /// Union with the single kind on the left-hand side; delegates to the
    /// set-on-the-left impl since union is symmetric.
    fn bitor(self, rhs: ChangeKindSet) -> Self::Output {
        rhs | self
    }
}
impl Sub<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Set difference: kinds in `self` that are not in `other`.
    fn sub(self, other: Self) -> Self::Output {
        &self - &other
    }
}

impl Sub<&'_ ChangeKindSet> for &ChangeKindSet {
    type Output = ChangeKindSet;

    /// Set difference without consuming either operand.
    fn sub(self, other: &ChangeKindSet) -> Self::Output {
        ChangeKindSet(self.0.difference(&other.0).copied().collect())
    }
}
impl FromStr for ChangeKindSet {
type Err = strum::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut change_set = HashSet::new();
for word in s.split(',') {
change_set.insert(ChangeKind::from_str(word.trim())?);
}
Ok(ChangeKindSet(change_set))
}
}
impl FromIterator<ChangeKind> for ChangeKindSet {
fn from_iter<I: IntoIterator<Item = ChangeKind>>(iter: I) -> Self {
let mut change_set = HashSet::new();
for i in iter {
change_set.insert(i);
}
ChangeKindSet(change_set)
}
}
impl From<ChangeKind> for ChangeKindSet {
fn from(change_kind: ChangeKind) -> Self {
let mut set = Self::empty();
set.insert(change_kind);
set
}
}
impl From<Vec<ChangeKind>> for ChangeKindSet {
fn from(changes: Vec<ChangeKind>) -> Self {
changes.into_iter().collect()
}
}
impl Default for ChangeKindSet {
fn default() -> Self {
Self::empty()
}
}

@ -1,53 +0,0 @@
use std::ops::{Deref, DerefMut};
use derive_more::{Display, From, Into};
use serde::{Deserialize, Serialize};
/// Represents some command with arguments to execute
///
/// Stored as the raw command-line string; `program`/`arguments` split on the
/// first space. Dereferences to the underlying `String`.
#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Cmd(String);
impl Cmd {
    /// Creates a new command from the given `cmd`
    pub fn new(cmd: impl Into<String>) -> Self {
        Self(cmd.into())
    }

    /// Returns reference to the program portion of the command
    /// (everything before the first space, or the whole string), trimmed
    pub fn program(&self) -> &str {
        self.0
            .split_once(' ')
            .map_or(self.0.as_str(), |(program, _)| program)
            .trim()
    }

    /// Returns reference to the arguments portion of the command
    /// (everything after the first space, trimmed), or "" if there is none
    pub fn arguments(&self) -> &str {
        self.0
            .split_once(' ')
            .map_or("", |(_, arguments)| arguments.trim())
    }
}
#[cfg(feature = "schemars")]
impl Cmd {
    /// Produces the JSON schema describing a [`Cmd`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Cmd)
    }
}
impl Deref for Cmd {
    type Target = String;
    /// Exposes the raw command-line string.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Cmd {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

@ -1,59 +0,0 @@
use std::fs::FileType as StdFileType;
use std::path::PathBuf;
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
/// Represents information about a single entry within a directory
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct DirEntry {
    /// Represents the full path to the entry
    pub path: PathBuf,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Depth at which this entry was created relative to the root (0 being immediately within
    /// root)
    pub depth: usize,
}
#[cfg(feature = "schemars")]
impl DirEntry {
    /// Produces the JSON schema describing a [`DirEntry`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(DirEntry)
    }
}
/// Represents the type associated with a dir entry
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum FileType {
    /// A directory
    Dir,
    /// A regular file
    File,
    /// A symbolic link
    Symlink,
}
impl From<StdFileType> for FileType {
    /// Maps a standard-library file type onto the protocol's [`FileType`].
    ///
    /// Directory takes precedence over symlink; anything that is neither a
    /// directory nor a symlink is treated as a regular file.
    fn from(ft: StdFileType) -> Self {
        match (ft.is_dir(), ft.is_symlink()) {
            (true, _) => Self::Dir,
            (false, true) => Self::Symlink,
            (false, false) => Self::File,
        }
    }
}
#[cfg(feature = "schemars")]
impl FileType {
    /// Produces the JSON schema describing a [`FileType`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(FileType)
    }
}

@ -1,404 +0,0 @@
use std::io;
use std::path::{Path, PathBuf};
use std::time::SystemTime;
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use super::{deserialize_u128_option, serialize_u128_option, FileType};
/// Represents metadata about some path on a remote machine
///
/// Timestamps are expressed in milliseconds since the Unix epoch.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Metadata {
    /// Canonicalized path to the file or directory, resolving symlinks, only included
    /// if flagged during the request
    pub canonicalized_path: Option<PathBuf>,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Size of the file/directory/symlink in bytes
    pub len: u64,
    /// Whether or not the file/directory/symlink is marked as unwriteable
    pub readonly: bool,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was accessed;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub accessed: Option<u128>,
    /// Represents when (in milliseconds) the file/directory/symlink was created;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub created: Option<u128>,
    /// Represents the last time (in milliseconds) when the file/directory/symlink was modified;
    /// can be optional as certain systems don't support this
    #[serde(serialize_with = "serialize_u128_option")]
    #[serde(deserialize_with = "deserialize_u128_option")]
    pub modified: Option<u128>,
    /// Represents metadata that is specific to a unix remote machine
    pub unix: Option<UnixMetadata>,
    /// Represents metadata that is specific to a windows remote machine
    pub windows: Option<WindowsMetadata>,
}
impl Metadata {
    /// Reads metadata about `path` from the local filesystem.
    ///
    /// Uses `symlink_metadata`, so a symlink is reported as a symlink rather
    /// than followed; when `resolve_file_type` is true and the entry is a
    /// symlink, a second (link-following) `metadata` call supplies the
    /// resolved file type. When `canonicalize` is true, the canonical,
    /// absolute path is included in the result.
    ///
    /// # Errors
    ///
    /// Propagates any I/O error from reading metadata or canonicalizing.
    pub async fn read(
        path: impl AsRef<Path>,
        canonicalize: bool,
        resolve_file_type: bool,
    ) -> io::Result<Self> {
        let metadata = tokio::fs::symlink_metadata(path.as_ref()).await?;
        let canonicalized_path = if canonicalize {
            Some(tokio::fs::canonicalize(path.as_ref()).await?)
        } else {
            None
        };
        // If asking for resolved file type and current type is symlink, then we want to refresh
        // our metadata to get the filetype for the resolved link
        let file_type = if resolve_file_type && metadata.file_type().is_symlink() {
            tokio::fs::metadata(path).await?.file_type()
        } else {
            metadata.file_type()
        };
        Ok(Self {
            canonicalized_path,
            // Timestamps become millis since UNIX_EPOCH; None when the
            // platform does not supply them
            accessed: metadata
                .accessed()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            created: metadata
                .created()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            modified: metadata
                .modified()
                .ok()
                .and_then(|t| t.duration_since(SystemTime::UNIX_EPOCH).ok())
                .map(|d| d.as_millis()),
            len: metadata.len(),
            readonly: metadata.permissions().readonly(),
            file_type: if file_type.is_dir() {
                FileType::Dir
            } else if file_type.is_file() {
                FileType::File
            } else {
                FileType::Symlink
            },
            // Platform-specific permission details are populated only on the
            // platform they describe and None elsewhere
            #[cfg(unix)]
            unix: Some({
                use std::os::unix::prelude::*;
                let mode = metadata.mode();
                crate::protocol::UnixMetadata::from(mode)
            }),
            #[cfg(not(unix))]
            unix: None,
            #[cfg(windows)]
            windows: Some({
                use std::os::windows::prelude::*;
                let attributes = metadata.file_attributes();
                crate::protocol::WindowsMetadata::from(attributes)
            }),
            #[cfg(not(windows))]
            windows: None,
        })
    }
}
#[cfg(feature = "schemars")]
impl Metadata {
    /// Produces the JSON schema describing a [`Metadata`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Metadata)
    }
}
/// Represents unix-specific metadata about some path on a remote machine
///
/// A decomposed view of the unix permission mode bits (see the `From<u32>`
/// conversion in this file).
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct UnixMetadata {
    /// Represents whether or not owner can read from the file
    pub owner_read: bool,
    /// Represents whether or not owner can write to the file
    pub owner_write: bool,
    /// Represents whether or not owner can execute the file
    pub owner_exec: bool,
    /// Represents whether or not associated group can read from the file
    pub group_read: bool,
    /// Represents whether or not associated group can write to the file
    pub group_write: bool,
    /// Represents whether or not associated group can execute the file
    pub group_exec: bool,
    /// Represents whether or not other can read from the file
    pub other_read: bool,
    /// Represents whether or not other can write to the file
    pub other_write: bool,
    /// Represents whether or not other can execute the file
    pub other_exec: bool,
}
#[cfg(feature = "schemars")]
impl UnixMetadata {
    /// Produces the JSON schema describing a [`UnixMetadata`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(UnixMetadata)
    }
}
impl From<u32> for UnixMetadata {
/// Create from a unix mode bitset
fn from(mode: u32) -> Self {
let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
Self {
owner_read: flags.contains(UnixFilePermissionFlags::OWNER_READ),
owner_write: flags.contains(UnixFilePermissionFlags::OWNER_WRITE),
owner_exec: flags.contains(UnixFilePermissionFlags::OWNER_EXEC),
group_read: flags.contains(UnixFilePermissionFlags::GROUP_READ),
group_write: flags.contains(UnixFilePermissionFlags::GROUP_WRITE),
group_exec: flags.contains(UnixFilePermissionFlags::GROUP_EXEC),
other_read: flags.contains(UnixFilePermissionFlags::OTHER_READ),
other_write: flags.contains(UnixFilePermissionFlags::OTHER_WRITE),
other_exec: flags.contains(UnixFilePermissionFlags::OTHER_EXEC),
}
}
}
impl From<UnixMetadata> for u32 {
    /// Convert to a unix mode bitset
    fn from(metadata: UnixMetadata) -> Self {
        let mut flags = UnixFilePermissionFlags::empty();
        // `set` inserts the flag when the bool is true and leaves it cleared
        // otherwise, collapsing the nine repetitive `if`/`insert` branches
        flags.set(UnixFilePermissionFlags::OWNER_READ, metadata.owner_read);
        flags.set(UnixFilePermissionFlags::OWNER_WRITE, metadata.owner_write);
        flags.set(UnixFilePermissionFlags::OWNER_EXEC, metadata.owner_exec);
        flags.set(UnixFilePermissionFlags::GROUP_READ, metadata.group_read);
        flags.set(UnixFilePermissionFlags::GROUP_WRITE, metadata.group_write);
        flags.set(UnixFilePermissionFlags::GROUP_EXEC, metadata.group_exec);
        flags.set(UnixFilePermissionFlags::OTHER_READ, metadata.other_read);
        flags.set(UnixFilePermissionFlags::OTHER_WRITE, metadata.other_write);
        flags.set(UnixFilePermissionFlags::OTHER_EXEC, metadata.other_exec);
        flags.bits()
    }
}
impl UnixMetadata {
    /// Returns true when no write bit (owner, group, or other) is set, i.e.
    /// nobody is able to modify the file.
    pub fn is_readonly(self) -> bool {
        // BUGFIX: previously this tested the READ bits, which would report
        // "readonly" only for a file nobody could read at all. Readonly
        // semantics concern the absence of WRITE permission, matching
        // `Permissions::is_readonly` elsewhere in the protocol.
        !(self.owner_write || self.group_write || self.other_write)
    }
}
bitflags! {
    /// Standard unix permission mode bits as used by `chmod` (e.g. `0o644`)
    struct UnixFilePermissionFlags: u32 {
        const OWNER_READ = 0o400;
        const OWNER_WRITE = 0o200;
        const OWNER_EXEC = 0o100;
        const GROUP_READ = 0o40;
        const GROUP_WRITE = 0o20;
        const GROUP_EXEC = 0o10;
        const OTHER_READ = 0o4;
        const OTHER_WRITE = 0o2;
        const OTHER_EXEC = 0o1;
    }
}
/// Represents windows-specific metadata about some path on a remote machine
///
/// Each boolean mirrors one Windows file attribute bit; see the `From<u32>`
/// conversions for the exact flag mapping.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct WindowsMetadata {
    /// Represents whether or not a file or directory is an archive
    pub archive: bool,
    /// Represents whether or not a file or directory is compressed
    pub compressed: bool,
    /// Represents whether or not the file or directory is encrypted
    pub encrypted: bool,
    /// Represents whether or not a file or directory is hidden
    pub hidden: bool,
    /// Represents whether or not a directory or user data stream is configured with integrity
    pub integrity_stream: bool,
    /// Represents whether or not a file does not have other attributes set
    pub normal: bool,
    /// Represents whether or not a file or directory is not to be indexed by content indexing
    /// service
    pub not_content_indexed: bool,
    /// Represents whether or not a user data stream is not to be read by the background data
    /// integrity scanner
    pub no_scrub_data: bool,
    /// Represents whether or not the data of a file is not available immediately
    pub offline: bool,
    /// Represents whether or not a file or directory is not fully present locally
    pub recall_on_data_access: bool,
    /// Represents whether or not a file or directory has no physical representation on the local
    /// system (is virtual)
    pub recall_on_open: bool,
    /// Represents whether or not a file or directory has an associated reparse point, or a file is
    /// a symbolic link
    pub reparse_point: bool,
    /// Represents whether or not a file is a sparse file
    pub sparse_file: bool,
    /// Represents whether or not a file or directory is used partially or exclusively by the
    /// operating system
    pub system: bool,
    /// Represents whether or not a file is being used for temporary storage
    pub temporary: bool,
}
#[cfg(feature = "schemars")]
impl WindowsMetadata {
    /// Returns the JSON schema describing [`WindowsMetadata`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(WindowsMetadata)
    }
}
impl From<u32> for WindowsMetadata {
/// Create from a windows file attribute bitset
fn from(file_attributes: u32) -> Self {
let flags = WindowsFileAttributeFlags::from_bits_truncate(file_attributes);
Self {
archive: flags.contains(WindowsFileAttributeFlags::ARCHIVE),
compressed: flags.contains(WindowsFileAttributeFlags::COMPRESSED),
encrypted: flags.contains(WindowsFileAttributeFlags::ENCRYPTED),
hidden: flags.contains(WindowsFileAttributeFlags::HIDDEN),
integrity_stream: flags.contains(WindowsFileAttributeFlags::INTEGRITY_SYSTEM),
normal: flags.contains(WindowsFileAttributeFlags::NORMAL),
not_content_indexed: flags.contains(WindowsFileAttributeFlags::NOT_CONTENT_INDEXED),
no_scrub_data: flags.contains(WindowsFileAttributeFlags::NO_SCRUB_DATA),
offline: flags.contains(WindowsFileAttributeFlags::OFFLINE),
recall_on_data_access: flags.contains(WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS),
recall_on_open: flags.contains(WindowsFileAttributeFlags::RECALL_ON_OPEN),
reparse_point: flags.contains(WindowsFileAttributeFlags::REPARSE_POINT),
sparse_file: flags.contains(WindowsFileAttributeFlags::SPARSE_FILE),
system: flags.contains(WindowsFileAttributeFlags::SYSTEM),
temporary: flags.contains(WindowsFileAttributeFlags::TEMPORARY),
}
}
}
impl From<WindowsMetadata> for u32 {
    /// Convert to a windows file attribute bitset
    fn from(metadata: WindowsMetadata) -> Self {
        let mut flags = WindowsFileAttributeFlags::empty();
        // `set` inserts the flag when the bool is true and leaves it cleared
        // otherwise, collapsing the fifteen repetitive `if`/`insert` branches
        flags.set(WindowsFileAttributeFlags::ARCHIVE, metadata.archive);
        flags.set(WindowsFileAttributeFlags::COMPRESSED, metadata.compressed);
        flags.set(WindowsFileAttributeFlags::ENCRYPTED, metadata.encrypted);
        flags.set(WindowsFileAttributeFlags::HIDDEN, metadata.hidden);
        flags.set(
            WindowsFileAttributeFlags::INTEGRITY_SYSTEM,
            metadata.integrity_stream,
        );
        flags.set(WindowsFileAttributeFlags::NORMAL, metadata.normal);
        flags.set(
            WindowsFileAttributeFlags::NOT_CONTENT_INDEXED,
            metadata.not_content_indexed,
        );
        flags.set(
            WindowsFileAttributeFlags::NO_SCRUB_DATA,
            metadata.no_scrub_data,
        );
        flags.set(WindowsFileAttributeFlags::OFFLINE, metadata.offline);
        flags.set(
            WindowsFileAttributeFlags::RECALL_ON_DATA_ACCESS,
            metadata.recall_on_data_access,
        );
        flags.set(
            WindowsFileAttributeFlags::RECALL_ON_OPEN,
            metadata.recall_on_open,
        );
        flags.set(
            WindowsFileAttributeFlags::REPARSE_POINT,
            metadata.reparse_point,
        );
        flags.set(WindowsFileAttributeFlags::SPARSE_FILE, metadata.sparse_file);
        flags.set(WindowsFileAttributeFlags::SYSTEM, metadata.system);
        flags.set(WindowsFileAttributeFlags::TEMPORARY, metadata.temporary);
        flags.bits()
    }
}
bitflags! {
    /// Windows file attribute bits; the values correspond to the Win32
    /// `FILE_ATTRIBUTE_*` constants
    struct WindowsFileAttributeFlags: u32 {
        const ARCHIVE = 0x20;
        const COMPRESSED = 0x800;
        const ENCRYPTED = 0x4000;
        const HIDDEN = 0x2;
        const INTEGRITY_SYSTEM = 0x8000;
        const NORMAL = 0x80;
        const NOT_CONTENT_INDEXED = 0x2000;
        const NO_SCRUB_DATA = 0x20000;
        const OFFLINE = 0x1000;
        const RECALL_ON_DATA_ACCESS = 0x400000;
        const RECALL_ON_OPEN = 0x40000;
        const REPARSE_POINT = 0x400;
        const SPARSE_FILE = 0x200;
        const SYSTEM = 0x4;
        const TEMPORARY = 0x100;
        // NOTE(review): VIRTUAL has no corresponding `WindowsMetadata` field
        // and is never produced by the conversions above — confirm intent
        const VIRTUAL = 0x10000;
    }
}

@ -1,294 +0,0 @@
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
/// Collection of flags that alter how permissions are applied to paths
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(default, deny_unknown_fields, rename_all = "snake_case")]
pub struct SetPermissionsOptions {
    /// Whether or not to exclude symlinks from traversal entirely, meaning that permissions will
    /// not be set on symlinks (usually resolving the symlink and setting the permission of the
    /// referenced file or directory) that are explicitly provided or show up during recursion.
    pub exclude_symlinks: bool,
    /// Whether or not to traverse symlinks when recursively setting permissions. Note that this
    /// does NOT influence setting permissions when encountering a symlink as most platforms will
    /// resolve the symlink before setting permissions.
    pub follow_symlinks: bool,
    /// Whether or not to set the permissions of the file hierarchies rooted in the paths, instead
    /// of just the paths themselves.
    pub recursive: bool,
}
#[cfg(feature = "schemars")]
impl SetPermissionsOptions {
    /// Returns the JSON schema describing [`SetPermissionsOptions`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SetPermissionsOptions)
    }
}
/// Represents permissions to apply to some path on a remote machine
///
/// When used to set permissions on a file, directory, or symlink,
/// only fields that are set (not `None`) will be applied.
///
/// On `Unix` platforms, this translates directly into the mode that
/// you would find with `chmod`. On all other platforms, this uses the
/// write flags to determine whether or not to set the readonly status.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Permissions {
    /// Represents whether or not owner can read from the file (`None` = leave unchanged)
    pub owner_read: Option<bool>,
    /// Represents whether or not owner can write to the file (`None` = leave unchanged)
    pub owner_write: Option<bool>,
    /// Represents whether or not owner can execute the file (`None` = leave unchanged)
    pub owner_exec: Option<bool>,
    /// Represents whether or not associated group can read from the file (`None` = leave unchanged)
    pub group_read: Option<bool>,
    /// Represents whether or not associated group can write to the file (`None` = leave unchanged)
    pub group_write: Option<bool>,
    /// Represents whether or not associated group can execute the file (`None` = leave unchanged)
    pub group_exec: Option<bool>,
    /// Represents whether or not other can read from the file (`None` = leave unchanged)
    pub other_read: Option<bool>,
    /// Represents whether or not other can write to the file (`None` = leave unchanged)
    pub other_write: Option<bool>,
    /// Represents whether or not other can execute the file (`None` = leave unchanged)
    pub other_exec: Option<bool>,
}
impl Permissions {
    /// Creates a set of [`Permissions`] that indicate readonly status.
    ///
    /// ```
    /// use distant_core::protocol::Permissions;
    ///
    /// let permissions = Permissions::readonly();
    /// assert_eq!(permissions.is_readonly(), Some(true));
    /// assert_eq!(permissions.is_writable(), Some(false));
    /// ```
    pub fn readonly() -> Self {
        Self {
            owner_write: Some(false),
            group_write: Some(false),
            other_write: Some(false),
            owner_read: Some(true),
            group_read: Some(true),
            other_read: Some(true),
            owner_exec: None,
            group_exec: None,
            other_exec: None,
        }
    }
    /// Creates a set of [`Permissions`] that indicate globally writable status.
    ///
    /// ```
    /// use distant_core::protocol::Permissions;
    ///
    /// let permissions = Permissions::writable();
    /// assert_eq!(permissions.is_readonly(), Some(false));
    /// assert_eq!(permissions.is_writable(), Some(true));
    /// ```
    pub fn writable() -> Self {
        Self {
            owner_write: Some(true),
            group_write: Some(true),
            other_write: Some(true),
            owner_read: Some(true),
            group_read: Some(true),
            other_read: Some(true),
            owner_exec: None,
            group_exec: None,
            other_exec: None,
        }
    }
    /// Returns true if the permission set has a value specified for each permission (no `None`
    /// settings).
    pub fn is_complete(&self) -> bool {
        self.owner_read.is_some()
            && self.owner_write.is_some()
            && self.owner_exec.is_some()
            && self.group_read.is_some()
            && self.group_write.is_some()
            && self.group_exec.is_some()
            && self.other_read.is_some()
            && self.other_write.is_some()
            && self.other_exec.is_some()
    }
    /// Returns `true` if permissions represent readonly, `false` if permissions represent
    /// writable, and `None` if no permissions have been set to indicate either status.
    #[inline]
    pub fn is_readonly(&self) -> Option<bool> {
        // Negate the writable status to indicate whether or not readonly
        self.is_writable().map(|x| !x)
    }
    /// Returns `true` if permissions represent ability to write, `false` if permissions represent
    /// inability to write, and `None` if no permissions have been set to indicate either status.
    #[inline]
    pub fn is_writable(&self) -> Option<bool> {
        // All three write settings must be `Some` to reach a verdict; any one
        // of them being `true` means the path is writable by someone
        self.owner_write
            .zip(self.group_write)
            .zip(self.other_write)
            .map(|((owner, group), other)| owner || group || other)
    }
    /// Applies `other` settings to `self`, overwriting any of the permissions in `self` with `other`.
    #[inline]
    pub fn apply_from(&mut self, other: &Self) {
        // Only fields explicitly set (`Some`) in `other` overwrite `self`
        macro_rules! apply {
            ($key:ident) => {{
                if let Some(value) = other.$key {
                    self.$key = Some(value);
                }
            }};
        }
        apply!(owner_read);
        apply!(owner_write);
        apply!(owner_exec);
        apply!(group_read);
        apply!(group_write);
        apply!(group_exec);
        apply!(other_read);
        apply!(other_write);
        apply!(other_exec);
    }
    /// Applies `self` settings to `other`, overwriting any of the permissions in `other` with
    /// `self`.
    #[inline]
    pub fn apply_to(&self, other: &mut Self) {
        Self::apply_from(other, self)
    }
    /// Converts a Unix `mode` into the permission set.
    pub fn from_unix_mode(mode: u32) -> Self {
        let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
        Self {
            owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)),
            owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)),
            owner_exec: Some(flags.contains(UnixFilePermissionFlags::OWNER_EXEC)),
            group_read: Some(flags.contains(UnixFilePermissionFlags::GROUP_READ)),
            group_write: Some(flags.contains(UnixFilePermissionFlags::GROUP_WRITE)),
            group_exec: Some(flags.contains(UnixFilePermissionFlags::GROUP_EXEC)),
            other_read: Some(flags.contains(UnixFilePermissionFlags::OTHER_READ)),
            other_write: Some(flags.contains(UnixFilePermissionFlags::OTHER_WRITE)),
            other_exec: Some(flags.contains(UnixFilePermissionFlags::OTHER_EXEC)),
        }
    }
    /// Converts to a Unix `mode` from a permission set. For any missing setting, a 0 bit is used.
    pub fn to_unix_mode(&self) -> u32 {
        let mut flags = UnixFilePermissionFlags::empty();
        // `unwrap_or(false)` maps an unset (`None`) permission to a cleared
        // bit, and `set` inserts/clears based on the boolean; this replaces
        // the old `is_true!` macro (`$opt.is_some() && $opt.unwrap()`) and
        // nine `if` branches with the idiomatic equivalent
        flags.set(
            UnixFilePermissionFlags::OWNER_READ,
            self.owner_read.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::OWNER_WRITE,
            self.owner_write.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::OWNER_EXEC,
            self.owner_exec.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::GROUP_READ,
            self.group_read.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::GROUP_WRITE,
            self.group_write.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::GROUP_EXEC,
            self.group_exec.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::OTHER_READ,
            self.other_read.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::OTHER_WRITE,
            self.other_write.unwrap_or(false),
        );
        flags.set(
            UnixFilePermissionFlags::OTHER_EXEC,
            self.other_exec.unwrap_or(false),
        );
        flags.bits()
    }
}
#[cfg(feature = "schemars")]
impl Permissions {
    /// Returns the JSON schema describing [`Permissions`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(Permissions)
    }
}
#[cfg(unix)]
impl From<std::fs::Permissions> for Permissions {
    /// Converts [`std::fs::Permissions`] into [`Permissions`] using
    /// [`std::os::unix::fs::PermissionsExt::mode`] to supply the bitset.
    fn from(permissions: std::fs::Permissions) -> Self {
        use std::os::unix::prelude::*;
        // A full mode is available, so every permission field ends up `Some(..)`
        Self::from_unix_mode(permissions.mode())
    }
}
#[cfg(not(unix))]
impl From<std::fs::Permissions> for Permissions {
    /// Converts [`std::fs::Permissions`] into [`Permissions`] using the `readonly` flag.
    ///
    /// This will not set executable flags, but will set all read and write flags with write flags
    /// being `false` if `readonly`, otherwise set to `true`.
    fn from(permissions: std::fs::Permissions) -> Self {
        match permissions.readonly() {
            true => Self::readonly(),
            false => Self::writable(),
        }
    }
}
#[cfg(unix)]
impl From<Permissions> for std::fs::Permissions {
    /// Converts [`Permissions`] into [`std::fs::Permissions`] using
    /// [`std::os::unix::fs::PermissionsExt::from_mode`].
    fn from(permissions: Permissions) -> Self {
        use std::os::unix::prelude::*;
        // Unset (`None`) permissions become cleared bits in the resulting mode
        std::fs::Permissions::from_mode(permissions.to_unix_mode())
    }
}
bitflags! {
    /// Standard unix permission mode bits as used by `chmod` (e.g. `0o644`)
    // NOTE(review): appears to duplicate the identically-named flags in the
    // metadata module — consider sharing one definition; confirm module layout
    struct UnixFilePermissionFlags: u32 {
        const OWNER_READ = 0o400;
        const OWNER_WRITE = 0o200;
        const OWNER_EXEC = 0o100;
        const GROUP_READ = 0o40;
        const GROUP_WRITE = 0o20;
        const GROUP_EXEC = 0o10;
        const OTHER_READ = 0o4;
        const OTHER_WRITE = 0o2;
        const OTHER_EXEC = 0o1;
    }
}

@ -1,140 +0,0 @@
use std::fmt;
use std::num::ParseIntError;
use std::str::FromStr;
use derive_more::{Display, Error};
use portable_pty::PtySize as PortablePtySize;
use serde::{Deserialize, Serialize};
/// Represents the size associated with a remote PTY
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct PtySize {
    /// Number of lines of text
    pub rows: u16,
    /// Number of columns of text
    pub cols: u16,
    /// Width of a cell in pixels. Note that some systems never fill this value and ignore it.
    /// Defaults to 0 when absent from serialized input.
    #[serde(default)]
    pub pixel_width: u16,
    /// Height of a cell in pixels. Note that some systems never fill this value and ignore it.
    /// Defaults to 0 when absent from serialized input.
    #[serde(default)]
    pub pixel_height: u16,
}
impl PtySize {
    /// Creates new size using just rows and columns
    ///
    /// Pixel width/height are left at zero, matching the struct's defaults.
    pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self {
        let mut size = Self::default();
        size.rows = rows;
        size.cols = cols;
        size
    }
}
#[cfg(feature = "schemars")]
impl PtySize {
    /// Returns the JSON schema describing [`PtySize`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(PtySize)
    }
}
impl From<PortablePtySize> for PtySize {
    /// Field-for-field conversion from the `portable_pty` crate's size type.
    fn from(size: PortablePtySize) -> Self {
        Self {
            rows: size.rows,
            cols: size.cols,
            pixel_width: size.pixel_width,
            pixel_height: size.pixel_height,
        }
    }
}
impl From<PtySize> for PortablePtySize {
    /// Field-for-field conversion into the `portable_pty` crate's size type.
    fn from(size: PtySize) -> Self {
        Self {
            rows: size.rows,
            cols: size.cols,
            pixel_width: size.pixel_width,
            pixel_height: size.pixel_height,
        }
    }
}
impl fmt::Display for PtySize {
    /// Prints out `rows,cols[,pixel_width,pixel_height]` where the
    /// pixel width and pixel height are only included if either
    /// one of them is not zero
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{},{}", self.rows, self.cols)?;
        match (self.pixel_width, self.pixel_height) {
            // Both pixel dimensions zero: omit the pixel suffix entirely
            (0, 0) => Ok(()),
            (width, height) => write!(f, ",{},{}", width, height),
        }
    }
}
impl Default for PtySize {
    /// Defaults to the conventional 80x24 terminal size with no pixel dimensions
    fn default() -> Self {
        PtySize {
            rows: 24,
            cols: 80,
            pixel_width: 0,
            pixel_height: 0,
        }
    }
}
/// Error produced when parsing a [`PtySize`] from a string
#[derive(Clone, Debug, PartialEq, Eq, Display, Error)]
pub enum PtySizeParseError {
    /// Input had no rows token
    MissingRows,
    /// Input had no columns token
    MissingColumns,
    /// Rows token failed to parse as an integer
    InvalidRows(ParseIntError),
    /// Columns token failed to parse as an integer
    InvalidColumns(ParseIntError),
    /// Pixel width token failed to parse as an integer
    InvalidPixelWidth(ParseIntError),
    /// Pixel height token failed to parse as an integer
    InvalidPixelHeight(ParseIntError),
}
impl FromStr for PtySize {
    type Err = PtySizeParseError;
    /// Attempts to parse a str into PtySize using one of the following formats:
    ///
    /// * rows,cols (defaults to 0 for pixel_width & pixel_height)
    /// * rows,cols,pixel_width,pixel_height
    ///
    /// Tokens beyond the fourth are ignored.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut tokens = s.split(',');
        // Rows and columns are required; fail if either is missing or invalid
        let rows: u16 = tokens
            .next()
            .ok_or(PtySizeParseError::MissingRows)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidRows)?;
        let cols: u16 = tokens
            .next()
            .ok_or(PtySizeParseError::MissingColumns)?
            .trim()
            .parse()
            .map_err(PtySizeParseError::InvalidColumns)?;
        // Pixel dimensions are optional and default to zero when absent
        let pixel_width: u16 = tokens
            .next()
            .map(|token| token.trim().parse())
            .transpose()
            .map_err(PtySizeParseError::InvalidPixelWidth)?
            .unwrap_or(0);
        let pixel_height: u16 = tokens
            .next()
            .map(|token| token.trim().parse())
            .transpose()
            .map_err(PtySizeParseError::InvalidPixelHeight)?
            .unwrap_or(0);
        Ok(Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        })
    }
}

@ -1,425 +0,0 @@
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::PathBuf;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use super::FileType;
/// Id associated with a search
///
/// Identifies an active query so later protocol messages can reference it.
pub type SearchId = u32;
/// Represents a query to perform against the filesystem
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQuery {
    /// Kind of data to examine using condition
    pub target: SearchQueryTarget,
    /// Condition to meet to be considered a match
    pub condition: SearchQueryCondition,
    /// Paths in which to perform the query
    pub paths: Vec<PathBuf>,
    /// Options to apply to the query (all options default when omitted)
    #[serde(default)]
    pub options: SearchQueryOptions,
}
#[cfg(feature = "schemars")]
impl SearchQuery {
    /// Returns the JSON schema describing [`SearchQuery`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQuery)
    }
}
impl FromStr for SearchQuery {
    type Err = serde_json::error::Error;
    /// Parses search query from a JSON string
    ///
    /// Returns the underlying `serde_json` error when `s` is not a valid
    /// JSON representation of a query.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        serde_json::from_str(s)
    }
}
/// Kind of data to examine using conditions
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum SearchQueryTarget {
    /// Checks path of file, directory, or symlink
    Path,
    /// Checks contents of files
    Contents,
}
#[cfg(feature = "schemars")]
impl SearchQueryTarget {
    /// Returns the JSON schema describing [`SearchQueryTarget`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryTarget)
    }
}
/// Condition used to find a match in a search query
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum SearchQueryCondition {
    /// Text is found anywhere (all regex patterns are escaped)
    Contains { value: String },
    /// Ends with some text (all regex patterns are escaped)
    EndsWith { value: String },
    /// Matches some text exactly (all regex patterns are escaped)
    Equals { value: String },
    /// Any of the conditions match
    Or { value: Vec<SearchQueryCondition> },
    /// Matches some regex
    Regex { value: String },
    /// Begins with some text (all regex patterns are escaped)
    StartsWith { value: String },
}
impl SearchQueryCondition {
    /// Creates a new instance with `Contains` variant
    pub fn contains(value: impl Into<String>) -> Self {
        Self::Contains {
            value: value.into(),
        }
    }
    /// Creates a new instance with `EndsWith` variant
    pub fn ends_with(value: impl Into<String>) -> Self {
        Self::EndsWith {
            value: value.into(),
        }
    }
    /// Creates a new instance with `Equals` variant
    pub fn equals(value: impl Into<String>) -> Self {
        Self::Equals {
            value: value.into(),
        }
    }
    /// Creates a new instance with `Or` variant
    pub fn or<I, C>(value: I) -> Self
    where
        I: IntoIterator<Item = C>,
        C: Into<SearchQueryCondition>,
    {
        Self::Or {
            value: value.into_iter().map(|s| s.into()).collect(),
        }
    }
    /// Creates a new instance with `Regex` variant
    pub fn regex(value: impl Into<String>) -> Self {
        Self::Regex {
            value: value.into(),
        }
    }
    /// Creates a new instance with `StartsWith` variant
    pub fn starts_with(value: impl Into<String>) -> Self {
        Self::StartsWith {
            value: value.into(),
        }
    }
    /// Converts the condition in a regex string
    ///
    /// Literal variants are escaped via [`regex::escape`]; `Or` joins its
    /// sub-conditions with `|` (alternation).
    pub fn to_regex_string(&self) -> String {
        match self {
            Self::Contains { value } => regex::escape(value),
            Self::EndsWith { value } => format!(r"{}$", regex::escape(value)),
            Self::Equals { value } => format!(r"^{}$", regex::escape(value)),
            Self::Regex { value } => value.to_string(),
            Self::StartsWith { value } => format!(r"^{}", regex::escape(value)),
            // Idiomatic join replaces the manual index-tracking loop and
            // produces the identical `a|b|c` alternation string
            Self::Or { value } => value
                .iter()
                .map(|condition| condition.to_regex_string())
                .collect::<Vec<_>>()
                .join("|"),
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryCondition {
    /// Returns the JSON schema describing [`SearchQueryCondition`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryCondition)
    }
}
impl FromStr for SearchQueryCondition {
    type Err = std::convert::Infallible;
    /// Wraps the string as a [`SearchQueryCondition::Regex`] condition;
    /// this cannot fail. (The earlier doc claiming JSON parsing was wrong —
    /// no JSON is involved here.)
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(Self::regex(s))
    }
}
/// Options associated with a search query
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(default)]
pub struct SearchQueryOptions {
    /// Restrict search to only these file types (otherwise all are allowed).
    pub allowed_file_types: HashSet<FileType>,
    /// Regex to use to filter paths being searched to only those that match the include condition.
    pub include: Option<SearchQueryCondition>,
    /// Regex to use to filter paths being searched to only those that do not match the exclude
    /// condition.
    pub exclude: Option<SearchQueryCondition>,
    /// If true, will search upward through parent directories rather than the traditional downward
    /// search that recurses through all children directories.
    ///
    /// Note that this will use maximum depth to apply to the reverse direction, and will only look
    /// through each ancestor directory's immediate entries. In other words, this will not result
    /// in recursing through sibling directories.
    ///
    /// An upward search will ALWAYS search the contents of a directory, so this means providing a
    /// path to a directory will search its entries EVEN if the max_depth is 0.
    pub upward: bool,
    /// Search should follow symbolic links.
    pub follow_symbolic_links: bool,
    /// Maximum results to return before stopping the query.
    pub limit: Option<u64>,
    /// Maximum depth (directories) to search
    ///
    /// The smallest depth is 0 and always corresponds to the path given to the new function on
    /// this type. Its direct descendents have depth 1, and their descendents have depth 2, and so
    /// on.
    ///
    /// Note that this will not simply filter the entries of the iterator, but it will actually
    /// avoid descending into directories when the depth is exceeded.
    pub max_depth: Option<u64>,
    /// Amount of results to batch before sending back excluding final submission that will always
    /// include the remaining results even if less than pagination request.
    pub pagination: Option<u64>,
}
#[cfg(feature = "schemars")]
impl SearchQueryOptions {
    /// Returns the JSON schema describing [`SearchQueryOptions`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryOptions)
    }
}
/// Represents a match for a search query
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]
pub enum SearchQueryMatch {
    /// Matches part of a file's path
    Path(SearchQueryPathMatch),
    /// Matches part of a file's contents
    Contents(SearchQueryContentsMatch),
}
impl SearchQueryMatch {
    /// Unwraps into the inner [`SearchQueryPathMatch`], or `None` if this is
    /// not a path match.
    pub fn into_path_match(self) -> Option<SearchQueryPathMatch> {
        if let Self::Path(path_match) = self {
            Some(path_match)
        } else {
            None
        }
    }
    /// Unwraps into the inner [`SearchQueryContentsMatch`], or `None` if this
    /// is not a contents match.
    pub fn into_contents_match(self) -> Option<SearchQueryContentsMatch> {
        if let Self::Contents(contents_match) = self {
            Some(contents_match)
        } else {
            None
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryMatch {
    /// Returns the JSON schema describing [`SearchQueryMatch`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryMatch)
    }
}
/// Represents details for a match on a path
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQueryPathMatch {
    /// Path associated with the match
    pub path: PathBuf,
    /// Collection of matches tied to `path` where each submatch's byte offset is relative to
    /// `path`
    pub submatches: Vec<SearchQuerySubmatch>,
}
#[cfg(feature = "schemars")]
impl SearchQueryPathMatch {
    /// Returns the JSON schema describing [`SearchQueryPathMatch`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryPathMatch)
    }
}
/// Represents details for a match on a file's contents
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQueryContentsMatch {
    /// Path to file whose contents match
    pub path: PathBuf,
    /// Line(s) that matched
    pub lines: SearchQueryMatchData,
    /// Line number where match starts (base index 1)
    pub line_number: u64,
    /// Absolute byte offset corresponding to the start of `lines` in the data being searched
    pub absolute_offset: u64,
    /// Collection of matches tied to `lines` where each submatch's byte offset is relative to
    /// `lines` and not the overall content
    pub submatches: Vec<SearchQuerySubmatch>,
}
#[cfg(feature = "schemars")]
impl SearchQueryContentsMatch {
    /// Returns the JSON schema describing [`SearchQueryContentsMatch`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryContentsMatch)
    }
}
/// Represents a single submatch (one matched region) within a search match
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SearchQuerySubmatch {
    /// Content matched by query
    pub r#match: SearchQueryMatchData,
    /// Byte offset representing start of submatch (inclusive)
    pub start: u64,
    /// Byte offset representing end of submatch (exclusive)
    pub end: u64,
}
#[cfg(feature = "schemars")]
impl SearchQuerySubmatch {
    /// Returns the JSON schema describing [`SearchQuerySubmatch`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQuerySubmatch)
    }
}
/// Data captured for a match: either valid UTF-8 text or raw bytes
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(
    rename_all = "snake_case",
    deny_unknown_fields,
    tag = "type",
    content = "value"
)]
pub enum SearchQueryMatchData {
    /// Match represented as UTF-8 text
    Text(String),
    /// Match represented as bytes
    Bytes(Vec<u8>),
}
impl SearchQueryMatchData {
    /// Creates a new instance with `Text` variant
    pub fn text(value: impl Into<String>) -> Self {
        Self::Text(value.into())
    }
    /// Creates a new instance with `Bytes` variant
    pub fn bytes(value: impl Into<Vec<u8>>) -> Self {
        Self::Bytes(value.into())
    }
    /// Returns the UTF-8 str reference to the data, if is valid UTF-8
    pub fn to_str(&self) -> Option<&str> {
        match self {
            Self::Text(text) => Some(text.as_str()),
            Self::Bytes(bytes) => std::str::from_utf8(bytes).ok(),
        }
    }
    /// Converts data to a UTF-8 string, replacing any invalid UTF-8 sequences with
    /// [`U+FFFD REPLACEMENT CHARACTER`](https://doc.rust-lang.org/nightly/core/char/const.REPLACEMENT_CHARACTER.html)
    pub fn to_string_lossy(&self) -> Cow<'_, str> {
        match self {
            Self::Text(text) => Cow::Borrowed(text.as_str()),
            Self::Bytes(bytes) => String::from_utf8_lossy(bytes),
        }
    }
}
#[cfg(feature = "schemars")]
impl SearchQueryMatchData {
    /// Returns the JSON schema describing [`SearchQueryMatchData`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SearchQueryMatchData)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    mod search_query_condition {
        use test_log::test;
        use super::*;
        #[test]
        fn to_regex_string_should_convert_to_appropriate_regex_and_escape_as_needed() {
            // `^` and `$` in literal input must be escaped for every
            // non-regex variant, while `Regex` passes its pattern through
            // untouched and `Or` joins sub-patterns with `|`
            assert_eq!(
                SearchQueryCondition::contains("t^es$t").to_regex_string(),
                r"t\^es\$t"
            );
            assert_eq!(
                SearchQueryCondition::ends_with("t^es$t").to_regex_string(),
                r"t\^es\$t$"
            );
            assert_eq!(
                SearchQueryCondition::equals("t^es$t").to_regex_string(),
                r"^t\^es\$t$"
            );
            assert_eq!(
                SearchQueryCondition::or([
                    SearchQueryCondition::contains("t^es$t"),
                    SearchQueryCondition::equals("t^es$t"),
                    SearchQueryCondition::regex("^test$"),
                ])
                .to_regex_string(),
                r"t\^es\$t|^t\^es\$t$|^test$"
            );
            assert_eq!(
                SearchQueryCondition::regex("test").to_regex_string(),
                "test"
            );
            assert_eq!(
                SearchQueryCondition::starts_with("t^es$t").to_regex_string(),
                r"^t\^es\$t"
            );
        }
    }
}

@ -1,59 +0,0 @@
use std::env;
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
/// Represents information about a system
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct SystemInfo {
    /// Family of the operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html
    pub family: String,
    /// Name of the specific operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.OS.html
    pub os: String,
    /// Architecture of the CPU as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html
    pub arch: String,
    /// Current working directory of the running server process
    pub current_dir: PathBuf,
    /// Primary separator for path components for the current platform
    /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html
    pub main_separator: char,
    /// Name of the user running the server process
    pub username: String,
    /// Default shell tied to user running the server process
    pub shell: String,
}
#[cfg(feature = "schemars")]
impl SystemInfo {
    /// Returns the JSON schema describing [`SystemInfo`].
    pub fn root_schema() -> schemars::schema::RootSchema {
        schemars::schema_for!(SystemInfo)
    }
}
impl Default for SystemInfo {
    /// Captures the compile-time platform constants and the runtime
    /// environment (cwd, username, shell) of the current process.
    fn default() -> Self {
        // Pick the user's shell from the platform-appropriate environment
        // variable, falling back to the platform's stock shell
        let shell = if cfg!(windows) {
            env::var("ComSpec").unwrap_or_else(|_| String::from("cmd.exe"))
        } else {
            env::var("SHELL").unwrap_or_else(|_| String::from("/bin/sh"))
        };
        Self {
            family: env::consts::FAMILY.to_string(),
            os: env::consts::OS.to_string(),
            arch: env::consts::ARCH.to_string(),
            current_dir: env::current_dir().unwrap_or_default(),
            main_separator: std::path::MAIN_SEPARATOR,
            username: whoami::username(),
            shell,
        }
    }
}

@ -1,5 +1,5 @@
use assert_fs::prelude::*; use assert_fs::prelude::*;
use distant_core::protocol::ChangeKindSet; use distant_core::protocol::{ChangeKind, ChangeKindSet};
use distant_core::DistantChannelExt; use distant_core::DistantChannelExt;
use rstest::*; use rstest::*;
use test_log::test; use test_log::test;
@ -29,7 +29,7 @@ async fn should_handle_large_volume_of_file_watching(#[future] ctx: DistantClien
.watch( .watch(
file.path(), file.path(),
false, false,
ChangeKindSet::modify_set(), ChangeKindSet::new([ChangeKind::Modify]),
ChangeKindSet::empty(), ChangeKindSet::empty(),
) )
.await .await

@ -32,9 +32,6 @@ serde_bytes = "0.11.9"
strum = { version = "0.24.1", features = ["derive"] } strum = { version = "0.24.1", features = ["derive"] }
tokio = { version = "1.27.0", features = ["full"] } tokio = { version = "1.27.0", features = ["full"] }
# Optional dependencies based on features
schemars = { version = "0.8.12", optional = true }
[dev-dependencies] [dev-dependencies]
distant-auth = { version = "=0.20.0-alpha.7", path = "../distant-auth", features = ["tests"] } distant-auth = { version = "=0.20.0-alpha.7", path = "../distant-auth", features = ["tests"] }
env_logger = "0.10.0" env_logger = "0.10.0"

@ -25,18 +25,9 @@ You can import the dependency by adding the following to your `Cargo.toml`:
```toml ```toml
[dependencies] [dependencies]
distant-net = "0.19" distant-net = "0.20"
``` ```
## Features
Currently, the library supports the following features:
- `schemars`: derives the `schemars::JsonSchema` interface on `Request`
and `Response` data types
By default, no features are enabled on the library.
## License ## License
This project is licensed under either of This project is licensed under either of

@ -13,7 +13,6 @@ use crate::common::utils::{deserialize_from_str, serialize_to_str};
/// Contains map information for connections and other use cases /// Contains map information for connections and other use cases
#[derive(Clone, Debug, From, IntoIterator, PartialEq, Eq)] #[derive(Clone, Debug, From, IntoIterator, PartialEq, Eq)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Map(HashMap<String, String>); pub struct Map(HashMap<String, String>);
impl Map { impl Map {
@ -77,13 +76,6 @@ impl Map {
} }
} }
#[cfg(feature = "schemars")]
impl Map {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Map)
}
}
impl Default for Map { impl Default for Map {
fn default() -> Self { fn default() -> Self {
Self::new() Self::new()

@ -10,7 +10,6 @@ use crate::common::utils;
/// Represents a request to send /// Represents a request to send
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Request<T> { pub struct Request<T> {
/// Unique id associated with the request /// Unique id associated with the request
pub id: Id, pub id: Id,
@ -62,13 +61,6 @@ where
} }
} }
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Request<T> {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Request<T>)
}
}
impl<T> From<T> for Request<T> { impl<T> From<T> for Request<T> {
fn from(payload: T) -> Self { fn from(payload: T) -> Self {
Self::new(payload) Self::new(payload)

@ -10,7 +10,6 @@ use crate::common::utils;
/// Represents a response received related to some response /// Represents a response received related to some response
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
pub struct Response<T> { pub struct Response<T> {
/// Unique id associated with the response /// Unique id associated with the response
pub id: Id, pub id: Id,
@ -67,13 +66,6 @@ where
} }
} }
#[cfg(feature = "schemars")]
impl<T: schemars::JsonSchema> Response<T> {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Response<T>)
}
}
/// Error encountered when attempting to parse bytes as an untyped response /// Error encountered when attempting to parse bytes as an untyped response
#[derive(Copy, Clone, Debug, Display, Error, PartialEq, Eq, Hash)] #[derive(Copy, Clone, Debug, Display, Error, PartialEq, Eq, Hash)]
pub enum UntypedResponseParseError { pub enum UntypedResponseParseError {

@ -12,7 +12,6 @@ use super::ManagerCapabilityKind;
/// Set of supported capabilities for a manager /// Set of supported capabilities for a manager
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)] #[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(transparent)] #[serde(transparent)]
pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet<ManagerCapability>); pub struct ManagerCapabilities(#[into_iterator(owned, ref)] HashSet<ManagerCapability>);
@ -76,13 +75,6 @@ impl ManagerCapabilities {
} }
} }
#[cfg(feature = "schemars")]
impl ManagerCapabilities {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapabilities)
}
}
impl BitAnd for &ManagerCapabilities { impl BitAnd for &ManagerCapabilities {
type Output = ManagerCapabilities; type Output = ManagerCapabilities;
@ -133,7 +125,6 @@ impl FromIterator<ManagerCapability> for ManagerCapabilities {
/// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not /// ManagerCapability tied to a manager. A capability is equivalent based on its kind and not
/// description. /// description.
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)] #[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct ManagerCapability { pub struct ManagerCapability {
/// Label describing the kind of capability /// Label describing the kind of capability
@ -196,17 +187,3 @@ impl From<ManagerCapabilityKind> for ManagerCapability {
} }
} }
} }
#[cfg(feature = "schemars")]
impl ManagerCapability {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapability)
}
}
#[cfg(feature = "schemars")]
impl ManagerCapabilityKind {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ManagerCapabilityKind)
}
}

@ -21,10 +21,6 @@ use crate::common::{ConnectionId, Destination, Map, UntypedRequest};
Serialize, Serialize,
Deserialize Deserialize
))] ))]
#[cfg_attr(
feature = "schemars",
strum_discriminants(derive(schemars::JsonSchema))
)]
#[strum_discriminants(name(ManagerCapabilityKind))] #[strum_discriminants(name(ManagerCapabilityKind))]
#[strum_discriminants(strum(serialize_all = "snake_case"))] #[strum_discriminants(strum(serialize_all = "snake_case"))]
#[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")] #[serde(rename_all = "snake_case", deny_unknown_fields, tag = "type")]

@ -0,0 +1,29 @@
[package]
name = "distant-protocol"
description = "Protocol library for distant, providing data structures used between the client and server"
categories = ["data-structures"]
keywords = ["protocol"]
version = "0.20.0-alpha.7"
authors = ["Chip Senkbeil <chip@senkbeil.org>"]
edition = "2021"
homepage = "https://github.com/chipsenkbeil/distant"
repository = "https://github.com/chipsenkbeil/distant"
readme = "README.md"
license = "MIT OR Apache-2.0"
[features]
default = []
tests = []
[dependencies]
bitflags = "2.0.2"
derive_more = { version = "0.99.17", default-features = false, features = ["deref", "deref_mut", "display", "from", "error", "into", "into_iterator", "is_variant"] }
regex = "1.7.3"
serde = { version = "1.0.159", features = ["derive"] }
serde_bytes = "0.11.9"
strum = { version = "0.24.1", features = ["derive"] }
[dev-dependencies]
rmp = "0.8.11"
rmp-serde = "1.1.1"
serde_json = "1.0.96"

@ -0,0 +1,29 @@
mod capabilities;
mod change;
mod cmd;
mod error;
mod filesystem;
mod metadata;
mod permissions;
mod pty;
mod search;
mod system;
mod version;
pub use capabilities::*;
pub use change::*;
pub use cmd::*;
pub use error::*;
pub use filesystem::*;
pub use metadata::*;
pub use permissions::*;
pub use pty::*;
pub use search::*;
pub use system::*;
pub use version::*;
/// Id for a remote process.
pub type ProcessId = u32;
/// Protocol version represented as the tuple `(major, minor, patch)`.
pub type SemVer = (u8, u8, u8);

@ -0,0 +1,380 @@
use std::cmp::Ordering;
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut};
use std::str::FromStr;
use derive_more::{From, Into, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumMessage, IntoEnumIterator};
/// Represents the kinds of capabilities available.
pub use crate::request::RequestKind as CapabilityKind;
/// Set of supported capabilities for a server
#[derive(Clone, Debug, From, Into, PartialEq, Eq, IntoIterator, Serialize, Deserialize)]
// `transparent` makes the wrapper serialize directly as the inner set
// (a plain JSON array) rather than as an object with one field.
#[serde(transparent)]
pub struct Capabilities(#[into_iterator(owned, ref)] HashSet<Capability>);
impl Capabilities {
    /// Return set of capabilities encompassing all possible capabilities
    pub fn all() -> Self {
        Self(CapabilityKind::iter().map(Capability::from).collect())
    }

    /// Return empty set of capabilities
    pub fn none() -> Self {
        Self(HashSet::new())
    }

    /// Returns true if the capability with described kind is included
    pub fn contains(&self, kind: impl AsRef<str>) -> bool {
        self.0.contains(&Self::lookup_key(kind))
    }

    /// Adds the specified capability to the set of capabilities
    ///
    /// * If the set did not have this capability, returns `true`
    /// * If the set did have this capability, returns `false`
    pub fn insert(&mut self, cap: impl Into<Capability>) -> bool {
        self.0.insert(cap.into())
    }

    /// Removes the capability with the described kind, returning the capability
    pub fn take(&mut self, kind: impl AsRef<str>) -> Option<Capability> {
        self.0.take(&Self::lookup_key(kind))
    }

    /// Removes the capability with the described kind, returning true if it existed
    pub fn remove(&mut self, kind: impl AsRef<str>) -> bool {
        self.0.remove(&Self::lookup_key(kind))
    }

    /// Converts into vec of capabilities sorted by kind
    pub fn into_sorted_vec(self) -> Vec<Capability> {
        let mut this = self.0.into_iter().collect::<Vec<_>>();
        this.sort_unstable();
        this
    }

    /// Builds a throwaway [`Capability`] usable for kind-based set lookups.
    ///
    /// [`Capability`]'s `Eq`/`Hash` are keyed solely on `kind`, so the empty
    /// description never affects membership checks. Shared by `contains`,
    /// `take`, and `remove` to avoid repeating the construction.
    fn lookup_key(kind: impl AsRef<str>) -> Capability {
        Capability {
            kind: kind.as_ref().to_string(),
            description: String::new(),
        }
    }
}
// Cheap shared borrow of the inner set of capabilities.
impl AsRef<HashSet<Capability>> for Capabilities {
    fn as_ref(&self) -> &HashSet<Capability> {
        &self.0
    }
}
// Cheap mutable borrow of the inner set of capabilities.
impl AsMut<HashSet<Capability>> for Capabilities {
    fn as_mut(&mut self) -> &mut HashSet<Capability> {
        &mut self.0
    }
}
// Auto-deref so `HashSet` methods can be called directly on `Capabilities`.
impl Deref for Capabilities {
    type Target = HashSet<Capability>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Capabilities {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// Set intersection of two capability sets.
impl BitAnd for &Capabilities {
    type Output = Capabilities;

    fn bitand(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 & &rhs.0)
    }
}

// Set union of two capability sets.
impl BitOr for &Capabilities {
    type Output = Capabilities;

    fn bitor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 | &rhs.0)
    }
}

// Union of a capability set with a single additional capability.
impl BitOr<Capability> for &Capabilities {
    type Output = Capabilities;

    fn bitor(self, rhs: Capability) -> Self::Output {
        let mut singleton = Capabilities::none();
        singleton.0.insert(rhs);
        self.bitor(&singleton)
    }
}

// Symmetric difference of two capability sets.
impl BitXor for &Capabilities {
    type Output = Capabilities;

    fn bitxor(self, rhs: Self) -> Self::Output {
        Capabilities(&self.0 ^ &rhs.0)
    }
}
impl FromIterator<Capability> for Capabilities {
fn from_iter<I: IntoIterator<Item = Capability>>(iter: I) -> Self {
let mut this = Capabilities::none();
for capability in iter {
this.0.insert(capability);
}
this
}
}
/// Capability tied to a server. A capability is equivalent based on its kind and not description.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Capability {
    /// Label describing the kind of capability
    /// (compared ASCII case-insensitively for equality/ordering/hashing)
    pub kind: String,
    /// Information about the capability; ignored by equality comparisons
    pub description: String,
}
impl Capability {
/// Will convert the [`Capability`]'s `kind` into a known [`CapabilityKind`] if possible,
/// returning None if the capability is unknown
pub fn to_capability_kind(&self) -> Option<CapabilityKind> {
CapabilityKind::from_str(&self.kind).ok()
}
/// Returns true if the described capability is unknown
pub fn is_unknown(&self) -> bool {
self.to_capability_kind().is_none()
}
}
impl PartialEq for Capability {
    /// Equality considers only `kind` (ASCII case-insensitive);
    /// `description` is deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.kind.eq_ignore_ascii_case(&other.kind)
    }
}
impl Eq for Capability {}
impl PartialOrd for Capability {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Capability {
    /// Orders capabilities by `kind`, ASCII case-insensitively, matching the
    /// case-insensitive `PartialEq`/`Hash` implementations.
    fn cmp(&self, other: &Self) -> Ordering {
        // Compare lowercased bytes lazily instead of allocating two fresh
        // lowercased `String`s per comparison (this runs inside sort
        // comparators, e.g. `Capabilities::into_sorted_vec`). `str` ordering
        // is byte-lexicographic and `to_ascii_lowercase` only rewrites ASCII
        // bytes, so the result is identical to the allocating version.
        let lhs = self.kind.bytes().map(|b| b.to_ascii_lowercase());
        let rhs = other.kind.bytes().map(|b| b.to_ascii_lowercase());
        lhs.cmp(rhs)
    }
}
impl Hash for Capability {
    /// Hashes only the lowercased `kind`, keeping hashing consistent with the
    /// case-insensitive `PartialEq` implementation (required by `HashSet`).
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.kind.to_ascii_lowercase().hash(state);
    }
}
impl From<CapabilityKind> for Capability {
    /// Creates a new capability using the kind's default message
    /// (an empty description when the kind has no message).
    fn from(kind: CapabilityKind) -> Self {
        let description = kind.get_message().unwrap_or_default().to_string();
        Self {
            kind: kind.to_string(),
            description,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Capability value exercised by every test below.
    fn sample_capability() -> Capability {
        Capability {
            kind: "some kind".to_string(),
            description: "some description".to_string(),
        }
    }

    /// One-element capability set exercised by the set-level tests below.
    fn sample_capabilities() -> Capabilities {
        [sample_capability()].into_iter().collect()
    }

    mod capabilities {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(sample_capabilities()).unwrap();
            assert_eq!(
                value,
                serde_json::json!([
                    {
                        "kind": "some kind",
                        "description": "some description",
                    }
                ])
            );
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!([
                {
                    "kind": "some kind",
                    "description": "some description",
                }
            ]);
            let capabilities: Capabilities = serde_json::from_value(value).unwrap();
            assert_eq!(capabilities, sample_capabilities());
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verifies serialization succeeds; the byte layout is an
            // implementation detail that may change.
            let _ = rmp_serde::encode::to_vec_named(&sample_capabilities()).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trips through msgpack to catch client/server
            // serialization mismatches that have occurred in the past.
            let buf = rmp_serde::encode::to_vec_named(&sample_capabilities()).unwrap();
            let capabilities: Capabilities = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(capabilities, sample_capabilities());
        }
    }

    mod capability {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(sample_capability()).unwrap();
            assert_eq!(
                value,
                serde_json::json!({
                    "kind": "some kind",
                    "description": "some description",
                })
            );
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!({
                "kind": "some kind",
                "description": "some description",
            });
            let capability: Capability = serde_json::from_value(value).unwrap();
            assert_eq!(capability, sample_capability());
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verifies serialization succeeds; the byte layout is an
            // implementation detail that may change.
            let _ = rmp_serde::encode::to_vec_named(&sample_capability()).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trips through msgpack to catch client/server
            // serialization mismatches that have occurred in the past.
            let buf = rmp_serde::encode::to_vec_named(&sample_capability()).unwrap();
            let capability: Capability = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(capability, sample_capability());
        }
    }
}

@ -0,0 +1,380 @@
use std::collections::HashSet;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::ops::{BitOr, Sub};
use std::path::PathBuf;
use std::str::FromStr;
use derive_more::{Deref, DerefMut, IntoIterator};
use serde::{Deserialize, Serialize};
use strum::{EnumString, EnumVariantNames, VariantNames};
/// Change to one or more paths on the filesystem.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Change {
    /// Label describing the kind of change
    pub kind: ChangeKind,
    /// Paths that were changed; all share the single `kind` above
    pub paths: Vec<PathBuf>,
}
/// Represents a label attached to a [`Change`] that describes the kind of change.
///
/// This mirrors events seen from `incron`.
// Rendered in snake_case everywhere: serde (rename_all) for the wire format
// and strum (serialize_all) for Display/FromStr string conversions.
#[derive(
    Copy,
    Clone,
    Debug,
    strum::Display,
    EnumString,
    EnumVariantNames,
    Hash,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Serialize,
    Deserialize,
)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum ChangeKind {
    /// A file was read
    Access,
    /// A file's or directory's attributes were changed
    Attribute,
    /// A file open for writing was closed
    CloseWrite,
    /// A file not open for writing was closed
    CloseNoWrite,
    /// A file, directory, or something else was created within a watched directory
    Create,
    /// A file, directory, or something else was deleted
    Delete,
    /// A file's content was modified
    Modify,
    /// A file was opened
    Open,
    /// A file, directory, or something else was renamed in some way
    Rename,
    /// Catch-all for any other change
    Unknown,
}
impl ChangeKind {
    /// Returns a list of all variants as str names
    /// (snake_case, supplied by strum's `EnumVariantNames` derive)
    pub const fn variants() -> &'static [&'static str] {
        Self::VARIANTS
    }
    /// Returns a list of all variants as a vec, sorted by variant order
    pub fn all() -> Vec<ChangeKind> {
        ChangeKindSet::all().into_sorted_vec()
    }
    /// Returns true if kind is part of the access family.
    pub fn is_access(&self) -> bool {
        matches!(
            self,
            Self::Access | Self::CloseWrite | Self::CloseNoWrite | Self::Open
        )
    }
    /// Returns true if kind is part of the create family.
    pub fn is_create(&self) -> bool {
        matches!(self, Self::Create)
    }
    /// Returns true if kind is part of the delete family.
    pub fn is_delete(&self) -> bool {
        matches!(self, Self::Delete)
    }
    /// Returns true if kind is part of the modify family.
    pub fn is_modify(&self) -> bool {
        matches!(self, Self::Attribute | Self::Modify)
    }
    /// Returns true if kind is part of the rename family.
    pub fn is_rename(&self) -> bool {
        matches!(self, Self::Rename)
    }
    /// Returns true if kind is unknown.
    pub fn is_unknown(&self) -> bool {
        matches!(self, Self::Unknown)
    }
}
impl BitOr for ChangeKind {
    type Output = ChangeKindSet;

    /// Combines two kinds into a set holding both (one entry if they match).
    fn bitor(self, rhs: Self) -> Self::Output {
        ChangeKindSet::new([self, rhs])
    }
}
/// Represents a distinct set of different change kinds
// NOTE: `PartialEq`/`Eq`/`Hash` are implemented manually below rather than
// derived alongside these traits.
#[derive(Clone, Debug, Deref, DerefMut, IntoIterator, Serialize, Deserialize)]
pub struct ChangeKindSet(HashSet<ChangeKind>);
impl ChangeKindSet {
    /// Produces a set containing exactly the provided change kinds.
    pub fn new(set: impl IntoIterator<Item = ChangeKind>) -> Self {
        set.into_iter().collect()
    }

    /// Produces an empty set of [`ChangeKind`]
    pub fn empty() -> Self {
        Self(HashSet::new())
    }

    /// Produces a set of all [`ChangeKind`]
    pub fn all() -> Self {
        // Collect straight from a fixed-size array; the previous `vec![...]`
        // allocated an intermediate Vec only to iterate it (clippy::useless_vec).
        Self::new([
            ChangeKind::Access,
            ChangeKind::Attribute,
            ChangeKind::CloseWrite,
            ChangeKind::CloseNoWrite,
            ChangeKind::Create,
            ChangeKind::Delete,
            ChangeKind::Modify,
            ChangeKind::Open,
            ChangeKind::Rename,
            ChangeKind::Unknown,
        ])
    }

    /// Consumes set and returns a sorted vec of the kinds of changes
    pub fn into_sorted_vec(self) -> Vec<ChangeKind> {
        let mut v = self.0.into_iter().collect::<Vec<_>>();
        // Unstable sort is safe and faster here: set elements are distinct,
        // so stability cannot be observed.
        v.sort_unstable();
        v
    }
}
impl fmt::Display for ChangeKindSet {
    /// Outputs a comma-separated series of [`ChangeKind`] as string that are sorted
    /// such that this will always be consistent output
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut names: Vec<String> = self.0.iter().map(ToString::to_string).collect();
        names.sort_unstable();
        f.write_str(&names.join(","))
    }
}
impl PartialEq for ChangeKindSet {
    /// Two sets are equal when they contain exactly the same change kinds.
    fn eq(&self, other: &Self) -> bool {
        // Compare the underlying sets directly rather than rendering both
        // sides to sorted strings, which allocated and sorted on every
        // comparison. The result is unchanged: the sorted string rendering
        // is injective over sets of kinds, so string equality held exactly
        // when the sets were equal.
        self.0 == other.0
    }
}
impl Eq for ChangeKindSet {}
impl Hash for ChangeKindSet {
    /// Hashes based on the output of [`fmt::Display`]
    ///
    /// The rendering is sorted, so sets holding the same kinds hash
    /// identically regardless of internal `HashSet` iteration order.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.to_string().hash(state);
    }
}
impl BitOr<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Union of two sets; consumes both and reuses the left-hand storage.
    fn bitor(mut self, rhs: ChangeKindSet) -> Self::Output {
        self.0.extend(rhs.0);
        self
    }
}

impl BitOr<ChangeKind> for ChangeKindSet {
    type Output = Self;

    /// Union of a set with one additional kind.
    fn bitor(mut self, rhs: ChangeKind) -> Self::Output {
        self.0.insert(rhs);
        self
    }
}

impl BitOr<ChangeKindSet> for ChangeKind {
    type Output = ChangeKindSet;

    /// Union of one kind with a set; delegates to the `set | kind` form.
    fn bitor(self, rhs: ChangeKindSet) -> Self::Output {
        rhs | self
    }
}
impl Sub<ChangeKindSet> for ChangeKindSet {
    type Output = Self;

    /// Set difference: the kinds in `self` that are absent from `other`.
    fn sub(self, other: Self) -> Self::Output {
        ChangeKindSet(self.0.difference(&other.0).copied().collect())
    }
}

impl Sub<&'_ ChangeKindSet> for &ChangeKindSet {
    type Output = ChangeKindSet;

    /// Set difference without consuming either operand.
    fn sub(self, other: &ChangeKindSet) -> Self::Output {
        ChangeKindSet(self.0.difference(&other.0).copied().collect())
    }
}
impl FromStr for ChangeKindSet {
type Err = strum::ParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut change_set = HashSet::new();
for word in s.split(',') {
change_set.insert(ChangeKind::from_str(word.trim())?);
}
Ok(ChangeKindSet(change_set))
}
}
impl FromIterator<ChangeKind> for ChangeKindSet {
fn from_iter<I: IntoIterator<Item = ChangeKind>>(iter: I) -> Self {
let mut change_set = HashSet::new();
for i in iter {
change_set.insert(i);
}
ChangeKindSet(change_set)
}
}
impl From<ChangeKind> for ChangeKindSet {
    /// Produces a set containing only the given change kind.
    fn from(change_kind: ChangeKind) -> Self {
        std::iter::once(change_kind).collect()
    }
}

impl From<Vec<ChangeKind>> for ChangeKindSet {
    /// Produces a set of the given change kinds, deduplicating repeats.
    fn from(changes: Vec<ChangeKind>) -> Self {
        changes.into_iter().collect()
    }
}
impl Default for ChangeKindSet {
    /// Defaults to an empty set of change kinds.
    fn default() -> Self {
        Self::empty()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Set value exercised by every set-level test below.
    fn close_write_set() -> ChangeKindSet {
        ChangeKindSet::new([ChangeKind::CloseWrite])
    }

    mod change_kind_set {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(close_write_set()).unwrap();
            assert_eq!(value, serde_json::json!(["close_write"]));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let set: ChangeKindSet =
                serde_json::from_value(serde_json::json!(["close_write"])).unwrap();
            assert_eq!(set, close_write_set());
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verifies serialization succeeds; the byte layout is an
            // implementation detail that may change.
            let _ = rmp_serde::encode::to_vec_named(&close_write_set()).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trips through msgpack to catch client/server
            // serialization mismatches that have occurred in the past.
            let buf = rmp_serde::encode::to_vec_named(&close_write_set()).unwrap();
            let set: ChangeKindSet = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(set, close_write_set());
        }
    }

    mod change_kind {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(ChangeKind::CloseWrite).unwrap();
            assert_eq!(value, serde_json::json!("close_write"));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let kind: ChangeKind =
                serde_json::from_value(serde_json::json!("close_write")).unwrap();
            assert_eq!(kind, ChangeKind::CloseWrite);
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verifies serialization succeeds; the byte layout is an
            // implementation detail that may change.
            let _ = rmp_serde::encode::to_vec_named(&ChangeKind::CloseWrite).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trips through msgpack to catch client/server
            // serialization mismatches that have occurred in the past.
            let buf = rmp_serde::encode::to_vec_named(&ChangeKind::CloseWrite).unwrap();
            let kind: ChangeKind = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(kind, ChangeKind::CloseWrite);
        }
    }
}

@ -0,0 +1,89 @@
use std::ops::{Deref, DerefMut};
use derive_more::{Display, From, Into};
use serde::{Deserialize, Serialize};
/// Represents some command with arguments to execute
#[derive(Clone, Debug, Display, From, Into, Hash, PartialEq, Eq, Serialize, Deserialize)]
pub struct Cmd(String); // newtype over the raw command-line string
impl Cmd {
    /// Creates a new command from the given `cmd`
    pub fn new(cmd: impl Into<String>) -> Self {
        Self(cmd.into())
    }

    /// Returns reference to the program portion of the command
    ///
    /// The program is the first whitespace-delimited token. Leading
    /// whitespace is ignored, so `" echo hi"` still reports `"echo"`.
    pub fn program(&self) -> &str {
        // Trim leading whitespace before splitting; otherwise the first
        // space of e.g. " echo hi" would split into an empty program name
        // with "echo hi" treated as the arguments.
        let cmd = self.0.trim_start();
        match cmd.split_once(' ') {
            Some((program, _)) => program.trim(),
            None => cmd.trim(),
        }
    }

    /// Returns reference to the arguments portion of the command
    /// (empty string when only a program was supplied)
    pub fn arguments(&self) -> &str {
        match self.0.trim_start().split_once(' ') {
            Some((_, arguments)) => arguments.trim(),
            None => "",
        }
    }
}
// Auto-deref to the underlying `String` so string methods can be called
// directly on a `Cmd`.
impl Deref for Cmd {
    type Target = String;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Cmd {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Command value exercised by every test below.
    fn sample_cmd() -> Cmd {
        Cmd::new("echo some text")
    }

    #[test]
    fn should_be_able_to_serialize_to_json() {
        let value = serde_json::to_value(sample_cmd()).unwrap();
        assert_eq!(value, serde_json::json!("echo some text"));
    }

    #[test]
    fn should_be_able_to_deserialize_from_json() {
        let cmd: Cmd = serde_json::from_value(serde_json::json!("echo some text")).unwrap();
        assert_eq!(cmd, sample_cmd());
    }

    #[test]
    fn should_be_able_to_serialize_to_msgpack() {
        // Only verifies serialization succeeds; the byte layout is an
        // implementation detail that may change.
        let _ = rmp_serde::encode::to_vec_named(&sample_cmd()).unwrap();
    }

    #[test]
    fn should_be_able_to_deserialize_from_msgpack() {
        // Round-trips through msgpack to catch client/server serialization
        // mismatches that have occurred in the past.
        let buf = rmp_serde::encode::to_vec_named(&sample_cmd()).unwrap();
        let cmd: Cmd = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(cmd, sample_cmd());
    }
}

@ -1,12 +1,10 @@
use std::io; use std::io;
use derive_more::Display; use derive_more::Display;
use notify::ErrorKind as NotifyErrorKind;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
/// General purpose error type that can be sent across the wire /// General purpose error type that can be sent across the wire
#[derive(Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[display(fmt = "{kind}: {description}")] #[display(fmt = "{kind}: {description}")]
#[serde(rename_all = "snake_case", deny_unknown_fields)] #[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct Error { pub struct Error {
@ -26,13 +24,6 @@ impl Error {
} }
} }
#[cfg(feature = "schemars")]
impl Error {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(Error)
}
}
impl<'a> From<&'a str> for Error { impl<'a> From<&'a str> for Error {
fn from(x: &'a str) -> Self { fn from(x: &'a str) -> Self {
Self::from(x.to_string()) Self::from(x.to_string())
@ -63,76 +54,8 @@ impl From<Error> for io::Error {
} }
} }
impl From<notify::Error> for Error {
fn from(x: notify::Error) -> Self {
let err = match x.kind {
NotifyErrorKind::Generic(x) => Self {
kind: ErrorKind::Other,
description: x,
},
NotifyErrorKind::Io(x) => Self::from(x),
NotifyErrorKind::PathNotFound => Self {
kind: ErrorKind::Other,
description: String::from("Path not found"),
},
NotifyErrorKind::WatchNotFound => Self {
kind: ErrorKind::Other,
description: String::from("Watch not found"),
},
NotifyErrorKind::InvalidConfig(_) => Self {
kind: ErrorKind::Other,
description: String::from("Invalid config"),
},
NotifyErrorKind::MaxFilesWatch => Self {
kind: ErrorKind::Other,
description: String::from("Max files watched"),
},
};
Self {
kind: err.kind,
description: format!(
"{}\n\nPaths: {}",
err.description,
x.paths
.into_iter()
.map(|p| p.to_string_lossy().to_string())
.collect::<Vec<String>>()
.join(", ")
),
}
}
}
impl From<walkdir::Error> for Error {
fn from(x: walkdir::Error) -> Self {
if x.io_error().is_some() {
x.into_io_error().map(Self::from).unwrap()
} else {
Self {
kind: ErrorKind::Loop,
description: format!("{x}"),
}
}
}
}
impl From<tokio::task::JoinError> for Error {
fn from(x: tokio::task::JoinError) -> Self {
Self {
kind: if x.is_cancelled() {
ErrorKind::TaskCancelled
} else {
ErrorKind::TaskPanicked
},
description: format!("{x}"),
}
}
}
/// All possible kinds of errors that can be returned /// All possible kinds of errors that can be returned
#[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)] #[derive(Copy, Clone, Debug, Display, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case", deny_unknown_fields)] #[serde(rename_all = "snake_case", deny_unknown_fields)]
pub enum ErrorKind { pub enum ErrorKind {
/// An entity was not found, often a file /// An entity was not found, often a file
@ -211,13 +134,6 @@ pub enum ErrorKind {
Unknown, Unknown,
} }
#[cfg(feature = "schemars")]
impl ErrorKind {
pub fn root_schema() -> schemars::schema::RootSchema {
schemars::schema_for!(ErrorKind)
}
}
impl From<io::ErrorKind> for ErrorKind { impl From<io::ErrorKind> for ErrorKind {
fn from(kind: io::ErrorKind) -> Self { fn from(kind: io::ErrorKind) -> Self {
match kind { match kind {
@ -275,3 +191,125 @@ impl From<ErrorKind> for io::ErrorKind {
} }
} }
} }
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip (de)serialization tests for `Error` and `ErrorKind` across
    // the two wire formats in use: JSON (serde_json) and MessagePack
    // (rmp_serde). The JSON fixtures also pin the snake_case field/variant
    // naming as part of the wire contract.
    mod error {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let error = Error {
                kind: ErrorKind::AddrInUse,
                description: "some description".to_string(),
            };

            let value = serde_json::to_value(error).unwrap();
            assert_eq!(
                value,
                serde_json::json!({
                    "kind": "addr_in_use",
                    "description": "some description",
                })
            );
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!({
                "kind": "addr_in_use",
                "description": "some description",
            });

            let error: Error = serde_json::from_value(value).unwrap();
            assert_eq!(
                error,
                Error {
                    kind: ErrorKind::AddrInUse,
                    description: "some description".to_string(),
                }
            );
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            let error = Error {
                kind: ErrorKind::AddrInUse,
                description: "some description".to_string(),
            };

            // NOTE: We don't actually check the output here because it's an implementation detail
            // and could change as we change how serialization is done. This is merely to verify
            // that we can serialize since there are times when serde fails to serialize at
            // runtime.
            let _ = rmp_serde::encode::to_vec_named(&error).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
            // verify that we are not corrupting or preventing issues when serializing on a
            // client/server and then trying to deserialize on the other side. This has happened
            // enough times with minor changes that we need tests to verify.
            let buf = rmp_serde::encode::to_vec_named(&Error {
                kind: ErrorKind::AddrInUse,
                description: "some description".to_string(),
            })
            .unwrap();

            let error: Error = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(
                error,
                Error {
                    kind: ErrorKind::AddrInUse,
                    description: "some description".to_string(),
                }
            );
        }
    }

    mod error_kind {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let kind = ErrorKind::AddrInUse;

            let value = serde_json::to_value(kind).unwrap();
            assert_eq!(value, serde_json::json!("addr_in_use"));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!("addr_in_use");

            let kind: ErrorKind = serde_json::from_value(value).unwrap();
            assert_eq!(kind, ErrorKind::AddrInUse);
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            let kind = ErrorKind::AddrInUse;

            // NOTE: We don't actually check the output here because it's an implementation detail
            // and could change as we change how serialization is done. This is merely to verify
            // that we can serialize since there are times when serde fails to serialize at
            // runtime.
            let _ = rmp_serde::encode::to_vec_named(&kind).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
            // verify that we are not corrupting or causing issues when serializing on a
            // client/server and then trying to deserialize on the other side. This has happened
            // enough times with minor changes that we need tests to verify.
            let buf = rmp_serde::encode::to_vec_named(&ErrorKind::AddrInUse).unwrap();

            let kind: ErrorKind = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(kind, ErrorKind::AddrInUse);
        }
    }
}

@ -0,0 +1,173 @@
use std::fs::FileType as StdFileType;
use std::path::PathBuf;
use derive_more::IsVariant;
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
/// Represents information about a single entry within a directory
///
/// Serialized with snake_case keys; unknown fields are rejected during
/// deserialization (`deny_unknown_fields`).
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
pub struct DirEntry {
    /// Represents the full path to the entry
    pub path: PathBuf,
    /// Represents the type of the entry as a file/dir/symlink
    pub file_type: FileType,
    /// Depth at which this entry was created relative to the root (0 being immediately within
    /// root)
    pub depth: usize,
}
/// Represents the type associated with a dir entry
///
/// Serialized (by both serde and strum's `AsRefStr`) using snake_case
/// variant names.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, AsRefStr, IsVariant, Serialize, Deserialize)]
#[serde(rename_all = "snake_case", deny_unknown_fields)]
#[strum(serialize_all = "snake_case")]
pub enum FileType {
    /// Entry is a directory
    Dir,
    /// Entry is a regular file
    File,
    /// Entry is a symbolic link
    Symlink,
}
impl From<StdFileType> for FileType {
    /// Maps a standard library file type onto the protocol's [`FileType`].
    ///
    /// The directory check takes precedence, then the symlink check, with
    /// everything else treated as a regular file.
    fn from(ft: StdFileType) -> Self {
        match (ft.is_dir(), ft.is_symlink()) {
            (true, _) => Self::Dir,
            (false, true) => Self::Symlink,
            (false, false) => Self::File,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip (de)serialization tests for `DirEntry` and `FileType` over
    // JSON and MessagePack, pinning the snake_case naming of fields and
    // variants as part of the wire contract.
    mod dir_entry {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let entry = DirEntry {
                path: PathBuf::from("dir").join("file"),
                file_type: FileType::File,
                depth: 1,
            };

            // Expected path text is platform-dependent (separator), so it is
            // captured from the entry itself before serialization
            let path = entry.path.to_str().unwrap().to_string();

            let value = serde_json::to_value(entry).unwrap();
            assert_eq!(
                value,
                serde_json::json!({
                    "path": path,
                    "file_type": "file",
                    "depth": 1,
                })
            );
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!({
                "path": "test-file",
                "file_type": "file",
                "depth": 0,
            });

            let entry: DirEntry = serde_json::from_value(value).unwrap();
            assert_eq!(
                entry,
                DirEntry {
                    path: PathBuf::from("test-file"),
                    file_type: FileType::File,
                    depth: 0,
                }
            );
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            let entry = DirEntry {
                path: PathBuf::from("dir").join("file"),
                file_type: FileType::File,
                depth: 1,
            };

            // NOTE: We don't actually check the output here because it's an implementation detail
            // and could change as we change how serialization is done. This is merely to verify
            // that we can serialize since there are times when serde fails to serialize at
            // runtime.
            let _ = rmp_serde::encode::to_vec_named(&entry).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
            // verify that we are not corrupting or causing issues when serializing on a
            // client/server and then trying to deserialize on the other side. This has happened
            // enough times with minor changes that we need tests to verify.
            let buf = rmp_serde::encode::to_vec_named(&DirEntry {
                path: PathBuf::from("test-file"),
                file_type: FileType::File,
                depth: 0,
            })
            .unwrap();

            let entry: DirEntry = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(
                entry,
                DirEntry {
                    path: PathBuf::from("test-file"),
                    file_type: FileType::File,
                    depth: 0,
                }
            );
        }
    }

    mod file_type {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let ty = FileType::File;

            let value = serde_json::to_value(ty).unwrap();
            assert_eq!(value, serde_json::json!("file"));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let value = serde_json::json!("file");

            let ty: FileType = serde_json::from_value(value).unwrap();
            assert_eq!(ty, FileType::File);
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            let ty = FileType::File;

            // NOTE: We don't actually check the output here because it's an implementation detail
            // and could change as we change how serialization is done. This is merely to verify
            // that we can serialize since there are times when serde fails to serialize at
            // runtime.
            let _ = rmp_serde::encode::to_vec_named(&ty).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
            // verify that we are not corrupting or causing issues when serializing on a
            // client/server and then trying to deserialize on the other side. This has happened
            // enough times with minor changes that we need tests to verify.
            let buf = rmp_serde::encode::to_vec_named(&FileType::File).unwrap();

            let ty: FileType = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(ty, FileType::File);
        }
    }
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,658 @@
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use crate::utils;
/// Options controlling how permissions are applied (see the field docs for
/// symlink handling and recursion). All flags default to `false`, and
/// `false` flags are omitted from serialized output.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields, rename_all = "snake_case")]
pub struct SetPermissionsOptions {
    /// Whether or not to exclude symlinks from traversal entirely, meaning that permissions will
    /// not be set on symlinks (usually resolving the symlink and setting the permission of the
    /// referenced file or directory) that are explicitly provided or show up during recursion.
    #[serde(skip_serializing_if = "utils::is_false")]
    pub exclude_symlinks: bool,
    /// Whether or not to traverse symlinks when recursively setting permissions. Note that this
    /// does NOT influence setting permissions when encountering a symlink as most platforms will
    /// resolve the symlink before setting permissions.
    #[serde(skip_serializing_if = "utils::is_false")]
    pub follow_symlinks: bool,
    /// Whether or not to set the permissions of the file hierarchies rooted in the paths, instead
    /// of just the paths themselves.
    #[serde(skip_serializing_if = "utils::is_false")]
    pub recursive: bool,
}
/// Represents permissions to apply to some path on a remote machine
///
/// When used to set permissions on a file, directory, or symlink,
/// only fields that are set (not `None`) will be applied.
///
/// On `Unix` platforms, this translates directly into the mode that
/// you would find with `chmod`. On all other platforms, this uses the
/// write flags to determine whether or not to set the readonly status.
///
/// Unset (`None`) fields are omitted entirely from serialized output
/// rather than emitted as explicit nulls.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
pub struct Permissions {
    /// Represents whether or not owner can read from the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub owner_read: Option<bool>,
    /// Represents whether or not owner can write to the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub owner_write: Option<bool>,
    /// Represents whether or not owner can execute the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub owner_exec: Option<bool>,
    /// Represents whether or not associated group can read from the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub group_read: Option<bool>,
    /// Represents whether or not associated group can write to the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub group_write: Option<bool>,
    /// Represents whether or not associated group can execute the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub group_exec: Option<bool>,
    /// Represents whether or not other can read from the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub other_read: Option<bool>,
    /// Represents whether or not other can write to the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub other_write: Option<bool>,
    /// Represents whether or not other can execute the file
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub other_exec: Option<bool>,
}
impl Permissions {
/// Creates a set of [`Permissions`] that indicate readonly status.
///
/// ```
/// use distant_protocol::Permissions;
///
/// let permissions = Permissions::readonly();
/// assert_eq!(permissions.is_readonly(), Some(true));
/// assert_eq!(permissions.is_writable(), Some(false));
/// ```
pub fn readonly() -> Self {
Self {
owner_write: Some(false),
group_write: Some(false),
other_write: Some(false),
owner_read: Some(true),
group_read: Some(true),
other_read: Some(true),
owner_exec: None,
group_exec: None,
other_exec: None,
}
}
/// Creates a set of [`Permissions`] that indicate globally writable status.
///
/// ```
/// use distant_protocol::Permissions;
///
/// let permissions = Permissions::writable();
/// assert_eq!(permissions.is_readonly(), Some(false));
/// assert_eq!(permissions.is_writable(), Some(true));
/// ```
pub fn writable() -> Self {
Self {
owner_write: Some(true),
group_write: Some(true),
other_write: Some(true),
owner_read: Some(true),
group_read: Some(true),
other_read: Some(true),
owner_exec: None,
group_exec: None,
other_exec: None,
}
}
/// Returns true if the permission set has a value specified for each permission (no `None`
/// settings).
///
/// ```
/// use distant_protocol::Permissions;
///
/// let permissions = Permissions {
/// owner_write: Some(true),
/// group_write: Some(false),
/// other_write: Some(true),
/// owner_read: Some(false),
/// group_read: Some(true),
/// other_read: Some(false),
/// owner_exec: Some(true),
/// group_exec: Some(false),
/// other_exec: Some(true),
/// };
/// assert!(permissions.is_complete());
/// ```
pub fn is_complete(&self) -> bool {
self.owner_read.is_some()
&& self.owner_write.is_some()
&& self.owner_exec.is_some()
&& self.group_read.is_some()
&& self.group_write.is_some()
&& self.group_exec.is_some()
&& self.other_read.is_some()
&& self.other_write.is_some()
&& self.other_exec.is_some()
}
/// Returns `true` if permissions represent readonly, `false` if permissions represent
/// writable, and `None` if no permissions have been set to indicate either status.
///
/// ```
/// use distant_protocol::Permissions;
///
/// assert_eq!(
/// Permissions { owner_write: Some(true), ..Default::default() }.is_readonly(),
/// Some(false)
/// );
///
/// assert_eq!(
/// Permissions { owner_write: Some(false), ..Default::default() }.is_readonly(),
/// Some(true)
/// );
///
/// assert_eq!(
/// Permissions { ..Default::default() }.is_writable(),
/// None
/// );
/// ```
#[inline]
pub fn is_readonly(&self) -> Option<bool> {
// Negate the writable status to indicate whether or not readonly
self.is_writable().map(|x| !x)
}
/// Returns `true` if permissions represent ability to write, `false` if permissions represent
/// inability to write, and `None` if no permissions have been set to indicate either status.
///
/// ```
/// use distant_protocol::Permissions;
///
/// assert_eq!(
/// Permissions { owner_write: Some(true), ..Default::default() }.is_writable(),
/// Some(true)
/// );
///
/// assert_eq!(
/// Permissions { owner_write: Some(false), ..Default::default() }.is_writable(),
/// Some(false)
/// );
///
/// assert_eq!(
/// Permissions { ..Default::default() }.is_writable(),
/// None
/// );
/// ```
#[inline]
pub fn is_writable(&self) -> Option<bool> {
match (self.owner_write, self.group_write, self.other_write) {
(None, None, None) => None,
(owner, group, other) => {
Some(owner.unwrap_or(false) || group.unwrap_or(false) || other.unwrap_or(false))
}
}
}
/// Applies `other` settings to `self`, overwriting any of the permissions in `self` with `other`.
///
/// ```
/// use distant_protocol::Permissions;
///
/// let mut a = Permissions {
/// owner_read: Some(true),
/// owner_write: Some(false),
/// owner_exec: None,
/// ..Default::default()
/// };
///
/// let b = Permissions {
/// owner_read: Some(false),
/// owner_write: None,
/// owner_exec: Some(true),
/// ..Default::default()
/// };
///
/// a.apply_from(&b);
///
/// assert_eq!(a, Permissions {
/// owner_read: Some(false),
/// owner_write: Some(false),
/// owner_exec: Some(true),
/// ..Default::default()
/// });
/// ```
#[inline]
pub fn apply_from(&mut self, other: &Self) {
macro_rules! apply {
($key:ident) => {{
if let Some(value) = other.$key {
self.$key = Some(value);
}
}};
}
apply!(owner_read);
apply!(owner_write);
apply!(owner_exec);
apply!(group_read);
apply!(group_write);
apply!(group_exec);
apply!(other_read);
apply!(other_write);
apply!(other_exec);
}
/// Applies `self` settings to `other`, overwriting any of the permissions in `other` with
/// `self`.
///
/// ```
/// use distant_protocol::Permissions;
///
/// let a = Permissions {
/// owner_read: Some(true),
/// owner_write: Some(false),
/// owner_exec: None,
/// ..Default::default()
/// };
///
/// let mut b = Permissions {
/// owner_read: Some(false),
/// owner_write: None,
/// owner_exec: Some(true),
/// ..Default::default()
/// };
///
/// a.apply_to(&mut b);
///
/// assert_eq!(b, Permissions {
/// owner_read: Some(true),
/// owner_write: Some(false),
/// owner_exec: Some(true),
/// ..Default::default()
/// });
/// ```
#[inline]
pub fn apply_to(&self, other: &mut Self) {
Self::apply_from(other, self)
}
/// Converts a Unix `mode` into the permission set.
pub fn from_unix_mode(mode: u32) -> Self {
let flags = UnixFilePermissionFlags::from_bits_truncate(mode);
Self {
owner_read: Some(flags.contains(UnixFilePermissionFlags::OWNER_READ)),
owner_write: Some(flags.contains(UnixFilePermissionFlags::OWNER_WRITE)),
owner_exec: Some(flags.contains(UnixFilePermissionFlags::OWNER_EXEC)),
group_read: Some(flags.contains(UnixFilePermissionFlags::GROUP_READ)),
group_write: Some(flags.contains(UnixFilePermissionFlags::GROUP_WRITE)),
group_exec: Some(flags.contains(UnixFilePermissionFlags::GROUP_EXEC)),
other_read: Some(flags.contains(UnixFilePermissionFlags::OTHER_READ)),
other_write: Some(flags.contains(UnixFilePermissionFlags::OTHER_WRITE)),
other_exec: Some(flags.contains(UnixFilePermissionFlags::OTHER_EXEC)),
}
}
/// Converts to a Unix `mode` from a permission set. For any missing setting, a 0 bit is used.
///
/// ```
/// use distant_protocol::Permissions;
///
/// assert_eq!(Permissions {
/// owner_read: Some(true),
/// owner_write: Some(true),
/// owner_exec: Some(true),
/// group_read: Some(true),
/// group_write: Some(true),
/// group_exec: Some(true),
/// other_read: Some(true),
/// other_write: Some(true),
/// other_exec: Some(true),
/// }.to_unix_mode(), 0o777);
///
/// assert_eq!(Permissions {
/// owner_read: Some(true),
/// owner_write: Some(false),
/// owner_exec: Some(false),
/// group_read: Some(true),
/// group_write: Some(false),
/// group_exec: Some(false),
/// other_read: Some(true),
/// other_write: Some(false),
/// other_exec: Some(false),
/// }.to_unix_mode(), 0o444);
///
/// assert_eq!(Permissions {
/// owner_exec: Some(true),
/// group_exec: Some(true),
/// other_exec: Some(true),
/// ..Default::default()
/// }.to_unix_mode(), 0o111);
/// ```
pub fn to_unix_mode(&self) -> u32 {
let mut flags = UnixFilePermissionFlags::empty();
macro_rules! is_true {
($opt:expr) => {{
$opt.is_some() && $opt.unwrap()
}};
}
if is_true!(self.owner_read) {
flags.insert(UnixFilePermissionFlags::OWNER_READ);
}
if is_true!(self.owner_write) {
flags.insert(UnixFilePermissionFlags::OWNER_WRITE);
}
if is_true!(self.owner_exec) {
flags.insert(UnixFilePermissionFlags::OWNER_EXEC);
}
if is_true!(self.group_read) {
flags.insert(UnixFilePermissionFlags::GROUP_READ);
}
if is_true!(self.group_write) {
flags.insert(UnixFilePermissionFlags::GROUP_WRITE);
}
if is_true!(self.group_exec) {
flags.insert(UnixFilePermissionFlags::GROUP_EXEC);
}
if is_true!(self.other_read) {
flags.insert(UnixFilePermissionFlags::OTHER_READ);
}
if is_true!(self.other_write) {
flags.insert(UnixFilePermissionFlags::OTHER_WRITE);
}
if is_true!(self.other_exec) {
flags.insert(UnixFilePermissionFlags::OTHER_EXEC);
}
flags.bits()
}
}
#[cfg(unix)]
impl From<std::fs::Permissions> for Permissions {
    /// Converts [`std::fs::Permissions`] into [`Permissions`] using
    /// [`std::os::unix::fs::PermissionsExt::mode`] to supply the bitset.
    fn from(permissions: std::fs::Permissions) -> Self {
        use std::os::unix::fs::PermissionsExt;

        Self::from_unix_mode(permissions.mode())
    }
}
#[cfg(not(unix))]
impl From<std::fs::Permissions> for Permissions {
    /// Converts [`std::fs::Permissions`] into [`Permissions`] using the `readonly` flag.
    ///
    /// This will not set executable flags, but will set all read and write flags with write flags
    /// being `false` if `readonly`, otherwise set to `true`.
    fn from(permissions: std::fs::Permissions) -> Self {
        match permissions.readonly() {
            true => Self::readonly(),
            false => Self::writable(),
        }
    }
}
#[cfg(unix)]
impl From<Permissions> for std::fs::Permissions {
    /// Converts [`Permissions`] into [`std::fs::Permissions`] using
    /// [`std::os::unix::fs::PermissionsExt::from_mode`].
    fn from(permissions: Permissions) -> Self {
        use std::os::unix::fs::PermissionsExt;

        Self::from_mode(permissions.to_unix_mode())
    }
}
bitflags! {
    /// Permission bits laid out identically to a Unix `chmod` octal mode
    /// (owner/group/other read-write-execute triads)
    struct UnixFilePermissionFlags: u32 {
        const OWNER_READ = 0o400;
        const OWNER_WRITE = 0o200;
        const OWNER_EXEC = 0o100;
        const GROUP_READ = 0o40;
        const GROUP_WRITE = 0o20;
        const GROUP_EXEC = 0o10;
        const OTHER_READ = 0o4;
        const OTHER_WRITE = 0o2;
        const OTHER_EXEC = 0o1;
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip (de)serialization tests for `Permissions` over JSON and
    // MessagePack. "Minimal" cases pin that fully-unset permissions
    // serialize to an empty object (no explicit nulls); "full" cases pin
    // the snake_case field names.
    #[test]
    fn should_be_able_to_serialize_minimal_permissions_to_json() {
        let permissions = Permissions {
            owner_read: None,
            owner_write: None,
            owner_exec: None,
            group_read: None,
            group_write: None,
            group_exec: None,
            other_read: None,
            other_write: None,
            other_exec: None,
        };

        let value = serde_json::to_value(permissions).unwrap();
        assert_eq!(value, serde_json::json!({}));
    }

    #[test]
    fn should_be_able_to_serialize_full_permissions_to_json() {
        let permissions = Permissions {
            owner_read: Some(true),
            owner_write: Some(false),
            owner_exec: Some(true),
            group_read: Some(false),
            group_write: Some(true),
            group_exec: Some(false),
            other_read: Some(true),
            other_write: Some(false),
            other_exec: Some(true),
        };

        let value = serde_json::to_value(permissions).unwrap();
        assert_eq!(
            value,
            serde_json::json!({
                "owner_read": true,
                "owner_write": false,
                "owner_exec": true,
                "group_read": false,
                "group_write": true,
                "group_exec": false,
                "other_read": true,
                "other_write": false,
                "other_exec": true,
            })
        );
    }

    #[test]
    fn should_be_able_to_deserialize_minimal_permissions_from_json() {
        let value = serde_json::json!({});

        let permissions: Permissions = serde_json::from_value(value).unwrap();
        assert_eq!(
            permissions,
            Permissions {
                owner_read: None,
                owner_write: None,
                owner_exec: None,
                group_read: None,
                group_write: None,
                group_exec: None,
                other_read: None,
                other_write: None,
                other_exec: None,
            }
        );
    }

    #[test]
    fn should_be_able_to_deserialize_full_permissions_from_json() {
        let value = serde_json::json!({
            "owner_read": true,
            "owner_write": false,
            "owner_exec": true,
            "group_read": false,
            "group_write": true,
            "group_exec": false,
            "other_read": true,
            "other_write": false,
            "other_exec": true,
        });

        let permissions: Permissions = serde_json::from_value(value).unwrap();
        assert_eq!(
            permissions,
            Permissions {
                owner_read: Some(true),
                owner_write: Some(false),
                owner_exec: Some(true),
                group_read: Some(false),
                group_write: Some(true),
                group_exec: Some(false),
                other_read: Some(true),
                other_write: Some(false),
                other_exec: Some(true),
            }
        );
    }

    #[test]
    fn should_be_able_to_serialize_minimal_permissions_to_msgpack() {
        let permissions = Permissions {
            owner_read: None,
            owner_write: None,
            owner_exec: None,
            group_read: None,
            group_write: None,
            group_exec: None,
            other_read: None,
            other_write: None,
            other_exec: None,
        };

        // NOTE: We don't actually check the output here because it's an implementation detail
        // and could change as we change how serialization is done. This is merely to verify
        // that we can serialize since there are times when serde fails to serialize at
        // runtime.
        let _ = rmp_serde::encode::to_vec_named(&permissions).unwrap();
    }

    #[test]
    fn should_be_able_to_serialize_full_permissions_to_msgpack() {
        let permissions = Permissions {
            owner_read: Some(true),
            owner_write: Some(false),
            owner_exec: Some(true),
            group_read: Some(true),
            group_write: Some(false),
            group_exec: Some(true),
            other_read: Some(true),
            other_write: Some(false),
            other_exec: Some(true),
        };

        // NOTE: We don't actually check the output here because it's an implementation detail
        // and could change as we change how serialization is done. This is merely to verify
        // that we can serialize since there are times when serde fails to serialize at
        // runtime.
        let _ = rmp_serde::encode::to_vec_named(&permissions).unwrap();
    }

    #[test]
    fn should_be_able_to_deserialize_minimal_permissions_from_msgpack() {
        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
        // verify that we are not corrupting or preventing issues when serializing on a
        // client/server and then trying to deserialize on the other side. This has happened
        // enough times with minor changes that we need tests to verify.
        let buf = rmp_serde::encode::to_vec_named(&Permissions {
            owner_read: None,
            owner_write: None,
            owner_exec: None,
            group_read: None,
            group_write: None,
            group_exec: None,
            other_read: None,
            other_write: None,
            other_exec: None,
        })
        .unwrap();

        let permissions: Permissions = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(
            permissions,
            Permissions {
                owner_read: None,
                owner_write: None,
                owner_exec: None,
                group_read: None,
                group_write: None,
                group_exec: None,
                other_read: None,
                other_write: None,
                other_exec: None,
            }
        );
    }

    #[test]
    fn should_be_able_to_deserialize_full_permissions_from_msgpack() {
        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
        // verify that we are not corrupting or preventing issues when serializing on a
        // client/server and then trying to deserialize on the other side. This has happened
        // enough times with minor changes that we need tests to verify.
        let buf = rmp_serde::encode::to_vec_named(&Permissions {
            owner_read: Some(true),
            owner_write: Some(false),
            owner_exec: Some(true),
            group_read: Some(true),
            group_write: Some(false),
            group_exec: Some(true),
            other_read: Some(true),
            other_write: Some(false),
            other_exec: Some(true),
        })
        .unwrap();

        let permissions: Permissions = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(
            permissions,
            Permissions {
                owner_read: Some(true),
                owner_write: Some(false),
                owner_exec: Some(true),
                group_read: Some(true),
                group_write: Some(false),
                group_exec: Some(true),
                other_read: Some(true),
                other_write: Some(false),
                other_exec: Some(true),
            }
        );
    }
}

@ -0,0 +1,241 @@
use std::fmt;
use std::num::ParseIntError;
use std::str::FromStr;
use derive_more::{Display, Error};
use serde::{Deserialize, Serialize};
/// Represents the size associated with a remote PTY
///
/// When deserializing, the pixel dimensions default to 0 if absent.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct PtySize {
    /// Number of lines of text
    pub rows: u16,
    /// Number of columns of text
    pub cols: u16,
    /// Width of a cell in pixels. Note that some systems never fill this value and ignore it.
    #[serde(default)]
    pub pixel_width: u16,
    /// Height of a cell in pixels. Note that some systems never fill this value and ignore it.
    #[serde(default)]
    pub pixel_height: u16,
}
impl PtySize {
    /// Creates a new size from rows and columns alone, leaving both pixel
    /// dimensions zeroed out.
    pub fn from_rows_and_cols(rows: u16, cols: u16) -> Self {
        Self {
            rows,
            cols,
            pixel_width: 0,
            pixel_height: 0,
        }
    }
}
impl fmt::Display for PtySize {
    /// Prints out `rows,cols[,pixel_width,pixel_height]` where the
    /// pixel width and pixel height are only included if either
    /// one of them is not zero
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        } = *self;

        if pixel_width == 0 && pixel_height == 0 {
            write!(f, "{rows},{cols}")
        } else {
            write!(f, "{rows},{cols},{pixel_width},{pixel_height}")
        }
    }
}
impl Default for PtySize {
fn default() -> Self {
PtySize {
rows: 24,
cols: 80,
pixel_width: 0,
pixel_height: 0,
}
}
}
/// Error produced when parsing a [`PtySize`] from a string
#[derive(Clone, Debug, PartialEq, Eq, Display, Error)]
pub enum PtySizeParseError {
    /// No rows token was provided
    MissingRows,
    /// No columns token was provided
    MissingColumns,
    /// Rows token failed to parse as a number
    InvalidRows(ParseIntError),
    /// Columns token failed to parse as a number
    InvalidColumns(ParseIntError),
    /// Pixel width token failed to parse as a number
    InvalidPixelWidth(ParseIntError),
    /// Pixel height token failed to parse as a number
    InvalidPixelHeight(ParseIntError),
}
impl FromStr for PtySize {
    type Err = PtySizeParseError;

    /// Attempts to parse a str into PtySize using one of the following formats:
    ///
    /// * rows,cols (defaults to 0 for pixel_width & pixel_height)
    /// * rows,cols,pixel_width,pixel_height
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut tokens = s.split(',').map(str::trim);

        // Rows and columns are mandatory and validated in order
        let rows: u16 = tokens
            .next()
            .ok_or(PtySizeParseError::MissingRows)?
            .parse()
            .map_err(PtySizeParseError::InvalidRows)?;
        let cols: u16 = tokens
            .next()
            .ok_or(PtySizeParseError::MissingColumns)?
            .parse()
            .map_err(PtySizeParseError::InvalidColumns)?;

        // Pixel dimensions are optional, defaulting to 0 when absent
        let pixel_width: u16 = match tokens.next() {
            Some(token) => token
                .parse()
                .map_err(PtySizeParseError::InvalidPixelWidth)?,
            None => 0,
        };
        let pixel_height: u16 = match tokens.next() {
            Some(token) => token
                .parse()
                .map_err(PtySizeParseError::InvalidPixelHeight)?,
            None => 0,
        };

        Ok(Self {
            rows,
            cols,
            pixel_width,
            pixel_height,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip (de)serialization tests for `PtySize` over JSON and
    // MessagePack, including "minimal" cases verifying that absent pixel
    // dimensions default to 0 on deserialization.
    #[test]
    fn should_be_able_to_serialize_to_json() {
        let size = PtySize {
            rows: 10,
            cols: 20,
            pixel_width: 30,
            pixel_height: 40,
        };

        let value = serde_json::to_value(size).unwrap();
        assert_eq!(
            value,
            serde_json::json!({
                "rows": 10,
                "cols": 20,
                "pixel_width": 30,
                "pixel_height": 40,
            })
        );
    }

    #[test]
    fn should_be_able_to_deserialize_minimal_size_from_json() {
        let value = serde_json::json!({
            "rows": 10,
            "cols": 20,
        });

        let size: PtySize = serde_json::from_value(value).unwrap();
        assert_eq!(
            size,
            PtySize {
                rows: 10,
                cols: 20,
                pixel_width: 0,
                pixel_height: 0,
            }
        );
    }

    #[test]
    fn should_be_able_to_deserialize_full_size_from_json() {
        let value = serde_json::json!({
            "rows": 10,
            "cols": 20,
            "pixel_width": 30,
            "pixel_height": 40,
        });

        let size: PtySize = serde_json::from_value(value).unwrap();
        assert_eq!(
            size,
            PtySize {
                rows: 10,
                cols: 20,
                pixel_width: 30,
                pixel_height: 40,
            }
        );
    }

    #[test]
    fn should_be_able_to_serialize_to_msgpack() {
        let size = PtySize {
            rows: 10,
            cols: 20,
            pixel_width: 30,
            pixel_height: 40,
        };

        // NOTE: We don't actually check the output here because it's an implementation detail
        // and could change as we change how serialization is done. This is merely to verify
        // that we can serialize since there are times when serde fails to serialize at
        // runtime.
        let _ = rmp_serde::encode::to_vec_named(&size).unwrap();
    }

    #[test]
    fn should_be_able_to_deserialize_minimal_size_from_msgpack() {
        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
        // verify that we are not corrupting or causing issues when serializing on a
        // client/server and then trying to deserialize on the other side. This has happened
        // enough times with minor changes that we need tests to verify.

        // Stand-in payload containing only the required fields so we can
        // exercise the defaulting of the pixel dimensions
        #[derive(Serialize)]
        struct PartialSize {
            rows: u16,
            cols: u16,
        }
        let buf = rmp_serde::encode::to_vec_named(&PartialSize { rows: 10, cols: 20 }).unwrap();

        let size: PtySize = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(
            size,
            PtySize {
                rows: 10,
                cols: 20,
                pixel_width: 0,
                pixel_height: 0,
            }
        );
    }

    #[test]
    fn should_be_able_to_deserialize_full_size_from_msgpack() {
        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
        // verify that we are not corrupting or causing issues when serializing on a
        // client/server and then trying to deserialize on the other side. This has happened
        // enough times with minor changes that we need tests to verify.
        let buf = rmp_serde::encode::to_vec_named(&PtySize {
            rows: 10,
            cols: 20,
            pixel_width: 30,
            pixel_height: 40,
        })
        .unwrap();

        let size: PtySize = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(
            size,
            PtySize {
                rows: 10,
                cols: 20,
                pixel_width: 30,
                pixel_height: 40,
            }
        );
    }
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,142 @@
use std::path::PathBuf;
use serde::{Deserialize, Serialize};
/// Represents information about a system
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SystemInfo {
    /// Family of the operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.FAMILY.html
    pub family: String,
    /// Name of the specific operating system as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.OS.html
    pub os: String,
    /// Architecture of the CPU as described in
    /// https://doc.rust-lang.org/std/env/consts/constant.ARCH.html
    pub arch: String,
    /// Current working directory of the running server process
    pub current_dir: PathBuf,
    /// Primary separator for path components for the current platform
    /// as defined in https://doc.rust-lang.org/std/path/constant.MAIN_SEPARATOR.html
    pub main_separator: char,
    /// Name of the user running the server process
    pub username: String,
    /// Default shell tied to user running the server process
    pub shell: String,
}
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip (de)serialization tests for `SystemInfo` over JSON and
    // MessagePack, pinning the snake_case field naming as part of the wire
    // contract.
    #[test]
    fn should_be_able_to_serialize_to_json() {
        let info = SystemInfo {
            family: String::from("family"),
            os: String::from("os"),
            arch: String::from("arch"),
            current_dir: PathBuf::from("current-dir"),
            main_separator: '/',
            username: String::from("username"),
            shell: String::from("shell"),
        };

        let value = serde_json::to_value(info).unwrap();
        assert_eq!(
            value,
            serde_json::json!({
                "family": "family",
                "os": "os",
                "arch": "arch",
                "current_dir": "current-dir",
                "main_separator": '/',
                "username": "username",
                "shell": "shell",
            })
        );
    }

    #[test]
    fn should_be_able_to_deserialize_from_json() {
        let value = serde_json::json!({
            "family": "family",
            "os": "os",
            "arch": "arch",
            "current_dir": "current-dir",
            "main_separator": '/',
            "username": "username",
            "shell": "shell",
        });

        let info: SystemInfo = serde_json::from_value(value).unwrap();
        assert_eq!(
            info,
            SystemInfo {
                family: String::from("family"),
                os: String::from("os"),
                arch: String::from("arch"),
                current_dir: PathBuf::from("current-dir"),
                main_separator: '/',
                username: String::from("username"),
                shell: String::from("shell"),
            }
        );
    }

    #[test]
    fn should_be_able_to_serialize_to_msgpack() {
        let info = SystemInfo {
            family: String::from("family"),
            os: String::from("os"),
            arch: String::from("arch"),
            current_dir: PathBuf::from("current-dir"),
            main_separator: '/',
            username: String::from("username"),
            shell: String::from("shell"),
        };

        // NOTE: We don't actually check the output here because it's an implementation detail
        // and could change as we change how serialization is done. This is merely to verify
        // that we can serialize since there are times when serde fails to serialize at
        // runtime.
        let _ = rmp_serde::encode::to_vec_named(&info).unwrap();
    }

    #[test]
    fn should_be_able_to_deserialize_from_msgpack() {
        // NOTE: It may seem odd that we are serializing just to deserialize, but this is to
        // verify that we are not corrupting or causing issues when serializing on a
        // client/server and then trying to deserialize on the other side. This has happened
        // enough times with minor changes that we need tests to verify.
        let buf = rmp_serde::encode::to_vec_named(&SystemInfo {
            family: String::from("family"),
            os: String::from("os"),
            arch: String::from("arch"),
            current_dir: PathBuf::from("current-dir"),
            main_separator: '/',
            username: String::from("username"),
            shell: String::from("shell"),
        })
        .unwrap();

        let info: SystemInfo = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(
            info,
            SystemInfo {
                family: String::from("family"),
                os: String::from("os"),
                arch: String::from("arch"),
                current_dir: PathBuf::from("current-dir"),
                main_separator: '/',
                username: String::from("username"),
                shell: String::from("shell"),
            }
        );
    }
}

@ -0,0 +1,130 @@
use serde::{Deserialize, Serialize};
use crate::common::{Capabilities, SemVer};
/// Represents version information reported by a server to a connecting client.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Version {
    /// General version of server (arbitrary format, e.g. "name x.y.z")
    pub server_version: String,
    /// Protocol version as a (major, minor, patch) tuple; independent of the
    /// crate version and used to judge protocol compatibility
    pub protocol_version: SemVer,
    /// Capabilities of the server
    pub capabilities: Capabilities,
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::common::Capability;

    /// Canonical `Version` fixture shared by every test below.
    fn sample() -> Version {
        Version {
            server_version: "some version".to_string(),
            protocol_version: (1, 2, 3),
            capabilities: [Capability {
                kind: "some kind".to_string(),
                description: "some description".to_string(),
            }]
            .into_iter()
            .collect(),
        }
    }

    /// JSON value matching the [`sample`] fixture field-for-field.
    fn sample_json() -> serde_json::Value {
        serde_json::json!({
            "server_version": "some version",
            "protocol_version": [1, 2, 3],
            "capabilities": [{
                "kind": "some kind",
                "description": "some description",
            }]
        })
    }

    #[test]
    fn should_be_able_to_serialize_to_json() {
        assert_eq!(serde_json::to_value(sample()).unwrap(), sample_json());
    }

    #[test]
    fn should_be_able_to_deserialize_from_json() {
        let version: Version = serde_json::from_value(sample_json()).unwrap();
        assert_eq!(version, sample());
    }

    #[test]
    fn should_be_able_to_serialize_to_msgpack() {
        // Only verify that serialization succeeds; the exact byte layout is an
        // implementation detail of rmp_serde and may change over time. There are
        // times when serde fails to serialize at runtime, so this guards that.
        let _ = rmp_serde::encode::to_vec_named(&sample()).unwrap();
    }

    #[test]
    fn should_be_able_to_deserialize_from_msgpack() {
        // Round-trip through msgpack to catch client/server (de)serialization
        // drift. This has happened enough times with minor changes that we need
        // tests to verify.
        let buf = rmp_serde::encode::to_vec_named(&sample()).unwrap();
        let version: Version = rmp_serde::decode::from_slice(&buf).unwrap();
        assert_eq!(version, sample());
    }
}

@ -0,0 +1,17 @@
mod common;
mod msg;
mod request;
mod response;
mod utils;
pub use common::*;
pub use msg::*;
pub use request::*;
pub use response::*;
/// Protocol version indicated by the tuple of (major, minor, patch).
///
/// This is different from the crate version, which matches that of the complete suite of distant
/// crates. Rather, this version is used to provide stability indicators when the protocol itself
/// changes across crate versions.
pub const PROTOCOL_VERSION: SemVer = (0, 1, 0);

@ -0,0 +1,192 @@
use derive_more::From;
use serde::{Deserialize, Serialize};
/// Represents a wrapper around a message, supporting single and batch payloads.
#[derive(Clone, Debug, From, PartialEq, Eq, Serialize, Deserialize)]
#[serde(untagged)]
pub enum Msg<T> {
    /// A single payload; serializes transparently as the payload itself.
    Single(T),
    /// A batch of payloads; serializes transparently as a sequence of payloads.
    Batch(Vec<T>),
}
impl<T> Msg<T> {
    /// Creates a new msg with a singular payload.
    #[inline]
    pub fn single(payload: T) -> Self {
        Self::Single(payload)
    }

    /// Creates a new msg with a batch payload collected from `payloads`.
    pub fn batch<I>(payloads: I) -> Self
    where
        I: IntoIterator<Item = T>,
    {
        Self::Batch(payloads.into_iter().collect())
    }

    /// Returns true if msg has a single payload.
    #[inline]
    pub fn is_single(&self) -> bool {
        matches!(self, Self::Single(_))
    }

    /// Returns reference to single value if msg is single variant.
    #[inline]
    pub fn as_single(&self) -> Option<&T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to single value if msg is single variant.
    ///
    /// NOTE: Fixed to return `Option<&mut T>`; the original returned
    /// `Option<&T>` despite taking `&mut self` and documenting mutability,
    /// mirroring [`Self::as_mut_batch`].
    #[inline]
    pub fn as_mut_single(&mut self) -> Option<&mut T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the single value if msg is single variant, consuming self.
    #[inline]
    pub fn into_single(self) -> Option<T> {
        match self {
            Self::Single(x) => Some(x),
            _ => None,
        }
    }

    /// Returns true if msg has a batch of payloads.
    #[inline]
    pub fn is_batch(&self) -> bool {
        matches!(self, Self::Batch(_))
    }

    /// Returns reference to batch value if msg is batch variant.
    #[inline]
    pub fn as_batch(&self) -> Option<&[T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns mutable reference to batch value if msg is batch variant.
    #[inline]
    pub fn as_mut_batch(&mut self) -> Option<&mut [T]> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Returns the batch value if msg is batch variant, consuming self.
    #[inline]
    pub fn into_batch(self) -> Option<Vec<T>> {
        match self {
            Self::Batch(x) => Some(x),
            _ => None,
        }
    }

    /// Convert into a collection of payload data; a single payload becomes a
    /// one-element vec.
    #[inline]
    pub fn into_vec(self) -> Vec<T> {
        match self {
            Self::Single(x) => vec![x],
            Self::Batch(x) => x,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    mod single {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(Msg::single("hello world")).unwrap();
            assert_eq!(value, serde_json::json!("hello world"));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let msg: Msg<String> =
                serde_json::from_value(serde_json::json!("hello world")).unwrap();
            assert_eq!(msg, Msg::single(String::from("hello world")));
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verify that serialization succeeds; the exact byte layout is
            // an implementation detail of rmp_serde and may change over time.
            let _ = rmp_serde::encode::to_vec_named(&Msg::single("hello world")).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trip through msgpack to catch client/server
            // (de)serialization drift between releases.
            let buf = rmp_serde::encode::to_vec_named(&Msg::single("hello world")).unwrap();
            let msg: Msg<String> = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(msg, Msg::single(String::from("hello world")));
        }
    }

    mod batch {
        use super::*;

        #[test]
        fn should_be_able_to_serialize_to_json() {
            let value = serde_json::to_value(Msg::batch(["hello world"])).unwrap();
            assert_eq!(value, serde_json::json!(["hello world"]));
        }

        #[test]
        fn should_be_able_to_deserialize_from_json() {
            let msg: Msg<String> =
                serde_json::from_value(serde_json::json!(["hello world"])).unwrap();
            assert_eq!(msg, Msg::batch([String::from("hello world")]));
        }

        #[test]
        fn should_be_able_to_serialize_to_msgpack() {
            // Only verify that serialization succeeds; the exact byte layout is
            // an implementation detail of rmp_serde and may change over time.
            let _ = rmp_serde::encode::to_vec_named(&Msg::batch(["hello world"])).unwrap();
        }

        #[test]
        fn should_be_able_to_deserialize_from_msgpack() {
            // Round-trip through msgpack to catch client/server
            // (de)serialization drift between releases.
            let buf = rmp_serde::encode::to_vec_named(&Msg::batch(["hello world"])).unwrap();
            let msg: Msg<String> = rmp_serde::decode::from_slice(&buf).unwrap();
            assert_eq!(msg, Msg::batch([String::from("hello world")]));
        }
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -1,6 +1,24 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
pub(crate) fn deserialize_u128_option<'de, D>(deserializer: D) -> Result<Option<u128>, D::Error> /// Used purely for skipping serialization of values that are false by default.
#[inline]
pub const fn is_false(value: &bool) -> bool {
!*value
}
/// Used purely for skipping serialization of values that are 1 by default.
#[inline]
pub const fn is_one(value: &usize) -> bool {
*value == 1
}
/// Used to provide a default serde value of 1.
#[inline]
pub const fn one() -> usize {
1
}
pub fn deserialize_u128_option<'de, D>(deserializer: D) -> Result<Option<u128>, D::Error>
where where
D: serde::Deserializer<'de>, D: serde::Deserializer<'de>,
{ {
@ -15,7 +33,7 @@ where
} }
} }
pub(crate) fn serialize_u128_option<S: serde::Serializer>( pub fn serialize_u128_option<S: serde::Serializer>(
val: &Option<u128>, val: &Option<u128>,
s: S, s: S,
) -> Result<S::Ok, S::Error> { ) -> Result<S::Ok, S::Error> {

@ -10,7 +10,7 @@ use async_trait::async_trait;
use distant_core::net::server::ConnectionCtx; use distant_core::net::server::ConnectionCtx;
use distant_core::protocol::{ use distant_core::protocol::{
Capabilities, CapabilityKind, DirEntry, Environment, FileType, Metadata, Permissions, Capabilities, CapabilityKind, DirEntry, Environment, FileType, Metadata, Permissions,
ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, ProcessId, PtySize, SetPermissionsOptions, SystemInfo, UnixMetadata, Version, PROTOCOL_VERSION,
}; };
use distant_core::{DistantApi, DistantCtx}; use distant_core::{DistantApi, DistantCtx};
use log::*; use log::*;
@ -79,22 +79,6 @@ impl DistantApi for SshDistantApi {
Ok(()) Ok(())
} }
async fn capabilities(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Capabilities> {
debug!("[Conn {}] Querying capabilities", ctx.connection_id);
let mut capabilities = Capabilities::all();
// Searching is not supported by ssh implementation
// TODO: Could we have external search using ripgrep's JSON lines API?
capabilities.take(CapabilityKind::Search);
capabilities.take(CapabilityKind::CancelSearch);
// Broken via wezterm-ssh, so not supported right now
capabilities.take(CapabilityKind::SetPermissions);
Ok(capabilities)
}
async fn read_file( async fn read_file(
&self, &self,
ctx: DistantCtx<Self::LocalData>, ctx: DistantCtx<Self::LocalData>,
@ -1013,4 +997,24 @@ impl DistantApi for SshDistantApi {
shell, shell,
}) })
} }
async fn version(&self, ctx: DistantCtx<Self::LocalData>) -> io::Result<Version> {
debug!("[Conn {}] Querying capabilities", ctx.connection_id);
let mut capabilities = Capabilities::all();
// Searching is not supported by ssh implementation
// TODO: Could we have external search using ripgrep's JSON lines API?
capabilities.take(CapabilityKind::Search);
capabilities.take(CapabilityKind::CancelSearch);
// Broken via wezterm-ssh, so not supported right now
capabilities.take(CapabilityKind::SetPermissions);
Ok(Version {
server_version: format!("{} {}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")),
protocol_version: PROTOCOL_VERSION,
capabilities,
})
}
} }

@ -57,7 +57,7 @@ where
if environment.is_empty() { if environment.is_empty() {
None None
} else { } else {
Some(environment.into_map()) Some(environment)
}, },
) )
.compat() .compat()
@ -143,7 +143,7 @@ where
if environment.is_empty() { if environment.is_empty() {
None None
} else { } else {
Some(environment.into_map()) Some(environment)
}, },
) )
.compat() .compat()

@ -7,7 +7,8 @@ use anyhow::Context;
use distant_core::net::common::{ConnectionId, Host, Map, Request, Response}; use distant_core::net::common::{ConnectionId, Host, Map, Request, Response};
use distant_core::net::manager::ManagerClient; use distant_core::net::manager::ManagerClient;
use distant_core::protocol::{ use distant_core::protocol::{
self, ChangeKindSet, FileType, Permissions, SearchQuery, SetPermissionsOptions, SystemInfo, self, Capabilities, ChangeKindSet, FileType, Permissions, SearchQuery, SetPermissionsOptions,
SystemInfo,
}; };
use distant_core::{DistantChannel, DistantChannelExt, RemoteCommand, Searcher, Watcher}; use distant_core::{DistantChannel, DistantChannelExt, RemoteCommand, Searcher, Watcher};
use log::*; use log::*;
@ -48,60 +49,6 @@ async fn read_cache(path: &Path) -> Cache {
async fn async_run(cmd: ClientSubcommand) -> CliResult { async fn async_run(cmd: ClientSubcommand) -> CliResult {
match cmd { match cmd {
ClientSubcommand::Capabilities {
cache,
connection,
format,
network,
} => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening raw channel to connection {}", connection_id);
let channel = client
.open_raw_channel(connection_id)
.await
.with_context(|| {
format!("Failed to open raw channel to connection {connection_id}")
})?;
debug!("Retrieving capabilities");
let capabilities = channel
.into_client()
.into_channel()
.capabilities()
.await
.with_context(|| {
format!("Failed to retrieve capabilities using connection {connection_id}")
})?;
match format {
Format::Shell => {
#[derive(Tabled)]
struct EntryRow {
kind: String,
description: String,
}
let table = Table::new(capabilities.into_sorted_vec().into_iter().map(|cap| {
EntryRow {
kind: cap.kind,
description: cap.description,
}
}))
.with(Style::ascii())
.with(Modify::new(Rows::new(..)).with(Alignment::left()))
.to_string();
println!("{table}");
}
Format::Json => println!("{}", serde_json::to_string(&capabilities).unwrap()),
}
}
ClientSubcommand::Connect { ClientSubcommand::Connect {
cache, cache,
destination, destination,
@ -402,7 +349,12 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
cmd.as_deref().unwrap_or(r"$SHELL") cmd.as_deref().unwrap_or(r"$SHELL")
); );
Shell::new(channel.into_client().into_channel()) Shell::new(channel.into_client().into_channel())
.spawn(cmd, environment, current_dir, MAX_PIPE_CHUNK_SIZE) .spawn(
cmd,
environment.into_map(),
current_dir,
MAX_PIPE_CHUNK_SIZE,
)
.await?; .await?;
} }
ClientSubcommand::Spawn { ClientSubcommand::Spawn {
@ -449,7 +401,12 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
environment, current_dir, cmd environment, current_dir, cmd
); );
Shell::new(channel.into_client().into_channel()) Shell::new(channel.into_client().into_channel())
.spawn(cmd, environment, current_dir, MAX_PIPE_CHUNK_SIZE) .spawn(
cmd,
environment.into_map(),
current_dir,
MAX_PIPE_CHUNK_SIZE,
)
.await?; .await?;
} else { } else {
debug!( debug!(
@ -457,7 +414,7 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
environment, current_dir, cmd environment, current_dir, cmd
); );
let mut proc = RemoteCommand::new() let mut proc = RemoteCommand::new()
.environment(environment) .environment(environment.into_map())
.current_dir(current_dir) .current_dir(current_dir)
.pty(None) .pty(None)
.spawn(channel.into_client().into_channel(), &cmd) .spawn(channel.into_client().into_channel(), &cmd)
@ -548,6 +505,114 @@ async fn async_run(cmd: ClientSubcommand) -> CliResult {
.context("Failed to write system information to stdout")?; .context("Failed to write system information to stdout")?;
out.flush().context("Failed to flush stdout")?; out.flush().context("Failed to flush stdout")?;
} }
ClientSubcommand::Version {
cache,
connection,
format,
network,
} => {
debug!("Connecting to manager");
let mut client = connect_to_manager(format, network).await?;
let mut cache = read_cache(&cache).await;
let connection_id =
use_or_lookup_connection_id(&mut cache, connection, &mut client).await?;
debug!("Opening raw channel to connection {}", connection_id);
let channel = client
.open_raw_channel(connection_id)
.await
.with_context(|| {
format!("Failed to open raw channel to connection {connection_id}")
})?;
debug!("Retrieving version information");
let version = channel
.into_client()
.into_channel()
.version()
.await
.with_context(|| {
format!("Failed to retrieve version using connection {connection_id}")
})?;
match format {
Format::Shell => {
let (major, minor, patch) = distant_core::protocol::PROTOCOL_VERSION;
println!(
"Client: {} {} (Protocol {major}.{minor}.{patch})",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION")
);
let (major, minor, patch) = version.protocol_version;
println!(
"Server: {} (Protocol {major}.{minor}.{patch})",
version.server_version
);
// Build a complete set of capabilities to show which ones we support
let client_capabilities = Capabilities::all();
let server_capabilities = version.capabilities;
let mut capabilities: Vec<String> = client_capabilities
.union(server_capabilities.as_ref())
.map(|cap| {
let kind = &cap.kind;
if client_capabilities.contains(kind)
&& server_capabilities.contains(kind)
{
format!("+{kind}")
} else {
format!("-{kind}")
}
})
.collect();
capabilities.sort_unstable();
// Figure out the text length of the longest capability
let max_len = capabilities.iter().map(|x| x.len()).max().unwrap_or(0);
if max_len > 0 {
const MAX_COLS: usize = 4;
// Determine how wide we have available to determine how many columns
// to use; if we don't have a terminal width, default to something
//
// Maximum columns we want to support is 4
let cols = match terminal_size::terminal_size() {
// If we have a tty, see how many we can fit including space char
//
// Ensure that we at least return 1 as cols
Some((width, _)) => std::cmp::max(width.0 as usize / (max_len + 1), 1),
// If we have no tty, default to 4 columns
None => MAX_COLS,
};
println!("Capabilities supported (+) or not (-):");
for chunk in capabilities.chunks(std::cmp::min(cols, MAX_COLS)) {
let cnt = chunk.len();
match cnt {
1 => println!("{:max_len$}", chunk[0]),
2 => println!("{:max_len$} {:max_len$}", chunk[0], chunk[1]),
3 => println!(
"{:max_len$} {:max_len$} {:max_len$}",
chunk[0], chunk[1], chunk[2]
),
4 => println!(
"{:max_len$} {:max_len$} {:max_len$} {:max_len$}",
chunk[0], chunk[1], chunk[2], chunk[3]
),
_ => unreachable!("Chunk of size {cnt} is not 1 > i <= {MAX_COLS}"),
}
}
}
}
Format::Json => {
println!("{}", serde_json::to_string(&version).unwrap())
}
}
}
ClientSubcommand::FileSystem(ClientFileSystemSubcommand::Copy { ClientSubcommand::FileSystem(ClientFileSystemSubcommand::Copy {
cache, cache,
connection, connection,

@ -156,10 +156,10 @@ fn format_shell(state: &mut FormatterState, data: protocol::Response) -> Output
"{}{}", "{}{}",
match change.kind { match change.kind {
ChangeKind::Create => "Following paths were created:\n", ChangeKind::Create => "Following paths were created:\n",
ChangeKind::Remove => "Following paths were removed:\n", ChangeKind::Delete => "Following paths were removed:\n",
x if x.is_access_kind() => "Following paths were accessed:\n", x if x.is_access() => "Following paths were accessed:\n",
x if x.is_modify_kind() => "Following paths were modified:\n", x if x.is_modify() => "Following paths were modified:\n",
x if x.is_rename_kind() => "Following paths were renamed:\n", x if x.is_rename() => "Following paths were renamed:\n",
_ => "Following paths were affected:\n", _ => "Following paths were affected:\n",
}, },
change change
@ -375,17 +375,23 @@ fn format_shell(state: &mut FormatterState, data: protocol::Response) -> Output
) )
.into_bytes(), .into_bytes(),
), ),
protocol::Response::Capabilities { supported } => { protocol::Response::Version(version) => {
#[derive(Tabled)] #[derive(Tabled)]
struct EntryRow { struct EntryRow {
kind: String, kind: String,
description: String, description: String,
} }
let table = Table::new(supported.into_sorted_vec().into_iter().map(|cap| EntryRow { let table = Table::new(
kind: cap.kind, version
description: cap.description, .capabilities
})) .into_sorted_vec()
.into_iter()
.map(|cap| EntryRow {
kind: cap.kind,
description: cap.description,
}),
)
.with(Style::ascii()) .with(Style::ascii())
.with(Modify::new(Rows::new(..)).with(Alignment::left())) .with(Modify::new(Rows::new(..)).with(Alignment::left()))
.to_string() .to_string()

@ -3,8 +3,6 @@ use std::{fs, io};
use anyhow::Context; use anyhow::Context;
use clap::CommandFactory; use clap::CommandFactory;
use clap_complete::generate as clap_generate; use clap_complete::generate as clap_generate;
use distant_core::net::common::{Request, Response};
use distant_core::protocol;
use crate::options::{Config, GenerateSubcommand}; use crate::options::{Config, GenerateSubcommand};
use crate::{CliResult, Options}; use crate::{CliResult, Options};
@ -20,35 +18,6 @@ async fn async_run(cmd: GenerateSubcommand) -> CliResult {
.await .await
.context("Failed to write default config to {file:?}")?, .context("Failed to write default config to {file:?}")?,
GenerateSubcommand::Schema { file } => {
let request_schema =
serde_json::to_value(&Request::<protocol::Msg<protocol::Request>>::root_schema())
.context("Failed to serialize request schema")?;
let response_schema =
serde_json::to_value(&Response::<protocol::Msg<protocol::Response>>::root_schema())
.context("Failed to serialize response schema")?;
let schema = serde_json::json!({
"request": request_schema,
"response": response_schema,
});
if let Some(path) = file {
serde_json::to_writer_pretty(
&mut fs::OpenOptions::new()
.create(true)
.write(true)
.open(&path)
.with_context(|| format!("Failed to open {path:?}"))?,
&schema,
)
.context("Failed to write to {path:?}")?;
} else {
serde_json::to_writer_pretty(&mut io::stdout(), &schema)
.context("Failed to print to stdout")?;
}
}
GenerateSubcommand::Completion { file, shell } => { GenerateSubcommand::Completion { file, shell } => {
let name = "distant"; let name = "distant";
let mut cmd = Options::command(); let mut cmd = Options::command();

@ -7,7 +7,7 @@ use clap_complete::Shell as ClapCompleteShell;
use derive_more::IsVariant; use derive_more::IsVariant;
use distant_core::net::common::{ConnectionId, Destination, Map, PortRange}; use distant_core::net::common::{ConnectionId, Destination, Map, PortRange};
use distant_core::net::server::Shutdown; use distant_core::net::server::Shutdown;
use distant_core::protocol::{ChangeKind, Environment}; use distant_core::protocol::ChangeKind;
use service_manager::ServiceManagerKind; use service_manager::ServiceManagerKind;
use crate::constants; use crate::constants;
@ -103,9 +103,6 @@ impl Options {
network.merge(config.client.network); network.merge(config.client.network);
*timeout = timeout.take().or(config.client.api.timeout); *timeout = timeout.take().or(config.client.api.timeout);
} }
ClientSubcommand::Capabilities { network, .. } => {
network.merge(config.client.network);
}
ClientSubcommand::Connect { ClientSubcommand::Connect {
network, options, .. network, options, ..
} => { } => {
@ -153,6 +150,9 @@ impl Options {
ClientSubcommand::SystemInfo { network, .. } => { ClientSubcommand::SystemInfo { network, .. } => {
network.merge(config.client.network); network.merge(config.client.network);
} }
ClientSubcommand::Version { network, .. } => {
network.merge(config.client.network);
}
} }
} }
DistantSubcommand::Generate(_) => { DistantSubcommand::Generate(_) => {
@ -263,28 +263,6 @@ pub enum ClientSubcommand {
network: NetworkSettings, network: NetworkSettings,
}, },
/// Retrieves capabilities of the remote server
Capabilities {
/// Location to store cached data
#[clap(
long,
value_hint = ValueHint::FilePath,
value_parser,
default_value = CACHE_FILE_PATH_STR.as_str()
)]
cache: PathBuf,
/// Specify a connection being managed
#[clap(long)]
connection: Option<ConnectionId>,
#[clap(flatten)]
network: NetworkSettings,
#[clap(short, long, default_value_t, value_enum)]
format: Format,
},
/// Requests that active manager connects to the server at the specified destination /// Requests that active manager connects to the server at the specified destination
Connect { Connect {
/// Location to store cached data /// Location to store cached data
@ -392,7 +370,7 @@ pub enum ClientSubcommand {
/// Environment variables to provide to the shell /// Environment variables to provide to the shell
#[clap(long, default_value_t)] #[clap(long, default_value_t)]
environment: Environment, environment: Map,
/// Optional command to run instead of $SHELL /// Optional command to run instead of $SHELL
#[clap(name = "CMD", last = true)] #[clap(name = "CMD", last = true)]
@ -434,7 +412,7 @@ pub enum ClientSubcommand {
/// Environment variables to provide to the shell /// Environment variables to provide to the shell
#[clap(long, default_value_t)] #[clap(long, default_value_t)]
environment: Environment, environment: Map,
/// Command to run /// Command to run
#[clap(name = "CMD", num_args = 1.., last = true)] #[clap(name = "CMD", num_args = 1.., last = true)]
@ -458,12 +436,33 @@ pub enum ClientSubcommand {
#[clap(flatten)] #[clap(flatten)]
network: NetworkSettings, network: NetworkSettings,
}, },
/// Retrieves version information of the remote server
Version {
/// Location to store cached data
#[clap(
long,
value_hint = ValueHint::FilePath,
value_parser,
default_value = CACHE_FILE_PATH_STR.as_str()
)]
cache: PathBuf,
/// Specify a connection being managed
#[clap(long)]
connection: Option<ConnectionId>,
#[clap(flatten)]
network: NetworkSettings,
#[clap(short, long, default_value_t, value_enum)]
format: Format,
},
} }
impl ClientSubcommand { impl ClientSubcommand {
pub fn cache_path(&self) -> &Path { pub fn cache_path(&self) -> &Path {
match self { match self {
Self::Capabilities { cache, .. } => cache.as_path(),
Self::Connect { cache, .. } => cache.as_path(), Self::Connect { cache, .. } => cache.as_path(),
Self::FileSystem(fs) => fs.cache_path(), Self::FileSystem(fs) => fs.cache_path(),
Self::Launch { cache, .. } => cache.as_path(), Self::Launch { cache, .. } => cache.as_path(),
@ -471,12 +470,12 @@ impl ClientSubcommand {
Self::Shell { cache, .. } => cache.as_path(), Self::Shell { cache, .. } => cache.as_path(),
Self::Spawn { cache, .. } => cache.as_path(), Self::Spawn { cache, .. } => cache.as_path(),
Self::SystemInfo { cache, .. } => cache.as_path(), Self::SystemInfo { cache, .. } => cache.as_path(),
Self::Version { cache, .. } => cache.as_path(),
} }
} }
pub fn network_settings(&self) -> &NetworkSettings { pub fn network_settings(&self) -> &NetworkSettings {
match self { match self {
Self::Capabilities { network, .. } => network,
Self::Connect { network, .. } => network, Self::Connect { network, .. } => network,
Self::FileSystem(fs) => fs.network_settings(), Self::FileSystem(fs) => fs.network_settings(),
Self::Launch { network, .. } => network, Self::Launch { network, .. } => network,
@ -484,6 +483,7 @@ impl ClientSubcommand {
Self::Shell { network, .. } => network, Self::Shell { network, .. } => network,
Self::Spawn { network, .. } => network, Self::Spawn { network, .. } => network,
Self::SystemInfo { network, .. } => network, Self::SystemInfo { network, .. } => network,
Self::Version { network, .. } => network,
} }
} }
} }
@ -894,13 +894,6 @@ pub enum GenerateSubcommand {
file: PathBuf, file: PathBuf,
}, },
/// Generate JSON schema for server request/response
Schema {
/// If specified, will output to the file at the given path instead of stdout
#[clap(long)]
file: Option<PathBuf>,
},
// Generate completion info for CLI // Generate completion info for CLI
Completion { Completion {
/// If specified, will output to the file at the given path instead of stdout /// If specified, will output to the file at the given path instead of stdout
@ -1272,7 +1265,7 @@ mod tests {
log_file: None, log_file: None,
log_level: None, log_level: None,
}, },
command: DistantSubcommand::Client(ClientSubcommand::Capabilities { command: DistantSubcommand::Client(ClientSubcommand::Version {
cache: PathBuf::new(), cache: PathBuf::new(),
connection: None, connection: None,
network: NetworkSettings { network: NetworkSettings {
@ -1309,7 +1302,7 @@ mod tests {
log_file: Some(PathBuf::from("config-log-file")), log_file: Some(PathBuf::from("config-log-file")),
log_level: Some(LogLevel::Trace), log_level: Some(LogLevel::Trace),
}, },
command: DistantSubcommand::Client(ClientSubcommand::Capabilities { command: DistantSubcommand::Client(ClientSubcommand::Version {
cache: PathBuf::new(), cache: PathBuf::new(),
connection: None, connection: None,
network: NetworkSettings { network: NetworkSettings {
@ -1330,7 +1323,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")), log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info), log_level: Some(LogLevel::Info),
}, },
command: DistantSubcommand::Client(ClientSubcommand::Capabilities { command: DistantSubcommand::Client(ClientSubcommand::Version {
cache: PathBuf::new(), cache: PathBuf::new(),
connection: None, connection: None,
network: NetworkSettings { network: NetworkSettings {
@ -1367,7 +1360,7 @@ mod tests {
log_file: Some(PathBuf::from("cli-log-file")), log_file: Some(PathBuf::from("cli-log-file")),
log_level: Some(LogLevel::Info), log_level: Some(LogLevel::Info),
}, },
command: DistantSubcommand::Client(ClientSubcommand::Capabilities { command: DistantSubcommand::Client(ClientSubcommand::Version {
cache: PathBuf::new(), cache: PathBuf::new(),
connection: None, connection: None,
network: NetworkSettings { network: NetworkSettings {
@ -1666,7 +1659,7 @@ mod tests {
windows_pipe: None, windows_pipe: None,
}, },
current_dir: None, current_dir: None,
environment: map!(), environment: Default::default(),
cmd: None, cmd: None,
}), }),
}; };

@ -1,4 +1,3 @@
mod capabilities;
mod copy; mod copy;
mod dir_create; mod dir_create;
mod dir_read; mod dir_read;
@ -15,4 +14,5 @@ mod remove;
mod rename; mod rename;
mod search; mod search;
mod system_info; mod system_info;
mod version;
mod watch; mod watch;

@ -54,18 +54,12 @@ async fn should_support_json_search_filesystem_using_query(
{ {
"type": "contents", "type": "contents",
"path": root.child("file2.txt").to_string_lossy(), "path": root.child("file2.txt").to_string_lossy(),
"lines": { "lines": "textual\n",
"type": "text",
"value": "textual\n",
},
"line_number": 3, "line_number": 3,
"absolute_offset": 9, "absolute_offset": 9,
"submatches": [ "submatches": [
{ {
"match": { "match": "ua",
"type": "text",
"value": "ua",
},
"start": 4, "start": 4,
"end": 6, "end": 6,
} }

@ -1,4 +1,4 @@
use distant_core::protocol::{Capabilities, Capability}; use distant_core::protocol::{Capabilities, Capability, SemVer, PROTOCOL_VERSION};
use rstest::*; use rstest::*;
use serde_json::json; use serde_json::json;
use test_log::test; use test_log::test;
@ -13,15 +13,19 @@ async fn should_support_json_capabilities(mut api_process: CtxCommand<ApiProcess
let id = rand::random::<u64>().to_string(); let id = rand::random::<u64>().to_string();
let req = json!({ let req = json!({
"id": id, "id": id,
"payload": { "type": "capabilities" }, "payload": { "type": "version" },
}); });
let res = api_process.write_and_read_json(req).await.unwrap().unwrap(); let res = api_process.write_and_read_json(req).await.unwrap().unwrap();
assert_eq!(res["origin_id"], id, "JSON: {res}"); assert_eq!(res["origin_id"], id, "JSON: {res}");
assert_eq!(res["payload"]["type"], "capabilities", "JSON: {res}"); assert_eq!(res["payload"]["type"], "version", "JSON: {res}");
let supported: Capabilities = res["payload"]["supported"] let protocol_version: SemVer =
serde_json::from_value(res["payload"]["protocol_version"].clone()).unwrap();
assert_eq!(protocol_version, PROTOCOL_VERSION);
let capabilities: Capabilities = res["payload"]["capabilities"]
.as_array() .as_array()
.expect("Field 'supported' was not an array") .expect("Field 'supported' was not an array")
.iter() .iter()
@ -33,5 +37,5 @@ async fn should_support_json_capabilities(mut api_process: CtxCommand<ApiProcess
// NOTE: Our local server api should always support all capabilities since it is the reference // NOTE: Our local server api should always support all capabilities since it is the reference
// implementation for our api // implementation for our api
assert_eq!(supported, Capabilities::all()); assert_eq!(capabilities, Capabilities::all());
} }

@ -1,68 +0,0 @@
use indoc::indoc;
use rstest::*;
use crate::cli::fixtures::*;
const EXPECTED_TABLE: &str = indoc! {"
+------------------+------------------------------------------------------------------+
| kind | description |
+------------------+------------------------------------------------------------------+
| cancel_search | Supports canceling an active search against the filesystem |
+------------------+------------------------------------------------------------------+
| capabilities | Supports retrieving capabilities |
+------------------+------------------------------------------------------------------+
| copy | Supports copying files, directories, and symlinks |
+------------------+------------------------------------------------------------------+
| dir_create | Supports creating directory |
+------------------+------------------------------------------------------------------+
| dir_read | Supports reading directory |
+------------------+------------------------------------------------------------------+
| exists | Supports checking if a path exists |
+------------------+------------------------------------------------------------------+
| file_append | Supports appending to binary file |
+------------------+------------------------------------------------------------------+
| file_append_text | Supports appending to text file |
+------------------+------------------------------------------------------------------+
| file_read | Supports reading binary file |
+------------------+------------------------------------------------------------------+
| file_read_text | Supports reading text file |
+------------------+------------------------------------------------------------------+
| file_write | Supports writing binary file |
+------------------+------------------------------------------------------------------+
| file_write_text | Supports writing text file |
+------------------+------------------------------------------------------------------+
| metadata | Supports retrieving metadata about a file, directory, or symlink |
+------------------+------------------------------------------------------------------+
| proc_kill | Supports killing a spawned process |
+------------------+------------------------------------------------------------------+
| proc_resize_pty | Supports resizing the pty of a spawned process |
+------------------+------------------------------------------------------------------+
| proc_spawn | Supports spawning a process |
+------------------+------------------------------------------------------------------+
| proc_stdin | Supports sending stdin to a spawned process |
+------------------+------------------------------------------------------------------+
| remove | Supports removing files, directories, and symlinks |
+------------------+------------------------------------------------------------------+
| rename | Supports renaming files, directories, and symlinks |
+------------------+------------------------------------------------------------------+
| search | Supports searching filesystem using queries |
+------------------+------------------------------------------------------------------+
| set_permissions | Supports setting permissions on a file, directory, or symlink |
+------------------+------------------------------------------------------------------+
| system_info | Supports retrieving system information |
+------------------+------------------------------------------------------------------+
| unwatch | Supports unwatching filesystem for changes |
+------------------+------------------------------------------------------------------+
| watch | Supports watching filesystem for changes |
+------------------+------------------------------------------------------------------+
"};
#[rstest]
#[test_log::test]
fn should_output_capabilities(ctx: DistantManagerCtx) {
ctx.cmd("capabilities")
.assert()
.success()
.stdout(EXPECTED_TABLE)
.stderr("");
}

@ -1,4 +1,3 @@
mod capabilities;
mod fs_copy; mod fs_copy;
mod fs_exists; mod fs_exists;
mod fs_make_dir; mod fs_make_dir;
@ -12,3 +11,4 @@ mod fs_watch;
mod fs_write; mod fs_write;
mod spawn; mod spawn;
mod system_info; mod system_info;
mod version;

@ -0,0 +1,34 @@
use distant_core::protocol::PROTOCOL_VERSION;
use rstest::*;
use crate::cli::fixtures::*;
use crate::cli::utils::TrimmedLinesMatchPredicate;
#[rstest]
#[test_log::test]
fn should_output_version(ctx: DistantManagerCtx) {
    // Because all of our crates have the same version, we can expect it to match
    let package_name = "distant-core";
    let package_version = env!("CARGO_PKG_VERSION");
    let (major, minor, patch) = PROTOCOL_VERSION;

    // Since our client and server are built the same, all capabilities should be listed with +
    // and using 4 columns since we are not using a tty
    let expected = indoc::formatdoc! {"
        Client: distant {package_version} (Protocol {major}.{minor}.{patch})
        Server: {package_name} {package_version} (Protocol {major}.{minor}.{patch})
        Capabilities supported (+) or not (-):
        +cancel_search +copy +dir_create +dir_read
        +exists +file_append +file_append_text +file_read
        +file_read_text +file_write +file_write_text +metadata
        +proc_kill +proc_resize_pty +proc_spawn +proc_stdin
        +remove +rename +search +set_permissions
        +system_info +unwatch +version +watch
    "};

    // NOTE(review): comparison trims each line (see TrimmedLinesMatchPredicate),
    // so per-line leading/trailing padding in the real output is not significant,
    // but interior column spacing is compared literally.
    ctx.cmd("version")
        .assert()
        .success()
        .stdout(TrimmedLinesMatchPredicate::new(expected))
        .stderr("");
}

@ -1,9 +1,12 @@
use predicates::prelude::*; use ::predicates::prelude::*;
mod predicates;
mod reader; mod reader;
pub use self::predicates::TrimmedLinesMatchPredicate;
pub use reader::ThreadedReader; pub use reader::ThreadedReader;
/// Produces a regex predicate using the given string /// Produces a regex predicate using the given string
pub fn regex_pred(s: &str) -> predicates::str::RegexPredicate { pub fn regex_pred(s: &str) -> ::predicates::str::RegexPredicate {
predicate::str::is_match(s).unwrap() predicate::str::is_match(s).unwrap()
} }

@ -0,0 +1,50 @@
use predicates::reflection::PredicateReflection;
use predicates::Predicate;
use std::fmt;
/// Predicate that checks if the lines of an input string match the lines of
/// a provided pattern, trimming leading/trailing whitespace from each line
/// of both sides before comparing.
///
/// Useful for asserting on CLI stdout where per-line padding is irrelevant.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TrimmedLinesMatchPredicate {
    // Expected multi-line text; compared against the input line-by-line.
    pattern: String,
}
impl TrimmedLinesMatchPredicate {
    /// Builds a predicate from anything convertible into an owned pattern string.
    pub fn new(pattern: impl Into<String>) -> Self {
        let pattern = pattern.into();
        Self { pattern }
    }
}
impl fmt::Display for TrimmedLinesMatchPredicate {
    // Human-readable description surfaced by the `predicates` machinery
    // when an assertion using this predicate fails.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "trimmed_lines expects {}", self.pattern)
    }
}
impl Predicate<str> for TrimmedLinesMatchPredicate {
    /// Succeeds only when `variable` contains exactly the same number of
    /// lines as the pattern and every corresponding pair of lines is equal
    /// once both sides are trimmed of leading/trailing whitespace.
    fn eval(&self, variable: &str) -> bool {
        let mut wanted = self.pattern.lines();
        let mut given = variable.lines();
        loop {
            match (wanted.next(), given.next()) {
                // Both sides yielded a line: continue only if they agree
                // after trimming.
                (Some(w), Some(g)) if w.trim() == g.trim() => continue,
                // Both iterators exhausted together: everything matched.
                (None, None) => return true,
                // Trimmed contents differed, or the line counts differ.
                _ => return false,
            }
        }
    }
}
// Marker impl required by the `Predicate` trait bound; the default
// (parameter-less) reflection behavior is sufficient here.
impl PredicateReflection for TrimmedLinesMatchPredicate {}
Loading…
Cancel
Save