diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 659bf211e..3c289037d 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -55,6 +55,7 @@ # Experimental features - [bootc image](experimental-bootc-image.md) +- [--progress-fd](experimental-progress-fd.md) # More information diff --git a/docs/src/bootc-via-api.md b/docs/src/bootc-via-api.md index fdfdf7939..c2a5ea779 100644 --- a/docs/src/bootc-via-api.md +++ b/docs/src/bootc-via-api.md @@ -7,7 +7,7 @@ are stable and will not change. ## Using `bootc edit` and `bootc status --json` While bootc does not depend on Kubernetes, it does currently -also offere a Kubernetes *style* API, especially oriented +also offer a Kubernetes *style* API, especially oriented towards the [spec and status and other conventions](https://kubernetes.io/docs/reference/using-api/api-concepts/). In general, most use cases of driving bootc via API are probably diff --git a/docs/src/experimental-progress-fd.md b/docs/src/experimental-progress-fd.md new file mode 100644 index 000000000..86ac8d151 --- /dev/null +++ b/docs/src/experimental-progress-fd.md @@ -0,0 +1,32 @@ + +# Interactive progress with `--progress-fd` + +This is an experimental feature; tracking issue: + +While the `bootc status` tooling allows a client to discover the state +of the system, during interactive changes such as `bootc upgrade` +or `bootc switch` it is possible to monitor the status of downloads +or other operations at a fine-grained level with `--progress-fd`. + +The format of data output over `--progress-fd` is [JSON Lines](https://jsonlines.org) +which is a series of JSON objects separated by newlines (the intermediate +JSON content is guaranteed not to contain a literal newline). + +You can find the JSON schema describing this version here: +[progress-v0.schema.json](progress-v0.schema.json). + +Deploying a new image with either switch or upgrade consists +of three stages: `pulling`, `importing`, and `staging`. 
The `pulling` step +downloads the image from the registry, offering per-layer and progress in +each message. The `importing` step imports the image into storage and consists +of a single step. Finally, `staging` runs a variety of staging +tasks. Currently, they are staging the image to disk, pulling bound images, +and removing old images. + +Note that new stages or fields may be added at any time. + +Importing and staging are affected by disk speed and the total image size. Pulling +is affected by network speed and how many layers invalidate between pulls. +Therefore, a large image with a good caching strategy will have longer +importing and staging times, and a small bespoke container image will have +negligible importing and staging times. \ No newline at end of file diff --git a/docs/src/progress-v0.schema.json b/docs/src/progress-v0.schema.json new file mode 100644 index 000000000..d37473cf6 --- /dev/null +++ b/docs/src/progress-v0.schema.json @@ -0,0 +1,239 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Event", + "description": "An event emitted as JSON.", + "oneOf": [ + { + "type": "object", + "required": [ + "type", + "version" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "Start" + ] + }, + "version": { + "description": "The semantic version of the progress protocol.", + "type": "string" + } + } + }, + { + "description": "An incremental update to a container image layer download", + "type": "object", + "required": [ + "bytes", + "bytes_cached", + "bytes_total", + "description", + "id", + "steps", + "steps_cached", + "steps_total", + "subtasks", + "task", + "type" + ], + "properties": { + "bytes": { + "description": "The number of bytes already fetched.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "bytes_cached": { + "description": "The number of bytes fetched by a previous run.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "bytes_total": { + "description": 
"Total number of bytes. If zero, then this should be considered \"unspecified\".", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "description": { + "description": "A human readable description of the task if i18n is not available.", + "type": "string" + }, + "id": { + "description": "A human and machine readable unique identifier for the task (e.g., the image name). For tasks that only happen once, it can be set to the same value as task.", + "type": "string" + }, + "steps": { + "description": "The initial position of progress.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "steps_cached": { + "description": "The number of steps fetched by a previous run.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "steps_total": { + "description": "The total number of steps (e.g. container image layers, RPMs)", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "subtasks": { + "description": "The currently running subtasks.", + "type": "array", + "items": { + "$ref": "#/definitions/SubTaskBytes" + } + }, + "task": { + "description": "A machine readable type (e.g., pulling) for the task (used for i18n and UI customization).", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ProgressBytes" + ] + } + } + }, + { + "description": "An incremental update with discrete steps", + "type": "object", + "required": [ + "description", + "id", + "steps", + "steps_cached", + "steps_total", + "subtasks", + "task", + "type" + ], + "properties": { + "description": { + "description": "A human readable description of the task if i18n is not available.", + "type": "string" + }, + "id": { + "description": "A human and machine readable unique identifier for the task (e.g., the image name). 
For tasks that only happen once, it can be set to the same value as task.", + "type": "string" + }, + "steps": { + "description": "The initial position of progress.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "steps_cached": { + "description": "The number of steps fetched by a previous run.", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "steps_total": { + "description": "The total number of steps (e.g. container image layers, RPMs)", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "subtasks": { + "description": "The currently running subtasks.", + "type": "array", + "items": { + "$ref": "#/definitions/SubTaskStep" + } + }, + "task": { + "description": "A machine readable type (e.g., pulling) for the task (used for i18n and UI customization).", + "type": "string" + }, + "type": { + "type": "string", + "enum": [ + "ProgressSteps" + ] + } + } + } + ], + "definitions": { + "SubTaskBytes": { + "description": "An incremental update to e.g. a container image layer download. The first time a given \"subtask\" name is seen, a new progress bar should be created. If bytes == bytes_total, then the subtask is considered complete.", + "type": "object", + "required": [ + "bytes", + "bytesCached", + "bytesTotal", + "description", + "id", + "subtask" + ], + "properties": { + "bytes": { + "description": "Updated byte level progress", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "bytesCached": { + "description": "The number of bytes fetched by a previous run (e.g., zstd_chunked).", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "bytesTotal": { + "description": "Total number of bytes", + "type": "integer", + "format": "uint64", + "minimum": 0.0 + }, + "description": { + "description": "A human readable description of the task if i18n is not available. 
(e.g., \"OSTree Chunk:\", \"Derived Layer:\")", + "type": "string" + }, + "id": { + "description": "A human and machine readable identifier for the task (e.g., ostree chunk/layer hash).", + "type": "string" + }, + "subtask": { + "description": "A machine readable type for the task (used for i18n). (e.g., \"ostree_chunk\", \"ostree_derived\")", + "type": "string" + } + } + }, + "SubTaskStep": { + "description": "Marks the beginning and end of a dictrete step", + "type": "object", + "required": [ + "completed", + "description", + "id", + "subtask" + ], + "properties": { + "completed": { + "description": "Starts as false when beginning to execute and turns true when completed.", + "type": "boolean" + }, + "description": { + "description": "A human readable description of the task if i18n is not available. (e.g., \"OSTree Chunk:\", \"Derived Layer:\")", + "type": "string" + }, + "id": { + "description": "A human and machine readable identifier for the task (e.g., ostree chunk/layer hash).", + "type": "string" + }, + "subtask": { + "description": "A machine readable type for the task (used for i18n). (e.g., \"ostree_chunk\", \"ostree_derived\")", + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/lib/src/cli.rs b/lib/src/cli.rs index b0b4a54e3..63bb9caf9 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -37,8 +37,8 @@ pub(crate) struct ProgressOptions { /// /// Interactive progress will be written to this file descriptor as "JSON lines" /// format, where each value is separated by a newline. - #[clap(long)] - pub(crate) json_fd: Option, + #[clap(long, hide = true)] + pub(crate) progress_fd: Option, } impl TryFrom for ProgressWriter { @@ -46,7 +46,7 @@ impl TryFrom for ProgressWriter { fn try_from(value: ProgressOptions) -> Result { let r = value - .json_fd + .progress_fd .map(TryInto::try_into) .transpose()? 
.unwrap_or_default(); @@ -359,6 +359,12 @@ pub(crate) enum ImageOpts { Cmd(ImageCmdOpts), } +#[derive(Debug, Clone, clap::ValueEnum, PartialEq, Eq)] +pub(crate) enum SchemaType { + Host, + Progress, +} + /// Hidden, internal only options #[derive(Debug, clap::Subcommand, PartialEq, Eq)] pub(crate) enum InternalsOpts { @@ -371,7 +377,10 @@ pub(crate) enum InternalsOpts { }, FixupEtcFstab, /// Should only be used by `make update-generated` - PrintJsonSchema, + PrintJsonSchema { + #[clap(long)] + of: SchemaType, + }, /// Perform cleanup actions Cleanup, /// Proxy frontend for the `ostree-ext` CLI. @@ -1090,8 +1099,11 @@ async fn run_from_opt(opt: Opt) -> Result<()> { .await } InternalsOpts::FixupEtcFstab => crate::deploy::fixup_etc_fstab(&root), - InternalsOpts::PrintJsonSchema => { - let schema = schema_for!(crate::spec::Host); + InternalsOpts::PrintJsonSchema { of } => { + let schema = match of { + SchemaType::Host => schema_for!(crate::spec::Host), + SchemaType::Progress => schema_for!(crate::progress_jsonl::Event), + }; let mut stdout = std::io::stdout().lock(); serde_json::to_writer_pretty(&mut stdout, &schema)?; Ok(()) diff --git a/lib/src/deploy.rs b/lib/src/deploy.rs index 88d44d9a4..5d932b8b0 100644 --- a/lib/src/deploy.rs +++ b/lib/src/deploy.rs @@ -21,7 +21,7 @@ use ostree_ext::ostree::{self, Sysroot}; use ostree_ext::sysroot::SysrootLock; use ostree_ext::tokio_util::spawn_blocking_cancellable_flatten; -use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep, API_VERSION}; +use crate::progress_jsonl::{Event, ProgressWriter, SubTaskBytes, SubTaskStep}; use crate::spec::ImageReference; use crate::spec::{BootOrder, HostSpec}; use crate::status::labels_of_config; @@ -149,7 +149,7 @@ async fn handle_layer_progress_print( bytes_total: u64, prog: ProgressWriter, quiet: bool, -) { +) -> ProgressWriter { let start = std::time::Instant::now(); let mut total_read = 0u64; let bar = indicatif::MultiProgress::new(); @@ -214,7 +214,6 @@ async fn 
handle_layer_progress_print( subtask.bytes = layer_size; subtasks.push(subtask.clone()); prog.send(Event::ProgressBytes { - api_version: API_VERSION.into(), task: "pulling".into(), description: format!("Pulling Image: {digest}").into(), id: (*digest).into(), @@ -245,7 +244,6 @@ async fn handle_layer_progress_print( byte_bar.set_position(bytes.fetched); subtask.bytes = byte_bar.position(); prog.send_lossy(Event::ProgressBytes { - api_version: API_VERSION.into(), task: "pulling".into(), description: format!("Pulling Image: {digest}").into(), id: (*digest).into(), @@ -283,7 +281,6 @@ async fn handle_layer_progress_print( // use as a heuristic to begin import progress // Cannot be lossy or it is dropped prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "importing".into(), description: "Importing Image".into(), id: (*digest).into(), @@ -299,6 +296,9 @@ async fn handle_layer_progress_print( .into(), }) .await; + + // Return the writer + prog } /// Wrapper for pulling a container image, wiring up status output. 
@@ -333,7 +333,6 @@ pub(crate) async fn pull( let bytes_to_fetch: u64 = layers_to_fetch.iter().map(|(l, _)| l.layer.size()).sum(); let bytes_total: u64 = prep.all_layers().map(|l| l.layer.size()).sum(); - let prog_print = prog.clone(); let digest = prep.manifest_digest.clone(); let digest_imp = prep.manifest_digest.clone(); let layer_progress = imp.request_progress(); @@ -347,16 +346,15 @@ pub(crate) async fn pull( layers_total, bytes_to_fetch, bytes_total, - prog_print, + prog, quiet, ) .await }); let import = imp.import(prep).await; - let _ = printer.await; + let prog = printer.await?; // Both the progress and the import are done, so import is done as well prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "importing".into(), description: "Importing Image".into(), id: digest_imp.clone().as_ref().into(), @@ -571,7 +569,6 @@ pub(crate) async fn stage( }; let mut subtasks = vec![]; prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "staging".into(), description: "Deploying Image".into(), id: image.manifest_digest.clone().as_ref().into(), @@ -594,7 +591,6 @@ pub(crate) async fn stage( subtask.description = "Deploying Image".into(); subtask.completed = false; prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "staging".into(), description: "Deploying Image".into(), id: image.manifest_digest.clone().as_ref().into(), @@ -625,7 +621,6 @@ pub(crate) async fn stage( subtask.description = "Pulling Bound Images".into(); subtask.completed = false; prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "staging".into(), description: "Deploying Image".into(), id: image.manifest_digest.clone().as_ref().into(), @@ -648,7 +643,6 @@ pub(crate) async fn stage( subtask.description = "Removing old images".into(); subtask.completed = false; prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "staging".into(), description: "Deploying Image".into(), id: 
image.manifest_digest.clone().as_ref().into(), @@ -672,7 +666,6 @@ pub(crate) async fn stage( subtask.completed = true; subtasks.push(subtask.clone()); prog.send(Event::ProgressSteps { - api_version: API_VERSION.into(), task: "staging".into(), description: "Deploying Image".into(), id: image.manifest_digest.clone().as_ref().into(), diff --git a/lib/src/progress_jsonl.rs b/lib/src/progress_jsonl.rs index acee12847..b23406edf 100644 --- a/lib/src/progress_jsonl.rs +++ b/lib/src/progress_jsonl.rs @@ -2,6 +2,7 @@ //! see . use anyhow::Result; +use schemars::JsonSchema; use serde::Serialize; use std::borrow::Cow; use std::os::fd::{FromRawFd, OwnedFd, RawFd}; @@ -15,12 +16,15 @@ use tokio::sync::Mutex; // Maximum number of times per second that an event will be written. const REFRESH_HZ: u16 = 5; -pub const API_VERSION: &str = "org.containers.bootc.progress/v1"; +/// Semantic version of the protocol. +const API_VERSION: &str = "0.1.0"; /// An incremental update to e.g. a container image layer download. /// The first time a given "subtask" name is seen, a new progress bar should be created. /// If bytes == bytes_total, then the subtask is considered complete. -#[derive(Debug, serde::Serialize, serde::Deserialize, Default, Clone)] +#[derive( + Debug, serde::Serialize, serde::Deserialize, Default, Clone, JsonSchema, PartialEq, Eq, +)] #[serde(rename_all = "camelCase")] pub struct SubTaskBytes<'t> { /// A machine readable type for the task (used for i18n). @@ -44,7 +48,9 @@ pub struct SubTaskBytes<'t> { } /// Marks the beginning and end of a dictrete step -#[derive(Debug, serde::Serialize, serde::Deserialize, Default, Clone)] +#[derive( + Debug, serde::Serialize, serde::Deserialize, Default, Clone, JsonSchema, PartialEq, Eq, +)] #[serde(rename_all = "camelCase")] pub struct SubTaskStep<'t> { /// A machine readable type for the task (used for i18n). @@ -64,18 +70,20 @@ pub struct SubTaskStep<'t> { } /// An event emitted as JSON. 
-#[derive(Debug, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, JsonSchema, PartialEq, Eq)] #[serde( tag = "type", rename_all = "PascalCase", rename_all_fields = "camelCase" )] pub enum Event<'t> { + Start { + /// The semantic version of the progress protocol. + #[serde(borrow)] + version: Cow<'t, str>, + }, /// An incremental update to a container image layer download ProgressBytes { - /// The version of the progress event format. - #[serde(borrow)] - api_version: Cow<'t, str>, /// A machine readable type (e.g., pulling) for the task (used for i18n /// and UI customization). #[serde(borrow)] @@ -105,9 +113,6 @@ pub enum Event<'t> { }, /// An incremental update with discrete steps ProgressSteps { - /// The version of the progress event format. - #[serde(borrow)] - api_version: Cow<'t, str>, /// A machine readable type (e.g., pulling) for the task (used for i18n /// and UI customization). #[serde(borrow)] @@ -149,6 +154,8 @@ impl FromStr for RawProgressFd { #[derive(Debug)] struct ProgressWriterInner { + /// true if we sent the initial Start message + sent_start: bool, last_write: Option, fd: BufWriter, } @@ -170,6 +177,7 @@ impl TryFrom for ProgressWriter { impl From for ProgressWriter { fn from(value: Sender) -> Self { let inner = ProgressWriterInner { + sent_start: false, last_write: None, fd: BufWriter::new(value), }; @@ -189,6 +197,18 @@ impl TryFrom for ProgressWriter { } impl ProgressWriter { + /// Serialize the target value as a single line of JSON and write it. 
+ async fn send_impl_inner<T: Serialize>(inner: &mut ProgressWriterInner, v: T) -> Result<()> { + // serde is guaranteed not to output newlines here + let buf = serde_json::to_vec(&v)?; + inner.fd.write_all(&buf).await?; + // We always end in a newline + inner.fd.write_all(b"\n").await?; + // And flush to ensure the remote side sees updates immediately + inner.fd.flush().await?; + Ok(()) + } + /// Serialize the target object to JSON as a single line pub(crate) async fn send_impl<T: Serialize>(&self, v: T, required: bool) -> Result<()> { let mut guard = self.inner.lock().await; @@ -197,8 +217,17 @@ return Ok(()); }; + // If this is our first message, emit the Start message + if !inner.sent_start { + inner.sent_start = true; + let start = Event::Start { + version: API_VERSION.into(), + }; + Self::send_impl_inner(inner, &start).await?; + } + // For messages that can be dropped, if we already sent an update within this cycle, discard this one. - // TODO: Also consider querying the pipe buffer and also dropping if we can't do this write. + // TODO: Also consider querying the pipe buffer and also dropping if we can't do this write. let now = Instant::now(); if !required { const REFRESH_MS: u32 = 1000 / REFRESH_HZ as u32; @@ -209,22 +238,15 @@ } } - // SAFETY: Propagating panics from the mutex here is intentional - // serde is guaranteed not to output newlines here - let buf = serde_json::to_vec(&v)?; - inner.fd.write_all(&buf).await?; - // We always end in a newline - inner.fd.write_all(b"\n").await?; - // And flush to ensure the remote side sees updates immediately - inner.fd.flush().await?; + Self::send_impl_inner(inner, &v).await?; // Update the last write time inner.last_write = Some(now); Ok(()) } /// Send an event. 
- pub(crate) async fn send(&self, v: T) { - if let Err(e) = self.send_impl(v, true).await { + pub(crate) async fn send(&self, event: Event<'_>) { + if let Err(e) = self.send_impl(event, true).await { eprintln!("Failed to write to jsonl: {}", e); // Stop writing to fd but let process continue // SAFETY: Propagating panics from the mutex here is intentional @@ -233,8 +255,8 @@ impl ProgressWriter { } /// Send an event that can be dropped. - pub(crate) async fn send_lossy(&self, v: T) { - if let Err(e) = self.send_impl(v, false).await { + pub(crate) async fn send_lossy(&self, event: Event<'_>) { + if let Err(e) = self.send_impl(event, false).await { eprintln!("Failed to write to jsonl: {}", e); // Stop writing to fd but let process continue // SAFETY: Propagating panics from the mutex here is intentional @@ -271,18 +293,30 @@ mod test { #[tokio::test] async fn test_jsonl() -> Result<()> { let testvalues = [ - S { - s: "foo".into(), - v: 42, + Event::ProgressSteps { + task: "sometask".into(), + description: "somedesc".into(), + id: "someid".into(), + steps_cached: 0, + steps: 0, + steps_total: 3, + subtasks: Vec::new(), }, - S { - // Test with an embedded newline to sanity check that serde doesn't write it literally - s: "foo\nbar".into(), - v: 0, + Event::ProgressBytes { + task: "sometask".into(), + description: "somedesc".into(), + id: "someid".into(), + bytes_cached: 0, + bytes: 11, + bytes_total: 42, + steps_cached: 0, + steps: 0, + steps_total: 3, + subtasks: Vec::new(), }, ]; let (send, recv) = tokio::net::unix::pipe::pipe()?; - let testvalues_sender = &testvalues; + let testvalues_sender = testvalues.iter().cloned(); let sender = async move { let w = ProgressWriter::try_from(send)?; for value in testvalues_sender { @@ -295,10 +329,18 @@ mod test { let tf = BufReader::new(recv); let mut expected = testvalues.iter(); let mut lines = tf.lines(); + let mut got_first = false; while let Some(line) = lines.next_line().await? 
{ - let found: S = serde_json::from_str(&line)?; - let expected = expected.next().unwrap(); - assert_eq!(&found, expected); + let found: Event = serde_json::from_str(&line)?; + let expected_value = if !got_first { + got_first = true; + &Event::Start { + version: API_VERSION.into(), + } + } else { + expected.next().unwrap() + }; + assert_eq!(&found, expected_value); } anyhow::Ok(()) }; diff --git a/tests/booted/test-image-pushpull-upgrade.nu b/tests/booted/test-image-pushpull-upgrade.nu index b6feeebfc..bb1b9941d 100644 --- a/tests/booted/test-image-pushpull-upgrade.nu +++ b/tests/booted/test-image-pushpull-upgrade.nu @@ -60,7 +60,7 @@ RUN echo test content > /usr/share/blah.txt try { systemctl kill test-cat-progress } systemd-run -u test-cat-progress -- /bin/bash -c $"exec cat ($progress_fifo) > ($progress_json)" # nushell doesn't do fd passing right now either, so run via bash - bash -c $"bootc switch --json-fd 3 --transport containers-storage localhost/bootc-derived 3>($progress_fifo)" + bash -c $"bootc switch --progress-fd 3 --transport containers-storage localhost/bootc-derived 3>($progress_fifo)" # Now, let's do some checking of the progress json let progress = open --raw $progress_json | from json -o sanity_check_switch_progress_json $progress @@ -75,10 +75,12 @@ RUN echo test content > /usr/share/blah.txt # This just does some basic verification of the progress JSON def sanity_check_switch_progress_json [data] { - let first = $data.0; let event_count = $data | length - assert equal $first.type ProgressBytes - let steps = $first.stepsTotal + # The first one should always be a start event + let first = $data.0; + assert equal $first.type Start + let second = $data.1 + let steps = $second.stepsTotal mut i = 0 for elt in $data { if $elt.type != "ProgressBytes" { diff --git a/xtask/src/xtask.rs b/xtask/src/xtask.rs index 0cb939019..9c6d8081a 100644 --- a/xtask/src/xtask.rs +++ b/xtask/src/xtask.rs @@ -147,10 +147,14 @@ fn update_generated(sh: &Shell) -> 
Result<()> { ) .run()?; } - let schema = cmd!(sh, "cargo run -q -- internals print-json-schema").read()?; - let target = "docs/src/host-v1.schema.json"; - std::fs::write(target, &schema)?; - println!("Updated {target}"); + for (of, target) in [ + ("host", "docs/src/host-v1.schema.json"), + ("progress", "docs/src/progress-v0.schema.json"), + ] { + let schema = cmd!(sh, "cargo run -q -- internals print-json-schema --of={of}").read()?; + std::fs::write(target, &schema)?; + println!("Updated {target}"); + } Ok(()) }