diff --git a/Cargo.lock b/Cargo.lock index 0ccc4b5e9..fc5586ee2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1862,6 +1862,7 @@ dependencies = [ "nom", "pool", "poule", + "prettytable-rs", "prost", "prost-build", "rand", diff --git a/bin/src/ctl/command.rs b/bin/src/ctl/command.rs index bf45c9178..87f3376ba 100644 --- a/bin/src/ctl/command.rs +++ b/bin/src/ctl/command.rs @@ -2,20 +2,11 @@ use anyhow::{self, bail, Context}; use prettytable::Table; use sozu_command_lib::proto::command::{ - request::RequestType, response_content::ContentType, ListWorkers, QueryCertificatesFilters, - QueryClusterByDomain, QueryClustersHashes, QueryMetricsOptions, Request, Response, - ResponseContent, ResponseStatus, RunState, UpgradeMain, + request::RequestType, response_content::ContentType, ListWorkers, QueryMetricsOptions, Request, + Response, ResponseContent, ResponseStatus, RunState, UpgradeMain, }; -use crate::ctl::{ - create_channel, - display::{ - print_available_metrics, print_certificates_by_worker, print_certificates_with_validity, - print_cluster_responses, print_frontend_list, print_json_response, print_listeners, - print_metrics, print_request_counts, print_status, - }, - CommandManager, -}; +use crate::ctl::{create_channel, CommandManager}; impl CommandManager { fn write_request_on_channel(&mut self, request: Request) -> anyhow::Result<()> { @@ -44,25 +35,7 @@ impl CommandManager { } ResponseStatus::Failure => bail!("Request failed: {}", response.message), ResponseStatus::Ok => { - println!("{}", response.message); - - if let Some(response_content) = response.content { - match response_content.content_type { - Some(ContentType::RequestCounts(request_counts)) => { - print_request_counts(&request_counts, self.json)?; - } - Some(ContentType::FrontendList(frontends)) => { - print_frontend_list(frontends, self.json)?; - } - Some(ContentType::Workers(worker_infos)) => { - print_status(worker_infos, self.json)?; - } - Some(ContentType::ListenersList(list)) => { - print_listeners(list, self.json)?; - } - _ => {} - } - } + response.display(self.json)?; break; } } @@ -225,28 +198,8 @@ impl CommandManager { ResponseStatus::Processing => { debug!("Proxy is processing: {}", response.message); } - ResponseStatus::Failure => { - if self.json { - return print_json_response(&response.message); - } else { - bail!("could not query proxy state: {}", response.message); - } - } - ResponseStatus::Ok => { - if let Some(response_content) = response.content { - match response_content.content_type { - Some(ContentType::Metrics(aggregated_metrics_data)) => { - print_metrics(aggregated_metrics_data, self.json)? 
- } - Some(ContentType::AvailableMetrics(available)) => { - print_available_metrics(&available, self.json)?; - } - _ => { - debug!("Wrong kind of response here"); - } - } - } - + ResponseStatus::Failure | ResponseStatus::Ok => { + response.display(self.json)?; break; } } @@ -266,169 +219,4 @@ impl CommandManager { Ok(()) } - - pub fn query_cluster( - &mut self, - cluster_id: Option, - domain: Option, - ) -> Result<(), anyhow::Error> { - if cluster_id.is_some() && domain.is_some() { - bail!("Error: Either request an cluster ID or a domain name"); - } - - let request = if let Some(ref cluster_id) = cluster_id { - RequestType::QueryClusterById(cluster_id.to_string()).into() - } else if let Some(ref domain) = domain { - let splitted: Vec = - domain.splitn(2, '/').map(|elem| elem.to_string()).collect(); - - if splitted.is_empty() { - bail!("Domain can't be empty"); - } - - let query_domain = QueryClusterByDomain { - hostname: splitted - .get(0) - .with_context(|| "Domain can't be empty")? - .clone(), - path: splitted.get(1).cloned().map(|path| format!("/{path}")), // We add the / again because of the splitn removing it - }; - - RequestType::QueryClustersByDomain(query_domain).into() - } else { - RequestType::QueryClustersHashes(QueryClustersHashes {}).into() - }; - - self.write_request_on_channel(request)?; - - loop { - let response = self.read_channel_message_with_timeout()?; - - match response.status() { - ResponseStatus::Processing => { - debug!("Proxy is processing: {}", response.message); - } - ResponseStatus::Failure => { - if self.json { - print_json_response(&response.message)?; - } - bail!("could not query proxy state: {}", response.message); - } - ResponseStatus::Ok => { - match response.content { - Some(ResponseContent { - content_type: Some(ContentType::WorkerResponses(worker_responses)), - }) => print_cluster_responses( - cluster_id, - domain, - worker_responses, - self.json, - )?, - _ => bail!("Wrong response content"), - } - break; - } - } - } - - Ok(()) - } - - pub fn query_certificates( - &mut self, - fingerprint: Option, - domain: Option, - query_workers: bool, - ) -> Result<(), anyhow::Error> { - let filters = QueryCertificatesFilters { - domain, - fingerprint, - }; - - if query_workers { - self.query_certificates_from_workers(filters) - } else { - self.query_certificates_from_the_state(filters) - } - } - - fn query_certificates_from_workers( - &mut self, - filters: QueryCertificatesFilters, - ) -> Result<(), anyhow::Error> { - self.write_request_on_channel(RequestType::QueryCertificatesFromWorkers(filters).into())?; - - loop { - let response = self.read_channel_message_with_timeout()?; - - match response.status() { - ResponseStatus::Processing => { - debug!("Proxy is processing: {}", response.message); - } - ResponseStatus::Failure => { - if self.json { - print_json_response(&response.message)?; - } - bail!("could not get certificate: {}", response.message); - } - ResponseStatus::Ok => { - info!("We did get a response from the proxy"); - match response.content { - Some(ResponseContent { - content_type: Some(ContentType::WorkerResponses(worker_responses)), - }) => print_certificates_by_worker(worker_responses.map, self.json)?, - _ => bail!("unexpected response: {:?}", response.content), - } - break; - } - } - } - Ok(()) - } - - fn query_certificates_from_the_state( - &mut self, - filters: QueryCertificatesFilters, - ) -> anyhow::Result<()> { - self.write_request_on_channel(RequestType::QueryCertificatesFromTheState(filters).into())?; - - loop { - let response = 
self.read_channel_message_with_timeout()?; - - match response.status() { - ResponseStatus::Processing => { - debug!("Proxy is processing: {}", response.message); - } - ResponseStatus::Failure => { - bail!("could not get certificate: {}", response.message); - } - ResponseStatus::Ok => { - debug!("We did get a response from the proxy"); - trace!("response message: {:?}\n", response.message); - - if let Some(response_content) = response.content { - let certs = match response_content.content_type { - Some(ContentType::CertificatesWithFingerprints(certs)) => certs.certs, - _ => bail!(format!("Wrong response content {:?}", response_content)), - }; - if certs.is_empty() { - bail!("No certificates match your request."); - } - - if self.json { - print_json_response(&certs)?; - } else { - print_certificates_with_validity(certs) - .with_context(|| "Could not show certificate")?; - } - } else { - debug!("No response content."); - } - - break; - } - } - } - Ok(()) - } } diff --git a/bin/src/ctl/display.rs b/bin/src/ctl/display.rs deleted file mode 100644 index dea9dcad5..000000000 --- a/bin/src/ctl/display.rs +++ /dev/null @@ -1,789 +0,0 @@ -use std::collections::{BTreeMap, HashMap, HashSet}; - -use anyhow::{self, Context}; -use prettytable::{Row, Table}; -use time::format_description; -use x509_parser::time::ASN1Time; - -use sozu_command_lib::proto::{ - command::{ - filtered_metrics, response_content::ContentType, AggregatedMetrics, AvailableMetrics, - CertificateAndKey, CertificatesWithFingerprints, ClusterMetrics, FilteredMetrics, - ListedFrontends, ListenersList, RequestCounts, ResponseContent, WorkerInfos, WorkerMetrics, - WorkerResponses, - }, - display::concatenate_vector, -}; - -pub fn print_listeners(listeners_list: ListenersList, json: bool) -> anyhow::Result<()> { - if json { - return print_json_response(&listeners_list); - } - println!("\nHTTP LISTENERS\n================"); - - for (_, http_listener) in listeners_list.http_listeners.iter() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row![ - "socket address", - format!("{:?}", http_listener.address) - ]); - table.add_row(row![ - "public address", - format!("{:?}", http_listener.public_address), - ]); - table.add_row(row!["404", http_listener.answer_404]); - table.add_row(row!["503", http_listener.answer_503]); - table.add_row(row!["expect proxy", http_listener.expect_proxy]); - table.add_row(row!["sticky name", http_listener.sticky_name]); - table.add_row(row!["front timeout", http_listener.front_timeout]); - table.add_row(row!["back timeout", http_listener.back_timeout]); - table.add_row(row!["connect timeout", http_listener.connect_timeout]); - table.add_row(row!["request timeout", http_listener.request_timeout]); - table.add_row(row!["activated", http_listener.active]); - table.printstd(); - } - - println!("\nHTTPS LISTENERS\n================"); - - for (_, https_listener) in listeners_list.https_listeners.iter() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - let mut tls_versions = String::new(); - for tls_version in https_listener.versions.iter() { - tls_versions.push_str(&format!("{tls_version:?}\n")); - } - - table.add_row(row![ - "socket address", - format!("{:?}", https_listener.address) - ]); - table.add_row(row![ - "public address", - format!("{:?}", https_listener.public_address) - ]); - table.add_row(row!["404", https_listener.answer_404,]); - table.add_row(row!["503", https_listener.answer_503,]); - 
table.add_row(row!["versions", tls_versions]); - table.add_row(row![ - "cipher list", - list_string_vec(&https_listener.cipher_list), - ]); - table.add_row(row![ - "cipher suites", - list_string_vec(&https_listener.cipher_suites), - ]); - table.add_row(row![ - "signature algorithms", - list_string_vec(&https_listener.signature_algorithms), - ]); - table.add_row(row![ - "groups list", - list_string_vec(&https_listener.groups_list), - ]); - table.add_row(row!["key", format!("{:?}", https_listener.key),]); - table.add_row(row!["expect proxy", https_listener.expect_proxy,]); - table.add_row(row!["sticky name", https_listener.sticky_name,]); - table.add_row(row!["front timeout", https_listener.front_timeout,]); - table.add_row(row!["back timeout", https_listener.back_timeout,]); - table.add_row(row!["connect timeout", https_listener.connect_timeout,]); - table.add_row(row!["request timeout", https_listener.request_timeout,]); - table.add_row(row!["activated", https_listener.active]); - table.printstd(); - } - - println!("\nTCP LISTENERS\n================"); - - if !listeners_list.tcp_listeners.is_empty() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["TCP frontends"]); - table.add_row(row![ - "socket address", - "public address", - "expect proxy", - "front timeout", - "back timeout", - "connect timeout", - "activated" - ]); - for (_, tcp_listener) in listeners_list.tcp_listeners.iter() { - table.add_row(row![ - format!("{:?}", tcp_listener.address), - format!("{:?}", tcp_listener.public_address), - tcp_listener.expect_proxy, - tcp_listener.front_timeout, - tcp_listener.back_timeout, - tcp_listener.connect_timeout, - tcp_listener.active, - ]); - } - table.printstd(); - } - Ok(()) -} - -pub fn print_status(worker_infos: WorkerInfos, json: bool) -> anyhow::Result<()> { - if json { - return print_json_response(&worker_infos); - } - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["worker id", "pid", "run state"]); - - for worker_info in worker_infos.vec { - let row = row!(worker_info.id, worker_info.pid, worker_info.run_state); - table.add_row(row); - } - - table.printstd(); - Ok(()) -} - -pub fn print_frontend_list(frontends: ListedFrontends, json: bool) -> anyhow::Result<()> { - if json { - return print_json_response(&frontends); - } - trace!(" We received this frontends to display {:#?}", frontends); - // HTTP frontends - if !frontends.http_frontends.is_empty() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["HTTP frontends "]); - table.add_row(row![ - "cluster_id", - "address", - "hostname", - "path", - "method", - "position", - "tags" - ]); - for http_frontend in frontends.http_frontends.iter() { - table.add_row(row!( - http_frontend - .cluster_id - .clone() - .unwrap_or("Deny".to_owned()), - http_frontend.address.to_string(), - http_frontend.hostname.to_string(), - format!("{:?}", http_frontend.path), - format!("{:?}", http_frontend.method), - format!("{:?}", http_frontend.position), - format_tags_to_string(&http_frontend.tags) - )); - } - table.printstd(); - } - - // HTTPS frontends - if !frontends.https_frontends.is_empty() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["HTTPS frontends"]); - table.add_row(row![ - "cluster_id", - "address", - "hostname", - "path", - "method", - "position", - "tags" 
- ]); - for https_frontend in frontends.https_frontends.iter() { - table.add_row(row!( - https_frontend - .cluster_id - .clone() - .unwrap_or("Deny".to_owned()), - https_frontend.address.to_string(), - https_frontend.hostname.to_string(), - format!("{:?}", https_frontend.path), - format!("{:?}", https_frontend.method), - format!("{:?}", https_frontend.position), - format_tags_to_string(&https_frontend.tags) - )); - } - table.printstd(); - } - - // TCP frontends - if !frontends.tcp_frontends.is_empty() { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["TCP frontends "]); - table.add_row(row!["Cluster ID", "address", "tags"]); - for tcp_frontend in frontends.tcp_frontends.iter() { - table.add_row(row!( - tcp_frontend.cluster_id, - tcp_frontend.address, - format_tags_to_string(&tcp_frontend.tags) - )); - } - table.printstd(); - } - Ok(()) -} - -pub fn print_metrics( - // main & worker metrics - aggregated_metrics: AggregatedMetrics, - json: bool, -) -> anyhow::Result<()> { - if json { - debug!("Here are the metrics, per worker"); - return print_json_response(&aggregated_metrics); - } - - // main process metrics - println!("\nMAIN PROCESS\n============"); - print_proxy_metrics(&aggregated_metrics.main); - - // workers - for (worker_id, worker_metrics) in aggregated_metrics.workers.iter() { - println!("\nWorker {worker_id}\n========="); - print_worker_metrics(worker_metrics)?; - } - Ok(()) -} - -fn print_worker_metrics(worker_metrics: &WorkerMetrics) -> anyhow::Result<()> { - print_proxy_metrics(&worker_metrics.proxy); - print_cluster_metrics(&worker_metrics.clusters); - - Ok(()) -} - -fn print_proxy_metrics(proxy_metrics: &BTreeMap) { - let filtered = filter_metrics(proxy_metrics); - print_gauges_and_counts(&filtered); - print_percentiles(&filtered); -} - -fn print_cluster_metrics(cluster_metrics: &BTreeMap) { - for (cluster_id, cluster_metrics_data) in cluster_metrics.iter() { - println!("\nCluster {cluster_id}\n--------"); - - let filtered = filter_metrics(&cluster_metrics_data.cluster); - print_gauges_and_counts(&filtered); - print_percentiles(&filtered); - - for backend_metrics in cluster_metrics_data.backends.iter() { - println!("\n{cluster_id}/{}\n--------", backend_metrics.backend_id); - let filtered = filter_metrics(&backend_metrics.metrics); - print_gauges_and_counts(&filtered); - print_percentiles(&filtered); - } - } -} - -fn filter_metrics( - metrics: &BTreeMap, -) -> BTreeMap { - let mut filtered_metrics = BTreeMap::new(); - - for (metric_key, filtered_value) in metrics.iter() { - filtered_metrics.insert( - metric_key.replace('\t', ".").to_string(), - filtered_value.clone(), - ); - } - filtered_metrics -} - -fn print_gauges_and_counts(filtered_metrics: &BTreeMap) { - let mut titles: Vec = filtered_metrics - .iter() - .filter_map(|(title, filtered_data)| match filtered_data.inner { - Some(filtered_metrics::Inner::Count(_)) | Some(filtered_metrics::Inner::Gauge(_)) => { - Some(title.to_owned()) - } - _ => None, - }) - .collect(); - - // sort the titles so they always appear in the same order - titles.sort(); - - if titles.is_empty() { - return; - } - - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - - table.set_titles(Row::new(vec![cell!(""), cell!("gauge"), cell!("count")])); - - for title in titles { - let mut row = vec![cell!(title)]; - match filtered_metrics.get(&title) { - Some(filtered_metrics) => match filtered_metrics.inner { - 
Some(filtered_metrics::Inner::Count(c)) => { - row.push(cell!("")); - row.push(cell!(c)) - } - Some(filtered_metrics::Inner::Gauge(c)) => { - row.push(cell!(c)); - row.push(cell!("")) - } - _ => {} - }, - _ => row.push(cell!("")), - } - table.add_row(Row::new(row)); - } - - table.printstd(); -} - -fn print_percentiles(filtered_metrics: &BTreeMap) { - let mut percentile_titles: Vec = filtered_metrics - .iter() - .filter_map(|(title, filtered_data)| match filtered_data.inner.clone() { - Some(filtered_metrics::Inner::Percentiles(_)) => Some(title.to_owned()), - _ => None, - }) - .collect(); - - // sort the metrics so they always appear in the same order - percentile_titles.sort(); - - if percentile_titles.is_empty() { - return; - } - - let mut percentile_table = Table::new(); - percentile_table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - - percentile_table.set_titles(Row::new(vec![ - cell!("Percentiles"), - cell!("samples"), - cell!("p50"), - cell!("p90"), - cell!("p99"), - cell!("p99.9"), - cell!("p99.99"), - cell!("p99.999"), - cell!("p100"), - ])); - - for title in percentile_titles { - if let Some(FilteredMetrics { - inner: Some(filtered_metrics::Inner::Percentiles(percentiles)), - }) = filtered_metrics.get(&title) - { - percentile_table.add_row(Row::new(vec![ - cell!(title), - cell!(percentiles.samples), - cell!(percentiles.p_50), - cell!(percentiles.p_90), - cell!(percentiles.p_99), - cell!(percentiles.p_99_9), - cell!(percentiles.p_99_99), - cell!(percentiles.p_99_999), - cell!(percentiles.p_100), - ])); - } else { - println!("Something went VERY wrong here"); - } - } - - percentile_table.printstd(); -} - -pub fn print_json_response(input: &T) -> Result<(), anyhow::Error> { - println!( - "{}", - serde_json::to_string_pretty(&input).context("Error while parsing response to JSON")? 
- ); - Ok(()) -} - -/// Creates an empty table of the form -/// ```text -/// ┌────────────┬─────────────┬───────────┬────────┐ -/// │ │ header │ header │ header │ -/// ├────────────┼─────────────┼───────────┼────────┤ -/// │ cluster_id │ │ │ │ -/// ├────────────┼─────────────┼───────────┼────────┤ -/// │ cluster_id │ │ │ │ -/// ├────────────┼─────────────┼───────────┼────────┤ -/// │ cluster_id │ │ │ │ -/// └────────────┴─────────────┴───────────┴────────┘ -/// ``` -pub fn create_cluster_table(headers: Vec<&str>, data: &BTreeMap) -> Table { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - let mut row_header: Vec<_> = headers.iter().map(|h| cell!(h)).collect(); - for ref key in data.keys() { - row_header.push(cell!(&key)); - } - table.add_row(Row::new(row_header)); - table -} - -pub fn print_cluster_responses( - cluster_id: Option, - domain: Option, - worker_responses: WorkerResponses, - json: bool, -) -> anyhow::Result<()> { - if json { - return print_json_response(&worker_responses); - } - - if let Some(needle) = cluster_id.or(domain) { - let mut cluster_table = create_cluster_table( - vec!["id", "sticky_session", "https_redirect"], - &worker_responses.map, - ); - - let mut frontend_table = - create_cluster_table(vec!["id", "hostname", "path"], &worker_responses.map); - - let mut https_frontend_table = - create_cluster_table(vec!["id", "hostname", "path"], &worker_responses.map); - - let mut tcp_frontend_table = - create_cluster_table(vec!["id", "address"], &worker_responses.map); - - let mut backend_table = create_cluster_table( - vec!["backend id", "IP address", "Backup"], - &worker_responses.map, - ); - - let worker_ids: HashSet<&String> = worker_responses.map.keys().collect(); - - let mut cluster_infos = HashMap::new(); - let mut http_frontends = HashMap::new(); - let mut https_frontends = HashMap::new(); - let mut tcp_frontends = HashMap::new(); - let mut backends = HashMap::new(); - - for (worker_id, response_content) in worker_responses.map.iter() { - if let Some(ContentType::Clusters(clusters)) = &response_content.content_type { - for cluster in clusters.vec.iter() { - if cluster.configuration.is_some() { - let entry = cluster_infos.entry(cluster).or_insert(Vec::new()); - entry.push(worker_id.to_owned()); - } - - for frontend in cluster.http_frontends.iter() { - let entry = http_frontends.entry(frontend).or_insert(Vec::new()); - entry.push(worker_id.to_owned()); - } - - for frontend in cluster.https_frontends.iter() { - let entry = https_frontends.entry(frontend).or_insert(Vec::new()); - entry.push(worker_id.to_owned()); - } - - for frontend in cluster.tcp_frontends.iter() { - let entry = tcp_frontends.entry(frontend).or_insert(Vec::new()); - entry.push(worker_id.to_owned()); - } - - for backend in cluster.backends.iter() { - let entry = backends.entry(backend).or_insert(Vec::new()); - entry.push(worker_id.to_owned()); - } - } - } - } - - println!("Cluster level configuration for {needle}:\n"); - - for (cluster_info, workers_the_cluster_is_present_on) in cluster_infos.iter() { - let mut row = Vec::new(); - row.push(cell!(cluster_info - .configuration - .as_ref() - .map(|conf| conf.cluster_id.to_owned()) - .unwrap_or_else(|| String::from("None")))); - row.push(cell!(cluster_info - .configuration - .as_ref() - .map(|conf| conf.sticky_session) - .unwrap_or_else(|| false))); - row.push(cell!(cluster_info - .configuration - .as_ref() - .map(|conf| conf.https_redirect) - .unwrap_or_else(|| false))); - - for worker in 
workers_the_cluster_is_present_on { - if worker_ids.contains(worker) { - row.push(cell!("X")); - } else { - row.push(cell!("")); - } - } - - cluster_table.add_row(Row::new(row)); - } - - cluster_table.printstd(); - - println!("\nHTTP frontends configuration for {needle}:\n"); - - for (key, values) in http_frontends.iter() { - let mut row = Vec::new(); - match &key.cluster_id { - Some(cluster_id) => row.push(cell!(cluster_id)), - None => row.push(cell!("-")), - } - row.push(cell!(key.hostname)); - row.push(cell!(key.path)); - - for val in values.iter() { - if worker_ids.contains(val) { - row.push(cell!("X")); - } else { - row.push(cell!("")); - } - } - - frontend_table.add_row(Row::new(row)); - } - - frontend_table.printstd(); - - println!("\nHTTPS frontends configuration for {needle}:\n"); - - for (key, values) in https_frontends.iter() { - let mut row = Vec::new(); - match &key.cluster_id { - Some(cluster_id) => row.push(cell!(cluster_id)), - None => row.push(cell!("-")), - } - row.push(cell!(key.hostname)); - row.push(cell!(key.path)); - - for val in values.iter() { - if worker_ids.contains(val) { - row.push(cell!("X")); - } else { - row.push(cell!("")); - } - } - - https_frontend_table.add_row(Row::new(row)); - } - - https_frontend_table.printstd(); - - println!("\nTCP frontends configuration for {needle}:\n"); - - for (key, values) in tcp_frontends.iter() { - let mut row = vec![cell!(key.cluster_id), cell!(format!("{}", key.address))]; - - for val in values.iter() { - if worker_ids.contains(val) { - row.push(cell!(String::from("X"))); - } else { - row.push(cell!(String::from(""))); - } - } - - tcp_frontend_table.add_row(Row::new(row)); - } - - tcp_frontend_table.printstd(); - - println!("\nbackends configuration for {needle}:\n"); - - for (key, values) in backends.iter() { - let mut row = vec![ - cell!(key.backend_id), - cell!(format!("{}", key.address)), - cell!(key - .backup - .map(|b| if b { "X" } else { "" }) - .unwrap_or_else(|| "")), - ]; - - for val in values { - if worker_ids.contains(&val) { - row.push(cell!("X")); - } else { - row.push(cell!("")); - } - } - - backend_table.add_row(Row::new(row)); - } - - backend_table.printstd(); - - return Ok(()); - } - - // display all clusters in a simplified table showing their hashes - let mut clusters_table = Table::new(); - clusters_table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - let mut header = vec![cell!("cluster id")]; - for worker_id in worker_responses.map.keys() { - header.push(cell!(format!("worker {}", worker_id))); - } - header.push(cell!("desynchronized")); - clusters_table.add_row(Row::new(header)); - - let mut cluster_hashes = HashMap::new(); - - for response_content in worker_responses.map.values() { - if let Some(ContentType::ClusterHashes(hashes)) = &response_content.content_type { - for (cluster_id, hash) in hashes.map.iter() { - cluster_hashes - .entry(cluster_id) - .or_insert(Vec::new()) - .push(hash); - } - } - } - - for (cluster_id, hashes) in cluster_hashes.iter() { - let mut row = vec![cell!(cluster_id)]; - for val in hashes.iter() { - row.push(cell!(format!("{val}"))); - } - - let hs: HashSet<&u64> = hashes.iter().cloned().collect(); - if hs.len() > 1 { - row.push(cell!("X")); - } else { - row.push(cell!("")); - } - - clusters_table.add_row(Row::new(row)); - } - - clusters_table.printstd(); - Ok(()) -} - -pub fn print_certificates_by_worker( - response_contents: BTreeMap, - json: bool, -) -> anyhow::Result<()> { - if json { - return print_json_response(&response_contents); - } - - for 
(worker_id, response_content) in response_contents.iter() { - println!("Worker {}", worker_id); - match &response_content.content_type { - Some(ContentType::CertificatesByAddress(list)) => { - for certs in list.certificates.iter() { - println!("\t{}:", certs.address); - - for summary in certs.certificate_summaries.iter() { - println!("\t\t{}", summary); - } - - println!(); - } - } - Some(ContentType::CertificatesWithFingerprints(CertificatesWithFingerprints { - certs, - })) => print_certificates_with_validity(certs.clone())?, - - _ => {} - } - println!(); - } - Ok(()) -} - -fn format_tags_to_string(tags: &BTreeMap) -> String { - tags.iter() - .map(|(k, v)| format!("{k}={v}")) - .collect::>() - .join(", ") -} - -pub fn print_available_metrics( - available_metrics: &AvailableMetrics, - json: bool, -) -> anyhow::Result<()> { - if json { - return print_json_response(&available_metrics); - } - println!("Available metrics on the proxy level:"); - for metric_name in &available_metrics.proxy_metrics { - println!("\t{metric_name}"); - } - println!("Available metrics on the cluster level:"); - for metric_name in &available_metrics.cluster_metrics { - println!("\t{metric_name}"); - } - Ok(()) -} - -fn list_string_vec(vec: &[String]) -> String { - let mut output = String::new(); - for item in vec.iter() { - output.push_str(item); - output.push('\n'); - } - output -} - -pub fn print_certificates_with_validity( - certs: BTreeMap, -) -> anyhow::Result<()> { - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_CLEAN); - table.add_row(row![ - "fingeprint", - "valid not before", - "valide not after", - "domain names", - ]); - - for (fingerprint, cert) in certs { - let (_unparsed, pem_certificate) = - x509_parser::pem::parse_x509_pem(cert.certificate.as_bytes()) - .with_context(|| "Could not parse pem certificate")?; - - let x509_certificate = pem_certificate - .parse_x509() - .with_context(|| "Could not parse x509 certificate")?; - - let validity = x509_certificate.validity(); - - table.add_row(row!( - fingerprint, - format_datetime(validity.not_before)?, - format_datetime(validity.not_after)?, - concatenate_vector(&cert.names), - )); - } - table.printstd(); - - Ok(()) -} - -pub fn print_request_counts(request_counts: &RequestCounts, json: bool) -> anyhow::Result<()> { - if json { - return print_json_response(&request_counts); - } - let mut table = Table::new(); - table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); - table.add_row(row!["request type", "count"]); - - for (request_type, count) in &request_counts.map { - table.add_row(row!(request_type, count)); - } - table.printstd(); - Ok(()) -} - -// ISO 8601 -fn format_datetime(asn1_time: ASN1Time) -> anyhow::Result { - let datetime = asn1_time.to_datetime(); - - let formatted = datetime - .format(&format_description::well_known::Iso8601::DEFAULT) - .with_context(|| "Could not format the datetime to ISO 8601")?; - Ok(formatted) -} diff --git a/bin/src/ctl/mod.rs b/bin/src/ctl/mod.rs index bab0d62a9..02ed0f429 100644 --- a/bin/src/ctl/mod.rs +++ b/bin/src/ctl/mod.rs @@ -13,8 +13,6 @@ use crate::{ }; mod command; -/// TODO: just create a display() method on sozu_command_lib::Response and put everything in there -mod display; mod request_builder; pub struct CommandManager { diff --git a/bin/src/ctl/request_builder.rs b/bin/src/ctl/request_builder.rs index 8cef5cc17..1d8013ccf 100644 --- a/bin/src/ctl/request_builder.rs +++ b/bin/src/ctl/request_builder.rs @@ -3,13 +3,16 @@ use std::collections::BTreeMap; use 
anyhow::{bail, Context}; use sozu_command_lib::{ - certificate::{calculate_fingerprint, split_certificate_chain, Fingerprint}, - config::{Config, ListenerBuilder}, + certificate::{ + decode_fingerprint, get_fingerprint_from_certificate_path, load_full_certificate, + }, + config::ListenerBuilder, proto::command::{ - request::RequestType, ActivateListener, AddBackend, AddCertificate, CertificateAndKey, - Cluster, CountRequests, DeactivateListener, FrontendFilters, HardStop, ListListeners, - ListenerType, LoadBalancingParams, MetricsConfiguration, PathRule, ProxyProtocolConfig, - RemoveBackend, RemoveCertificate, RemoveListener, ReplaceCertificate, RequestHttpFrontend, + request::RequestType, ActivateListener, AddBackend, AddCertificate, Cluster, CountRequests, + DeactivateListener, FrontendFilters, HardStop, ListListeners, ListenerType, + LoadBalancingParams, MetricsConfiguration, PathRule, ProxyProtocolConfig, + QueryCertificatesFilters, QueryClusterByDomain, QueryClustersHashes, RemoveBackend, + RemoveCertificate, RemoveListener, ReplaceCertificate, RequestHttpFrontend, RequestTcpFrontend, RulePosition, SoftStop, Status, SubscribeEvents, TlsVersion, }, }; @@ -551,51 +554,57 @@ impl CommandManager { .into(), ) } -} -fn get_fingerprint_from_certificate_path(certificate_path: &str) -> anyhow::Result { - let bytes = Config::load_file_bytes(certificate_path) - .with_context(|| format!("could not load certificate file on path {certificate_path}"))?; + pub fn query_certificates( + &mut self, + fingerprint: Option, + domain: Option, + query_workers: bool, + ) -> Result<(), anyhow::Error> { + let filters = QueryCertificatesFilters { + domain, + fingerprint, + }; - let parsed_bytes = calculate_fingerprint(&bytes).with_context(|| { - format!("could not calculate fingerprint for the certificate at {certificate_path}") - })?; + if query_workers { + self.send_request(RequestType::QueryCertificatesFromWorkers(filters).into()) + } else { + self.send_request(RequestType::QueryCertificatesFromTheState(filters).into()) + } + } - Ok(Fingerprint(parsed_bytes)) -} + pub fn query_cluster( + &mut self, + cluster_id: Option, + domain: Option, + ) -> Result<(), anyhow::Error> { + if cluster_id.is_some() && domain.is_some() { + bail!("Error: Either request an cluster ID or a domain name"); + } -fn decode_fingerprint(fingerprint: &str) -> anyhow::Result { - let bytes = hex::decode(fingerprint) - .with_context(|| "Failed at decoding the string (expected hexadecimal data)")?; - Ok(Fingerprint(bytes)) -} + let request = if let Some(ref cluster_id) = cluster_id { + RequestType::QueryClusterById(cluster_id.to_string()).into() + } else if let Some(ref domain) = domain { + let splitted: Vec = + domain.splitn(2, '/').map(|elem| elem.to_string()).collect(); + + if splitted.is_empty() { + bail!("Domain can't be empty"); + } + + let query_domain = QueryClusterByDomain { + hostname: splitted + .get(0) + .with_context(|| "Domain can't be empty")? 
+ .clone(), + path: splitted.get(1).cloned().map(|path| format!("/{path}")), // We add the / again because of the splitn removing it + }; + + RequestType::QueryClustersByDomain(query_domain).into() + } else { + RequestType::QueryClustersHashes(QueryClustersHashes {}).into() + }; -fn load_full_certificate( - certificate_path: &str, - certificate_chain_path: &str, - key_path: &str, - versions: Vec, - names: Vec, -) -> Result { - let certificate = Config::load_file(certificate_path) - .with_context(|| format!("Could not load certificate file on path {certificate_path}"))?; - - let certificate_chain = Config::load_file(certificate_chain_path) - .map(split_certificate_chain) - .with_context(|| { - format!("could not load certificate chain on path: {certificate_chain_path}") - })?; - - let key = Config::load_file(key_path) - .with_context(|| format!("Could not load key file on path {key_path}"))?; - - let versions = versions.iter().map(|v| *v as i32).collect(); - - Ok(CertificateAndKey { - certificate, - certificate_chain, - key, - versions, - names, - }) + self.send_request(request) + } } diff --git a/command/Cargo.toml b/command/Cargo.toml index c7f25cb4c..62de3cf90 100644 --- a/command/Cargo.toml +++ b/command/Cargo.toml @@ -42,6 +42,7 @@ serde = { version = "^1.0.188", features = ["derive"] } serde_json = "^1.0.107" sha2 = "^0.10.8" trailer = "^0.1.2" +prettytable-rs = { version = "^0.10.0", default-features = false } pool = "^0.1.4" poule = "^0.3.2" thiserror = "^1.0.49" diff --git a/command/src/certificate.rs b/command/src/certificate.rs index b9c33a1f5..406ff0aab 100644 --- a/command/src/certificate.rs +++ b/command/src/certificate.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, fmt, str::FromStr}; -use hex::FromHex; +use hex::{FromHex, FromHexError}; use serde::de::{self, Visitor}; use sha2::{Digest, Sha256}; use x509_parser::{ @@ -8,17 +8,24 @@ use x509_parser::{ pem::{parse_x509_pem, Pem}, }; -use crate::proto::command::TlsVersion; +use crate::{ + config::{Config, ConfigError}, + proto::command::{CertificateAndKey, TlsVersion}, +}; // ----------------------------------------------------------------------------- // CertificateError -#[derive(thiserror::Error, Clone, Debug)] +#[derive(thiserror::Error, Debug)] pub enum CertificateError { #[error("Could not parse PEM certificate from bytes: {0}")] InvalidCertificate(String), #[error("failed to parse tls version '{0}'")] InvalidTlsVersion(String), + #[error("could not load file on path {path}: {error}")] + LoadFile { path: String, error: ConfigError }, + #[error("Failed at decoding the hex encoded certificate: {0}")] + DecodeError(FromHexError), } // ----------------------------------------------------------------------------- @@ -168,3 +175,59 @@ pub fn split_certificate_chain(mut chain: String) -> Vec { v } + +pub fn get_fingerprint_from_certificate_path( + certificate_path: &str, +) -> Result { + let bytes = + Config::load_file_bytes(certificate_path).map_err(|e| CertificateError::LoadFile { + path: certificate_path.to_string(), + error: e, + })?; + + let parsed_bytes = calculate_fingerprint(&bytes)?; + + Ok(Fingerprint(parsed_bytes)) +} + +pub fn decode_fingerprint(fingerprint: &str) -> Result { + let bytes = + hex::decode(fingerprint).map_err(|hex_error| CertificateError::DecodeError(hex_error))?; + Ok(Fingerprint(bytes)) +} + +pub fn load_full_certificate( + certificate_path: &str, + certificate_chain_path: &str, + key_path: &str, + versions: Vec, + names: Vec, +) -> Result { + let certificate = + 
Config::load_file(certificate_path).map_err(|e| CertificateError::LoadFile { + path: certificate_path.to_string(), + error: e, + })?; + + let certificate_chain = Config::load_file(certificate_chain_path) + .map(split_certificate_chain) + .map_err(|e| CertificateError::LoadFile { + path: certificate_chain_path.to_string(), + error: e, + })?; + + let key = Config::load_file(key_path).map_err(|e| CertificateError::LoadFile { + path: key_path.to_string(), + error: e, + })?; + + let versions = versions.iter().map(|v| *v as i32).collect(); + + Ok(CertificateAndKey { + certificate, + certificate_chain, + key, + versions, + names, + }) +} diff --git a/command/src/command.proto b/command/src/command.proto index ff175f422..c6e6345d5 100644 --- a/command/src/command.proto +++ b/command/src/command.proto @@ -463,6 +463,7 @@ message ResponseContent { } } +// a map of worker_id -> ResponseContent message WorkerResponses { map map = 1; } diff --git a/command/src/proto/display.rs b/command/src/proto/display.rs index fd2e2dad7..322210344 100644 --- a/command/src/proto/display.rs +++ b/command/src/proto/display.rs @@ -1,8 +1,21 @@ -use std::fmt::{Display, Formatter}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fmt::{Display, Formatter}, +}; + +use prettytable::{cell, row, Row, Table}; +use time::format_description; +use x509_parser::time::ASN1Time; -use crate::proto::command::{ - request::RequestType, CertificateAndKey, CertificateSummary, QueryCertificatesFilters, - TlsVersion, +use crate::proto::{ + command::{ + filtered_metrics, request::RequestType, response_content::ContentType, AggregatedMetrics, + AvailableMetrics, CertificateAndKey, CertificateSummary, CertificatesWithFingerprints, + ClusterMetrics, FilteredMetrics, ListOfCertificatesByAddress, ListedFrontends, + ListenersList, QueryCertificatesFilters, RequestCounts, Response, ResponseContent, + ResponseStatus, TlsVersion, WorkerInfos, WorkerMetrics, WorkerResponses, + }, + DisplayError, }; impl Display for CertificateAndKey { @@ -91,3 +104,824 @@ pub fn format_request_type(request_type: &RequestType) -> String { RequestType::QueryCertificatesFromWorkers(_) => "QueryCertificatesFromWorkers".to_owned(), } } + +pub fn print_json_response(input: &T) -> Result<(), DisplayError> { + let pretty_json = serde_json::to_string_pretty(&input).map_err(DisplayError::Json)?; + println!("{pretty_json}"); + Ok(()) +} + +impl Response { + pub fn display(&self, json: bool) -> Result<(), DisplayError> { + match self.status() { + ResponseStatus::Ok => println!("Success: {}", self.message), + ResponseStatus::Failure => println!("Failure: {}", self.message), + ResponseStatus::Processing => { + return Err(DisplayError::WrongResponseType( + "ResponseStatus::Processing".to_string(), + )) + } + } + + let content = match &self.content { + Some(content) => content, + None => return Ok(println!("No content")), + }; + + content.display(json) + } +} + +impl ResponseContent { + fn display(&self, json: bool) -> Result<(), DisplayError> { + let content_type = match &self.content_type { + Some(content_type) => content_type, + None => return Ok(println!("No content")), + }; + + if json { + return print_json_response(&content_type); + } + + // in this exception, the clusters + + match content_type { + ContentType::Workers(worker_infos) => print_status(worker_infos), + ContentType::Metrics(aggr_metrics) => print_metrics(aggr_metrics), + ContentType::FrontendList(frontends) => print_frontends(frontends), + ContentType::ListenersList(listeners) => 
print_listeners(listeners), + ContentType::WorkerMetrics(worker_metrics) => print_worker_metrics(&worker_metrics), + ContentType::AvailableMetrics(list) => print_available_metrics(&list), + ContentType::RequestCounts(request_counts) => print_request_counts(&request_counts), + ContentType::CertificatesWithFingerprints(certs) => { + print_certificates_with_fingerprints(certs) + } + ContentType::WorkerResponses(worker_responses) => { + // exception when displaying clusters + if worker_responses.contain_cluster_infos() { + print_cluster_responses(worker_responses) + } else if worker_responses.contain_cluster_hashes() { + print_cluster_hashes(worker_responses) + } else { + print_responses_by_worker(worker_responses, json) + } + } + ContentType::Clusters(_) | ContentType::ClusterHashes(_) => Ok(()), // not displayed directly, see print_cluster_responses + ContentType::CertificatesByAddress(certs) => print_certificates_by_address(certs), + ContentType::Event(_event) => Ok(()), // not event displayed yet! + } + } +} + +impl WorkerResponses { + fn contain_cluster_infos(&self) -> bool { + for (_worker_id, response) in self.map.iter() { + if let Some(content_type) = &response.content_type { + if matches!(content_type, ContentType::Clusters(_)) { + return true; + } + } + } + false + } + + fn contain_cluster_hashes(&self) -> bool { + for (_worker_id, response) in self.map.iter() { + if let Some(content_type) = &response.content_type { + if matches!(content_type, ContentType::ClusterHashes(_)) { + return true; + } + } + } + false + } +} + +pub fn print_status(worker_infos: &WorkerInfos) -> Result<(), DisplayError> { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["worker id", "pid", "run state"]); + + for worker_info in &worker_infos.vec { + let row = row!(worker_info.id, worker_info.pid, worker_info.run_state); + table.add_row(row); + } + + table.printstd(); + Ok(()) +} + +pub fn print_metrics(aggregated_metrics: &AggregatedMetrics) -> Result<(), DisplayError> { + // main process metrics + println!("\nMAIN PROCESS\n============"); + print_proxy_metrics(&aggregated_metrics.main); + + // workers + for (worker_id, worker_metrics) in aggregated_metrics.workers.iter() { + println!("\nWorker {worker_id}\n========="); + print_worker_metrics(worker_metrics)?; + } + Ok(()) +} + +fn print_proxy_metrics(proxy_metrics: &BTreeMap) { + let filtered = filter_metrics(proxy_metrics); + print_gauges_and_counts(&filtered); + print_percentiles(&filtered); +} + +fn print_worker_metrics(worker_metrics: &WorkerMetrics) -> Result<(), DisplayError> { + print_proxy_metrics(&worker_metrics.proxy); + print_cluster_metrics(&worker_metrics.clusters); + + Ok(()) +} + +fn print_cluster_metrics(cluster_metrics: &BTreeMap) { + for (cluster_id, cluster_metrics_data) in cluster_metrics.iter() { + println!("\nCluster {cluster_id}\n--------"); + + let filtered = filter_metrics(&cluster_metrics_data.cluster); + print_gauges_and_counts(&filtered); + print_percentiles(&filtered); + + for backend_metrics in cluster_metrics_data.backends.iter() { + println!("\n{cluster_id}/{}\n--------", backend_metrics.backend_id); + let filtered = filter_metrics(&backend_metrics.metrics); + print_gauges_and_counts(&filtered); + print_percentiles(&filtered); + } + } +} + +fn filter_metrics( + metrics: &BTreeMap, +) -> BTreeMap { + let mut filtered_metrics = BTreeMap::new(); + + for (metric_key, filtered_value) in metrics.iter() { + filtered_metrics.insert( + metric_key.replace('\t', 
".").to_string(), + filtered_value.clone(), + ); + } + filtered_metrics +} + +fn print_gauges_and_counts(filtered_metrics: &BTreeMap) { + let mut titles: Vec = filtered_metrics + .iter() + .filter_map(|(title, filtered_data)| match filtered_data.inner { + Some(filtered_metrics::Inner::Count(_)) | Some(filtered_metrics::Inner::Gauge(_)) => { + Some(title.to_owned()) + } + _ => None, + }) + .collect(); + + // sort the titles so they always appear in the same order + titles.sort(); + + if titles.is_empty() { + return; + } + + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + + table.set_titles(Row::new(vec![cell!(""), cell!("gauge"), cell!("count")])); + + for title in titles { + let mut row = vec![cell!(title)]; + match filtered_metrics.get(&title) { + Some(filtered_metrics) => match filtered_metrics.inner { + Some(filtered_metrics::Inner::Count(c)) => { + row.push(cell!("")); + row.push(cell!(c)) + } + Some(filtered_metrics::Inner::Gauge(c)) => { + row.push(cell!(c)); + row.push(cell!("")) + } + _ => {} + }, + _ => row.push(cell!("")), + } + table.add_row(Row::new(row)); + } + + table.printstd(); +} + +fn print_percentiles(filtered_metrics: &BTreeMap) { + let mut percentile_titles: Vec = filtered_metrics + .iter() + .filter_map(|(title, filtered_data)| match filtered_data.inner.clone() { + Some(filtered_metrics::Inner::Percentiles(_)) => Some(title.to_owned()), + _ => None, + }) + .collect(); + + // sort the metrics so they always appear in the same order + percentile_titles.sort(); + + if percentile_titles.is_empty() { + return; + } + + let mut percentile_table = Table::new(); + percentile_table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + + percentile_table.set_titles(Row::new(vec![ + cell!("Percentiles"), + cell!("samples"), + cell!("p50"), + cell!("p90"), + cell!("p99"), + cell!("p99.9"), + cell!("p99.99"), + cell!("p99.999"), + cell!("p100"), + ])); + + for title in percentile_titles { + if let Some(FilteredMetrics { + inner: Some(filtered_metrics::Inner::Percentiles(percentiles)), + }) = filtered_metrics.get(&title) + { + percentile_table.add_row(Row::new(vec![ + cell!(title), + cell!(percentiles.samples), + cell!(percentiles.p_50), + cell!(percentiles.p_90), + cell!(percentiles.p_99), + cell!(percentiles.p_99_9), + cell!(percentiles.p_99_99), + cell!(percentiles.p_99_999), + cell!(percentiles.p_100), + ])); + } else { + println!("Something went VERY wrong here"); + } + } + + percentile_table.printstd(); +} + +fn print_available_metrics(available_metrics: &AvailableMetrics) -> Result<(), DisplayError> { + println!("Available metrics on the proxy level:"); + for metric_name in &available_metrics.proxy_metrics { + println!("\t{metric_name}"); + } + println!("Available metrics on the cluster level:"); + for metric_name in &available_metrics.cluster_metrics { + println!("\t{metric_name}"); + } + Ok(()) +} + +fn print_frontends(frontends: &ListedFrontends) -> Result<(), DisplayError> { + trace!(" We received this frontends to display {:#?}", frontends); + // HTTP frontends + if !frontends.http_frontends.is_empty() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["HTTP frontends "]); + table.add_row(row![ + "cluster_id", + "address", + "hostname", + "path", + "method", + "position", + "tags" + ]); + for http_frontend in frontends.http_frontends.iter() { + table.add_row(row!( + http_frontend + .cluster_id + .clone() + .unwrap_or("Deny".to_owned()), + 
http_frontend.address.to_string(), + http_frontend.hostname.to_string(), + format!("{:?}", http_frontend.path), + format!("{:?}", http_frontend.method), + format!("{:?}", http_frontend.position), + format_tags_to_string(&http_frontend.tags) + )); + } + table.printstd(); + } + + // HTTPS frontends + if !frontends.https_frontends.is_empty() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["HTTPS frontends"]); + table.add_row(row![ + "cluster_id", + "address", + "hostname", + "path", + "method", + "position", + "tags" + ]); + for https_frontend in frontends.https_frontends.iter() { + table.add_row(row!( + https_frontend + .cluster_id + .clone() + .unwrap_or("Deny".to_owned()), + https_frontend.address.to_string(), + https_frontend.hostname.to_string(), + format!("{:?}", https_frontend.path), + format!("{:?}", https_frontend.method), + format!("{:?}", https_frontend.position), + format_tags_to_string(&https_frontend.tags) + )); + } + table.printstd(); + } + + // TCP frontends + if !frontends.tcp_frontends.is_empty() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["TCP frontends "]); + table.add_row(row!["Cluster ID", "address", "tags"]); + for tcp_frontend in frontends.tcp_frontends.iter() { + table.add_row(row!( + tcp_frontend.cluster_id, + tcp_frontend.address, + format_tags_to_string(&tcp_frontend.tags) + )); + } + table.printstd(); + } + Ok(()) +} + +pub fn print_listeners(listeners_list: &ListenersList) -> Result<(), DisplayError> { + println!("\nHTTP LISTENERS\n================"); + + for (_, http_listener) in listeners_list.http_listeners.iter() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row![ + "socket address", + format!("{:?}", http_listener.address) + ]); + table.add_row(row![ + "public address", + format!("{:?}", http_listener.public_address), + ]); + table.add_row(row!["404", http_listener.answer_404]); + table.add_row(row!["503", http_listener.answer_503]); + table.add_row(row!["expect proxy", http_listener.expect_proxy]); + table.add_row(row!["sticky name", http_listener.sticky_name]); + table.add_row(row!["front timeout", http_listener.front_timeout]); + table.add_row(row!["back timeout", http_listener.back_timeout]); + table.add_row(row!["connect timeout", http_listener.connect_timeout]); + table.add_row(row!["request timeout", http_listener.request_timeout]); + table.add_row(row!["activated", http_listener.active]); + table.printstd(); + } + + println!("\nHTTPS LISTENERS\n================"); + + for (_, https_listener) in listeners_list.https_listeners.iter() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + let mut tls_versions = String::new(); + for tls_version in https_listener.versions.iter() { + tls_versions.push_str(&format!("{tls_version:?}\n")); + } + + table.add_row(row![ + "socket address", + format!("{:?}", https_listener.address) + ]); + table.add_row(row![ + "public address", + format!("{:?}", https_listener.public_address) + ]); + table.add_row(row!["404", https_listener.answer_404,]); + table.add_row(row!["503", https_listener.answer_503,]); + table.add_row(row!["versions", tls_versions]); + table.add_row(row![ + "cipher list", + list_string_vec(&https_listener.cipher_list), + ]); + table.add_row(row![ + "cipher suites", + list_string_vec(&https_listener.cipher_suites), + ]); + 
table.add_row(row![ + "signature algorithms", + list_string_vec(&https_listener.signature_algorithms), + ]); + table.add_row(row![ + "groups list", + list_string_vec(&https_listener.groups_list), + ]); + table.add_row(row!["key", format!("{:?}", https_listener.key),]); + table.add_row(row!["expect proxy", https_listener.expect_proxy,]); + table.add_row(row!["sticky name", https_listener.sticky_name,]); + table.add_row(row!["front timeout", https_listener.front_timeout,]); + table.add_row(row!["back timeout", https_listener.back_timeout,]); + table.add_row(row!["connect timeout", https_listener.connect_timeout,]); + table.add_row(row!["request timeout", https_listener.request_timeout,]); + table.add_row(row!["activated", https_listener.active]); + table.printstd(); + } + + println!("\nTCP LISTENERS\n================"); + + if !listeners_list.tcp_listeners.is_empty() { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["TCP frontends"]); + table.add_row(row![ + "socket address", + "public address", + "expect proxy", + "front timeout", + "back timeout", + "connect timeout", + "activated" + ]); + for (_, tcp_listener) in listeners_list.tcp_listeners.iter() { + table.add_row(row![ + format!("{:?}", tcp_listener.address), + format!("{:?}", tcp_listener.public_address), + tcp_listener.expect_proxy, + tcp_listener.front_timeout, + tcp_listener.back_timeout, + tcp_listener.connect_timeout, + tcp_listener.active, + ]); + } + table.printstd(); + } + Ok(()) +} + +fn print_cluster_responses(worker_responses: &WorkerResponses) -> Result<(), DisplayError> { + let mut cluster_table = create_cluster_table( + vec!["id", "sticky_session", "https_redirect"], + &worker_responses.map, + ); + + let mut frontend_table = + create_cluster_table(vec!["id", "hostname", "path"], &worker_responses.map); + + let mut https_frontend_table = + create_cluster_table(vec!["id", "hostname", "path"], &worker_responses.map); + + let mut tcp_frontend_table = create_cluster_table(vec!["id", "address"], &worker_responses.map); + + let mut backend_table = create_cluster_table( + vec!["backend id", "IP address", "Backup"], + &worker_responses.map, + ); + + let worker_ids: HashSet<&String> = worker_responses.map.keys().collect(); + + let mut cluster_infos = HashMap::new(); + let mut http_frontends = HashMap::new(); + let mut https_frontends = HashMap::new(); + let mut tcp_frontends = HashMap::new(); + let mut backends = HashMap::new(); + + for (worker_id, response_content) in worker_responses.map.iter() { + if let Some(ContentType::Clusters(clusters)) = &response_content.content_type { + for cluster in clusters.vec.iter() { + if cluster.configuration.is_some() { + let entry = cluster_infos.entry(cluster).or_insert(Vec::new()); + entry.push(worker_id.to_owned()); + } + + for frontend in cluster.http_frontends.iter() { + let entry = http_frontends.entry(frontend).or_insert(Vec::new()); + entry.push(worker_id.to_owned()); + } + + for frontend in cluster.https_frontends.iter() { + let entry = https_frontends.entry(frontend).or_insert(Vec::new()); + entry.push(worker_id.to_owned()); + } + + for frontend in cluster.tcp_frontends.iter() { + let entry = tcp_frontends.entry(frontend).or_insert(Vec::new()); + entry.push(worker_id.to_owned()); + } + + for backend in cluster.backends.iter() { + let entry = backends.entry(backend).or_insert(Vec::new()); + entry.push(worker_id.to_owned()); + } + } + } + } + + println!("Cluster level configuration:\n"); + + for (cluster_info, 
workers_the_cluster_is_present_on) in cluster_infos.iter() { + let mut row = Vec::new(); + row.push(cell!(cluster_info + .configuration + .as_ref() + .map(|conf| conf.cluster_id.to_owned()) + .unwrap_or_else(|| String::from("None")))); + row.push(cell!(cluster_info + .configuration + .as_ref() + .map(|conf| conf.sticky_session) + .unwrap_or_else(|| false))); + row.push(cell!(cluster_info + .configuration + .as_ref() + .map(|conf| conf.https_redirect) + .unwrap_or_else(|| false))); + + for worker in workers_the_cluster_is_present_on { + if worker_ids.contains(worker) { + row.push(cell!("X")); + } else { + row.push(cell!("")); + } + } + + cluster_table.add_row(Row::new(row)); + } + + cluster_table.printstd(); + + println!("\nHTTP frontends configuration for:\n"); + + for (key, values) in http_frontends.iter() { + let mut row = Vec::new(); + match &key.cluster_id { + Some(cluster_id) => row.push(cell!(cluster_id)), + None => row.push(cell!("-")), + } + row.push(cell!(key.hostname)); + row.push(cell!(key.path)); + + for val in values.iter() { + if worker_ids.contains(val) { + row.push(cell!("X")); + } else { + row.push(cell!("")); + } + } + + frontend_table.add_row(Row::new(row)); + } + + frontend_table.printstd(); + + println!("\nHTTPS frontends configuration for:\n"); + + for (key, values) in https_frontends.iter() { + let mut row = Vec::new(); + match &key.cluster_id { + Some(cluster_id) => row.push(cell!(cluster_id)), + None => row.push(cell!("-")), + } + row.push(cell!(key.hostname)); + row.push(cell!(key.path)); + + for val in values.iter() { + if worker_ids.contains(val) { + row.push(cell!("X")); + } else { + row.push(cell!("")); + } + } + + https_frontend_table.add_row(Row::new(row)); + } + + https_frontend_table.printstd(); + + println!("\nTCP frontends configuration:\n"); + + for (key, values) in tcp_frontends.iter() { + let mut row = vec![cell!(key.cluster_id), cell!(format!("{}", key.address))]; + + for val in values.iter() { + if worker_ids.contains(val) { + row.push(cell!(String::from("X"))); + } else { + row.push(cell!(String::from(""))); + } + } + + tcp_frontend_table.add_row(Row::new(row)); + } + + tcp_frontend_table.printstd(); + + println!("\nbackends configuration:\n"); + + for (key, values) in backends.iter() { + let mut row = vec![ + cell!(key.backend_id), + cell!(format!("{}", key.address)), + cell!(key + .backup + .map(|b| if b { "X" } else { "" }) + .unwrap_or_else(|| "")), + ]; + + for val in values { + if worker_ids.contains(&val) { + row.push(cell!("X")); + } else { + row.push(cell!("")); + } + } + + backend_table.add_row(Row::new(row)); + } + + backend_table.printstd(); + + Ok(()) +} + +/// display all clusters in a simplified table showing their hashes +fn print_cluster_hashes(worker_responses: &WorkerResponses) -> Result<(), DisplayError> { + let mut clusters_table = Table::new(); + clusters_table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + let mut header = vec![cell!("cluster id")]; + for worker_id in worker_responses.map.keys() { + header.push(cell!(format!("worker {}", worker_id))); + } + header.push(cell!("desynchronized")); + clusters_table.add_row(Row::new(header)); + + let mut cluster_hashes = HashMap::new(); + + for response_content in worker_responses.map.values() { + if let Some(ContentType::ClusterHashes(hashes)) = &response_content.content_type { + for (cluster_id, hash) in hashes.map.iter() { + cluster_hashes + .entry(cluster_id) + .or_insert(Vec::new()) + .push(hash); + } + } + } + + for (cluster_id, hashes) in 
cluster_hashes.iter() { + let mut row = vec![cell!(cluster_id)]; + for val in hashes.iter() { + row.push(cell!(format!("{val}"))); + } + + let hs: HashSet<&u64> = hashes.iter().cloned().collect(); + if hs.len() > 1 { + row.push(cell!("X")); + } else { + row.push(cell!("")); + } + + clusters_table.add_row(Row::new(row)); + } + + clusters_table.printstd(); + Ok(()) +} + +fn print_responses_by_worker( + worker_responses: &WorkerResponses, + json: bool, +) -> Result<(), DisplayError> { + for (worker_id, content) in worker_responses.map.iter() { + println!("Worker {}", worker_id); + content.display(json)?; + } + + Ok(()) +} + +fn print_certificates_with_fingerprints( + certs: &CertificatesWithFingerprints, +) -> Result<(), DisplayError> { + if certs.certs.is_empty() { + return Ok(println!("No certificates match your request.")); + } + + print_certificates_with_validity(&certs.certs)?; + Ok(()) +} + +pub fn print_certificates_with_validity( + certs: &BTreeMap, +) -> Result<(), DisplayError> { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_CLEAN); + table.add_row(row![ + "fingeprint", + "valid not before", + "valide not after", + "domain names", + ]); + + for (fingerprint, cert) in certs { + let (_unparsed, pem_certificate) = + x509_parser::pem::parse_x509_pem(cert.certificate.as_bytes()) + .expect("Could not parse pem certificate"); + + let x509_certificate = pem_certificate + .parse_x509() + .expect("Could not parse x509 certificate"); + + let validity = x509_certificate.validity(); + + table.add_row(row!( + fingerprint, + format_datetime(validity.not_before)?, + format_datetime(validity.not_after)?, + concatenate_vector(&cert.names), + )); + } + table.printstd(); + + Ok(()) +} + +fn print_certificates_by_address(list: &ListOfCertificatesByAddress) -> Result<(), DisplayError> { + for certs in list.certificates.iter() { + println!("\t{}:", certs.address); + + for summary in certs.certificate_summaries.iter() { + println!("\t\t{}", summary); + } + } + Ok(()) +} + +fn print_request_counts(request_counts: &RequestCounts) -> Result<(), DisplayError> { + let mut table = Table::new(); + table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + table.add_row(row!["request type", "count"]); + + for (request_type, count) in &request_counts.map { + table.add_row(row!(request_type, count)); + } + table.printstd(); + Ok(()) +} + +fn format_tags_to_string(tags: &BTreeMap) -> String { + tags.iter() + .map(|(k, v)| format!("{k}={v}")) + .collect::>() + .join(", ") +} + +fn list_string_vec(vec: &[String]) -> String { + let mut output = String::new(); + for item in vec.iter() { + output.push_str(item); + output.push('\n'); + } + output +} + +// ISO 8601 +fn format_datetime(asn1_time: ASN1Time) -> Result { + let datetime = asn1_time.to_datetime(); + + let formatted = datetime + .format(&format_description::well_known::Iso8601::DEFAULT) + .map_err(|_| DisplayError::DateTime)?; + Ok(formatted) +} + +/// Creates an empty table of the form +/// ```text +/// ┌────────────┬─────────────┬───────────┬────────┐ +/// │ │ header │ header │ header │ +/// ├────────────┼─────────────┼───────────┼────────┤ +/// │ cluster_id │ │ │ │ +/// ├────────────┼─────────────┼───────────┼────────┤ +/// │ cluster_id │ │ │ │ +/// ├────────────┼─────────────┼───────────┼────────┤ +/// │ cluster_id │ │ │ │ +/// └────────────┴─────────────┴───────────┴────────┘ +/// ``` +fn create_cluster_table(headers: Vec<&str>, data: &BTreeMap) -> Table { + let mut table = Table::new(); + 
table.set_format(*prettytable::format::consts::FORMAT_BOX_CHARS); + let mut row_header: Vec<_> = headers.iter().map(|h| cell!(h)).collect(); + for ref key in data.keys() { + row_header.push(cell!(&key)); + } + table.add_row(Row::new(row_header)); + table +} diff --git a/command/src/proto/mod.rs b/command/src/proto/mod.rs index 48f7972f6..516ed9e03 100644 --- a/command/src/proto/mod.rs +++ b/command/src/proto/mod.rs @@ -4,6 +4,18 @@ pub mod command; /// Implementation of fmt::Display for the protobuf types, used in the CLI pub mod display; +#[derive(thiserror::Error, Debug)] +pub enum DisplayError { + #[error("Could not display content")] + DisplayContent(String), + #[error("Error while parsing response to JSON")] + Json(serde_json::Error), + #[error("got the wrong response content type: {0}")] + WrongResponseType(String), + #[error("Could not format the datetime to ISO 8601")] + DateTime, +} + // Simple helper to build ResponseContent from ContentType impl From for command::ResponseContent { fn from(value: command::response_content::ContentType) -> Self { diff --git a/e2e/src/tests/tests.rs b/e2e/src/tests/tests.rs index 284caf5e8..00db4ad81 100644 --- a/e2e/src/tests/tests.rs +++ b/e2e/src/tests/tests.rs @@ -1409,7 +1409,6 @@ pub fn try_head() -> State { State::Success } - #[test] fn test_sync() { assert_eq!(try_sync(10, 100), State::Success); diff --git a/lib/src/tls.rs b/lib/src/tls.rs index 44081449b..f37df22de 100644 --- a/lib/src/tls.rs +++ b/lib/src/tls.rs @@ -148,7 +148,7 @@ impl Clone for ParsedCertificateAndKey { // ----------------------------------------------------------------------------- // GenericCertificateResolverError enum -#[derive(thiserror::Error, Clone, Debug)] +#[derive(thiserror::Error, Debug)] pub enum GenericCertificateResolverError { #[error("failed to get common name and subject alternate names from pem, {0}")] InvalidCommonNameAndSubjectAlternateNames(CertificateError),
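
For readers skimming the diff: the net effect on the CLI side is that the per-content-type `print_*` matches in `bin/src/ctl/command.rs` collapse into a single call to the new `Response::display`, with the table-building code now living in `sozu_command_lib` (hence the `prettytable-rs` dependency added to `command/Cargo.toml`). Below is a minimal sketch of that calling pattern, assuming only the `Response`, `ResponseStatus`, and `DisplayError` types shown in this diff; the function name `handle_response` is illustrative, and the channel/timeout loop of `CommandManager` is simplified away.

```rust
use sozu_command_lib::proto::command::{Response, ResponseStatus};

/// Sketch only (not part of the patch): how a CLI caller consumes the
/// relocated display logic. `json` mirrors the `--json` flag carried by
/// `CommandManager` in this diff.
fn handle_response(response: Response, json: bool) -> anyhow::Result<()> {
    match response.status() {
        // Processing responses are transient; the real code keeps reading
        // the channel until a final status arrives.
        ResponseStatus::Processing => println!("Proxy is processing: {}", response.message),
        // Both Ok and Failure now defer to the shared display method, which
        // prints the message and renders any ResponseContent as tables or
        // pretty-printed JSON. DisplayError converts into anyhow::Error here.
        ResponseStatus::Failure | ResponseStatus::Ok => response.display(json)?,
    }
    Ok(())
}
```

Sharing one `display` path for both `Ok` and `Failure` is what allows the `query_cluster` and `query_certificates` request builders to go through the generic `send_request` instead of carrying their own printing loops.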