Add Support for Tracing #453

Merged · 1 commit · Oct 15, 2024
53 changes: 29 additions & 24 deletions akd/Cargo.toml
@@ -14,8 +14,26 @@ readme = "../README.md"
whatsapp_v1 = ["akd_core/whatsapp_v1"]
experimental = ["akd_core/experimental"]

rand = ["dep:rand"]
# Default features mix (experimental + audit-proof protobuf mgmt support)
default = [
"public_auditing",
"parallel_vrf",
"parallel_insert",
"preload_history",
"greedy_lookup_preload",
"experimental",
]

bench = ["experimental", "public_tests", "tokio/rt-multi-thread"]
# Greedy loading of lookup proof nodes
greedy_lookup_preload = []
public_auditing = ["dep:protobuf", "akd_core/protobuf"]
# Parallelize node insertion during publish
parallel_insert = []
# Parallelize VRF calculations during publish
parallel_vrf = ["akd_core/parallel_vrf"]
# Enable pre-loading of the nodes when generating history proofs
preload_history = []
public_tests = [
"rand",
"dep:colored",
@@ -25,30 +43,16 @@ public_tests = [
"akd_core/rand",
"dep:paste",
]
public_auditing = ["dep:protobuf", "akd_core/protobuf"]
serde_serialization = ["dep:serde", "akd_core/serde_serialization"]
rand = ["dep:rand"]
# Collect runtime metrics on db access calls + timing
runtime_metrics = []
# Parallelize VRF calculations during publish
parallel_vrf = ["akd_core/parallel_vrf"]
# Parallelize node insertion during publish
parallel_insert = []
# Enable pre-loading of the nodes when generating history proofs
preload_history = []
serde_serialization = ["dep:serde", "akd_core/serde_serialization"]
# TESTING ONLY: Artificially slow the in-memory database (for benchmarking)
slow_internal_db = []
# Greedy loading of lookup proof nodes
greedy_lookup_preload = []

# Default features mix (experimental + audit-proof protobuf mgmt support)
default = [
"public_auditing",
"parallel_vrf",
"parallel_insert",
"preload_history",
"greedy_lookup_preload",
"experimental",
]
# Tracing instrumentation
tracing = ["dep:tracing"]
# Tracing-based instrumentation
tracing_instrument = ["tracing/attributes"]

[dependencies]
## Required dependencies ##
@@ -63,12 +67,13 @@ log = { version = "0.4", features = ["kv_unstable"] }
tokio = { version = "1", features = ["sync", "time", "rt"] }

## Optional dependencies ##
serde = { version = "1", features = ["derive"], optional = true }
rand = { version = "0.8", optional = true }
colored = { version = "2", optional = true }
once_cell = { version = "1", optional = true }
protobuf = { version = "3", optional = true }
paste = { version = "1", optional = true }
protobuf = { version = "3", optional = true }
rand = { version = "0.8", optional = true }
serde = { version = "1", features = ["derive"], optional = true }
tracing = { version = "0.1.40", optional = true }

[dev-dependencies]
criterion = "0.5"
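The new `tracing` and `tracing_instrument` features are opt-in, so existing builds are unaffected. A minimal sketch of how a downstream crate might consume the spans, assuming the common `tracing_subscriber` crate (not part of this PR) as the collector:

```rust
// Sketch only: enable the feature in the consumer's Cargo.toml, e.g.
//   akd = { version = "...", features = ["tracing_instrument"] }
// and install any `tracing` subscriber before calling into akd.
fn main() {
    // `tracing_subscriber` is an assumption here; any Subscriber works.
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .init();

    // ... build the AKD directory and issue calls as usual; each method
    // annotated in this PR now opens a span around its execution.
}
```

Splitting the span-emitting attributes into a separate `tracing_instrument` feature (rather than bundling them into `tracing`) appears to let consumers depend on the lightweight `tracing` facade without pulling in the `attributes` proc-macro support.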
16 changes: 11 additions & 5 deletions akd/src/append_only_zks.rs
@@ -9,6 +9,7 @@

use crate::hash::EMPTY_DIGEST;
use crate::helper_structs::LookupInfo;
use crate::log::{debug, info};
use crate::storage::manager::StorageManager;
use crate::storage::types::StorageType;
use crate::tree_node::{
@@ -22,8 +23,8 @@ use crate::{
AppendOnlyProof, AzksElement, AzksValue, Digest, Direction, MembershipProof, NodeLabel,
NonMembershipProof, PrefixOrdering, SiblingProof, SingleAppendOnlyProof, SizeOf, ARITY,
};

use async_recursion::async_recursion;
use log::info;
use std::cmp::Ordering;
#[cfg(feature = "greedy_lookup_preload")]
use std::collections::HashSet;
@@ -528,7 +529,7 @@ impl Azks {
}
}

/// Builds all of the POSSIBLE paths along the route from root node to
/// Builds all the POSSIBLE paths along the route from root node to
/// leaf node. This will be grossly over-estimating the true size of the
/// tree and the number of nodes required to be fetched, however
/// it allows a single batch-get call in necessary scenarios
@@ -564,7 +565,7 @@ impl Azks {
Ok(results)
}

/// Preload for a single lookup operation by loading all of the nodes along
/// Preload for a single lookup operation by loading all the nodes along
/// the direct path, and the children of resolved nodes on the path. This
/// minimizes the number of batch_get operations to the storage layer which are
/// called
@@ -676,13 +677,14 @@ impl Azks {
.collect();
}

info!("Preload of tree ({} nodes) completed", load_count);
debug!("Preload of tree ({} nodes) completed", load_count);

Ok(load_count)
}

/// Returns the Merkle membership proof for the trie as it stood at epoch
// Assumes the verifier has access to the root at epoch
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn get_membership_proof<TC: Configuration, S: Database>(
&self,
storage: &StorageManager<S>,
@@ -697,6 +699,7 @@ impl Azks {
/// In a compressed trie, the proof consists of the longest prefix
/// of the label that is included in the trie, as well as its children, to show that
/// none of the children is equal to the given label.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn get_non_membership_proof<TC: Configuration, S: Database>(
&self,
storage: &StorageManager<S>,
@@ -759,6 +762,7 @@ impl Azks {
/// **RESTRICTIONS**: Note that `start_epoch` and `end_epoch` are valid only when the following are true
/// * `start_epoch` <= `end_epoch`
/// * `start_epoch` and `end_epoch` are both existing epochs of this AZKS
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn get_append_only_proof<TC: Configuration, S: Database + 'static>(
&self,
storage: &StorageManager<S>,
@@ -802,7 +806,7 @@ impl Azks {
load_count
);
}
storage.log_metrics(log::Level::Info).await;
storage.log_metrics().await;

let (unchanged, leaves) = Self::get_append_only_proof_helper::<TC, _>(
latest_epoch,
@@ -1027,6 +1031,7 @@ impl Azks {
}

/// Gets the root hash for this azks
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn get_root_hash<TC: Configuration, S: Database>(
&self,
storage: &StorageManager<S>,
@@ -1037,6 +1042,7 @@

/// Gets the root hash of the tree at the latest epoch if the passed epoch
/// is equal to the latest epoch. Will return an error otherwise.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub(crate) async fn get_root_hash_safe<TC: Configuration, S: Database>(
&self,
storage: &StorageManager<S>,
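The repeated `#[cfg_attr(...)]` annotations above all follow the same pattern; a minimal, self-contained sketch of what the gating does (the function name and body are hypothetical, not from this PR):

```rust
// The attribute expands to `#[tracing::instrument(skip_all)]` only when the
// `tracing_instrument` feature is enabled, so builds without it compile no
// tracing code at all. `skip_all` keeps the span from recording any of the
// arguments, which avoids requiring them to implement `Debug` and keeps
// large values (storage handles, proofs) out of the span; only the function
// name is used as the span name.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn get_example_proof(epoch: u64) -> u64 {
    // Work done here is timed by the enclosing span when instrumentation
    // is enabled; otherwise this is an ordinary async fn.
    epoch
}
```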
4 changes: 3 additions & 1 deletion akd/src/auditor.rs
@@ -18,6 +18,7 @@ use crate::{
};

/// Verifies an audit proof, given start and end hashes for a merkle patricia tree.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn audit_verify<TC: Configuration>(
hashes: Vec<Digest>,
proof: AppendOnlyProof,
@@ -52,7 +53,8 @@ pub async fn audit_verify<TC: Configuration>(
Ok(())
}

/// Helper for audit, verifies an append-only proof
/// Helper for audit, verifies an append-only proof.
#[cfg_attr(feature = "tracing_instrument", tracing::instrument(skip_all))]
pub async fn verify_consecutive_append_only<TC: Configuration>(
proof: &SingleAppendOnlyProof,
start_hash: Digest,
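Beyond the attribute-based spans added here, a caller can layer its own span around an audit. A sketch (names hypothetical), using `tracing::Instrument` so the span stays attached across `.await` points instead of holding an entered guard across the await:

```rust
use tracing::Instrument;

// Wrap any future (for example, a call to akd::auditor::audit_verify) in a
// caller-defined span; the span closes when the future completes.
async fn with_audit_span<F, T>(fut: F) -> T
where
    F: std::future::Future<Output = T>,
{
    let span = tracing::info_span!("audit_verify_wrapper");
    fut.instrument(span).await
}
```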