Commit

use only our sync
ec2 committed Oct 1, 2024
1 parent 3dde460 commit 3eae93e
Showing 4 changed files with 12 additions and 56 deletions.
28 changes: 2 additions & 26 deletions src/bindgen/wallet.rs
@@ -126,32 +126,8 @@ impl WebWallet {
         self.inner.suggest_scan_ranges().await
     }
 
-    /// Synchronize the wallet with the blockchain up to the tip using zcash_client_backend's algo
-    pub async fn sync(&self) -> Result<(), Error> {
-        assert!(!thread::is_web_worker_thread());
-
-        let db = self.inner.clone();
-
-        let sync_handler = thread::Builder::new()
-            .name("sync".to_string())
-            .spawn_async(|| async {
-                assert!(thread::is_web_worker_thread());
-                tracing::debug!(
-                    "Current num threads (wasm_thread) {}",
-                    rayon::current_num_threads()
-                );
-
-                let db = db;
-                db.sync().await.unwrap_throw();
-            })
-            .unwrap_throw()
-            .join_async();
-        sync_handler.await.unwrap();
-        Ok(())
-    }
-
     /// Synchronize the wallet with the blockchain up to the tip using our newest and bestest
-    pub async fn sync3(&self) -> Result<(), Error> {
+    pub async fn sync(&self) -> Result<(), Error> {
         assert!(!thread::is_web_worker_thread());
         tracing::debug!("SYNC 3 Main!!!!");
         let db = self.inner.clone();
@@ -166,7 +142,7 @@
                 );
 
                 let db = db;
-                db.sync3().await.unwrap_throw();
+                db.sync().await.unwrap_throw();
             })
             .unwrap_throw()
             .join_async();
7 changes: 1 addition & 6 deletions src/lib.rs
@@ -19,10 +19,6 @@ use wasm_bindgen::prelude::*;
 /// The maximum number of checkpoints to store in each shard-tree
 pub const PRUNING_DEPTH: usize = 100;
 
-
-use zcash_client_memory::MemoryWalletDb;
-use zcash_primitives::consensus;
-
 #[cfg(feature = "wasm-parallel")]
 pub use wasm_bindgen_rayon::init_thread_pool;
 
@@ -34,5 +30,4 @@ pub fn init_thread_pool(_threads: usize) {}
 #[wasm_bindgen]
 pub struct BlockRange(pub u32, pub u32);
 
-
-pub mod sync3;
+pub mod sync;
9 changes: 6 additions & 3 deletions src/sync3/mod.rs → src/sync.rs
@@ -92,9 +92,12 @@ where
     // any shielded scanning, to ensure that we discover any UTXOs between the old
     // fully-scanned height and the current chain tip.
     // #[cfg(feature = "transparent-inputs")]
-    let account_ids = db_data.read().await.get_account_ids().map_err(Error::Wallet)?;
-    for account_id in account_ids
-    {
+    let account_ids = db_data
+        .read()
+        .await
+        .get_account_ids()
+        .map_err(Error::Wallet)?;
+    for account_id in account_ids {
         let start_height = db_data
             .read()
             .await
24 changes: 3 additions & 21 deletions src/wallet.rs
@@ -9,7 +9,7 @@ use tonic::{
 };
 
 use crate::error::Error;
-use crate::{sync3, BlockRange};
+use crate::BlockRange;
 
 use serde::{Serialize, Serializer};
 use std::fmt::Debug;
@@ -41,7 +41,6 @@ use zcash_primitives::transaction::fees::zip317::FeeRule;
 use zcash_primitives::transaction::TxId;
 use zcash_proofs::prover::LocalTxProver;
 
-use zcash_client_backend::sync::run;
 const BATCH_SIZE: u32 = 10000;
 
 /// # A Zcash wallet
@@ -224,33 +223,16 @@ where
         })?)
     }
 
-    pub async fn sync3(&self) -> Result<(), Error> {
-        let mut client = self.client.clone();
-        // TODO: This should be held in the Wallet struct so we can download in parallel
-        let db_cache = MemBlockCache::new();
-
-        sync3::run(
-            &mut client,
-            &self.network.clone(),
-            &db_cache,
-            self.db.clone(),
-            BATCH_SIZE,
-        )
-        .await
-        .map_err(Into::into)
-    }
-
     pub async fn sync(&self) -> Result<(), Error> {
         let mut client = self.client.clone();
         // TODO: This should be held in the Wallet struct so we can download in parallel
         let db_cache = MemBlockCache::new();
 
-        let mut db = self.db.write().await;
-        run(
+        crate::sync::run(
             &mut client,
             &self.network.clone(),
             &db_cache,
-            &mut *db,
+            self.db.clone(),
             BATCH_SIZE,
         )
         .await
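The net effect of the commit is that WebWallet exposes a single sync() entry point backed by crate::sync::run, with the old zcash_client_backend-based sync removed. A minimal caller-side sketch, assuming the crate's WebWallet and Error types are in scope (wallet construction is not shown in this diff, and the resync helper below is hypothetical):

// Hypothetical caller, for illustration only: code that previously called
// `wallet.sync3()` now calls `wallet.sync()`. The worker-thread offload
// happens inside the method, so the caller just awaits the result.
async fn resync(wallet: &WebWallet) -> Result<(), Error> {
    wallet.sync().await
}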
