Skip to content

Commit

Permalink
the term hash_tag is renamed to the more meaningful name community
Browse files Browse the repository at this point in the history
  • Loading branch information
gmawdo committed Dec 28, 2024
1 parent 20a66fb commit cc7c852
Show file tree
Hide file tree
Showing 8 changed files with 134 additions and 134 deletions.
14 changes: 7 additions & 7 deletions benches/readdir_benchmark.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,11 @@ lazy_static! {

async fn setup_test_data(fs: &SharesFS, size: usize) -> Result<(), nfsstat3> {
let root_id = 1u64;
let (_namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (_namespace_id, community) = SharesFS::get_namespace_id_and_community().await;

// Create root directory with metadata
fs.create_test_entry(0, "/", root_id).await?;
fs.data_store.hset_multiple(&format!("{}/", hash_tag), &[
fs.data_store.hset_multiple(&format!("{}/", community), &[
("type", "2"), ("mode", "0755"), ("nlink", "2"), ("uid", "0"), ("gid", "0"),
("size", "4096"), ("fileid", &root_id.to_string()), ("used", "4096"), ("rdev", "0"),
("access_time_secs", "0"), ("access_time_nsecs", "0"), ("modification_time_secs", "0"),
Expand All @@ -30,7 +30,7 @@ async fn setup_test_data(fs: &SharesFS, size: usize) -> Result<(), nfsstat3> {
// Create test directory with metadata
let test_dir_id = 2u64;
fs.create_test_entry(root_id, "/test_dir", test_dir_id).await?;
fs.data_store.hset_multiple(&format!("{}/test_dir", hash_tag), &[
fs.data_store.hset_multiple(&format!("{}/test_dir", community), &[
("type", "2"), ("mode", "0755"), ("nlink", "2"), ("uid", "0"), ("gid", "0"),
("size", "4096"), ("fileid", &test_dir_id.to_string()), ("used", "4096"), ("rdev", "0"),
("access_time_secs", "0"), ("access_time_nsecs", "0"), ("modification_time_secs", "0"),
Expand All @@ -42,7 +42,7 @@ async fn setup_test_data(fs: &SharesFS, size: usize) -> Result<(), nfsstat3> {
let file_id = (i + 3) as u64;
let path = format!("/test_dir/file_{}", i);
fs.create_test_entry(test_dir_id, &path, file_id).await?;
fs.data_store.hset_multiple(&format!("{}{}", hash_tag, path), &[
fs.data_store.hset_multiple(&format!("{}{}", community, path), &[
("type", "1"), ("mode", "0644"), ("nlink", "1"), ("uid", "0"), ("gid", "0"),
("size", "0"), ("fileid", &file_id.to_string()), ("used", "0"), ("rdev", "0"),
("access_time_secs", "0"), ("access_time_nsecs", "0"), ("modification_time_secs", "0"),
Expand All @@ -54,18 +54,18 @@ async fn setup_test_data(fs: &SharesFS, size: usize) -> Result<(), nfsstat3> {

#[allow(dead_code)]
async fn print_fs_structure(fs: &SharesFS) -> Result<(), nfsstat3> {
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;

println!("\nFilesystem Structure:");
println!("--------------------");

// Get all nodes from the data store
let nodes_key = format!("{}/{}_nodes", hash_tag, namespace_id);
let nodes_key = format!("{}/{}_nodes", community, namespace_id);
let _nodes = fs.data_store.zscan_match(&nodes_key, "").await
.map_err(|_| nfsstat3::NFS3ERR_IO)?;

// Get all path mappings
let path_key = format!("{}/{}_id_to_path", hash_tag, namespace_id);
let path_key = format!("{}/{}_id_to_path", community, namespace_id);
let mappings = fs.data_store.hgetall(&path_key).await
.map_err(|_| nfsstat3::NFS3ERR_IO)?;

Expand Down
18 changes: 9 additions & 9 deletions src/backingstore/redis_data_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -192,9 +192,9 @@ impl DataStore for RedisDataStore {

async fn init_user_directory(&self, mount_path: &str) -> Result<(), DataStoreError> {
let mut conn = self.pool.get().map_err(|_| DataStoreError::ConnectionError)?;
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
let path = format!("/{}", namespace_id);
let key = format!("{}{}", hash_tag, mount_path);
let key = format!("{}{}", community, mount_path);
let exists_response: bool = conn.exists(&key).map_err(|_| DataStoreError::OperationFailed)?;

if exists_response {
Expand All @@ -206,11 +206,11 @@ impl DataStore for RedisDataStore {
let permissions = 777;
let score = if mount_path == "/" { 1.0 } else { 2.0 };

let nodes = format!("{}/{}_nodes", hash_tag, namespace_id);
let nodes = format!("{}/{}_nodes", community, namespace_id);
let key_exists: bool = conn.exists(&nodes).map_err(|_| DataStoreError::OperationFailed)?;

let fileid: u64 = if key_exists {
conn.incr(format!("{}/{}_next_fileid", hash_tag, namespace_id), 1)
conn.incr(format!("{}/{}_next_fileid", community, namespace_id), 1)
.map_err(|_| DataStoreError::OperationFailed)?
} else {
1
Expand All @@ -222,7 +222,7 @@ impl DataStore for RedisDataStore {

// Instead of using pipeline, execute commands individually
let _: () = conn.zadd(
format!("{}/{}_nodes", hash_tag, namespace_id),
format!("{}/{}_nodes", community, namespace_id),
mount_path,
score
).map_err(|_| DataStoreError::OperationFailed)?;
Expand Down Expand Up @@ -251,27 +251,27 @@ impl DataStore for RedisDataStore {

// In the init_user_directory function, modify the hset_multiple call:
let _: () = conn.hset_multiple(
format!("{}{}", hash_tag, mount_path),
format!("{}{}", community, mount_path),
&hash_fields
).map_err(|_| DataStoreError::OperationFailed)?;

// Set path to id mapping
let _: () = conn.hset(
format!("{}{}_path_to_id", hash_tag, path),
format!("{}{}_path_to_id", community, path),
mount_path,
fileid
).map_err(|_| DataStoreError::OperationFailed)?;

// Set id to path mapping
let _: () = conn.hset(
format!("{}{}_id_to_path", hash_tag, path),
format!("{}{}_id_to_path", community, path),
fileid.to_string(),
mount_path
).map_err(|_| DataStoreError::OperationFailed)?;

if fileid == 1 {
let _: () = conn.set(
format!("{}{}_next_fileid", hash_tag, path),
format!("{}{}_next_fileid", community, path),
1
).map_err(|_| DataStoreError::OperationFailed)?;
}
Expand Down
22 changes: 11 additions & 11 deletions src/backingstore/rocksdb_data_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,11 +53,11 @@ impl DataStore for RocksDBDataStore {
}

async fn init_user_directory(&self, mount_path: &str) -> Result<(), DataStoreError> {
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
debug!("namespace_id: {:?}", namespace_id);
debug!("hash_tag: {:?}", hash_tag);
debug!("community: {:?}", community);
let path = format!("/{}", namespace_id);
let key = format!("{}{}", hash_tag, mount_path);
let key = format!("{}{}", community, mount_path);
debug!("===============rocksdb init_user_directory({})", key);

// Check if the directory already exists
Expand All @@ -70,20 +70,20 @@ impl DataStore for RocksDBDataStore {
let permissions = 777;
let score = if mount_path == "/" { 1.0 } else { 2.0 };

let nodes = format!("{}/{}_nodes", hash_tag, namespace_id);
let nodes = format!("{}/{}_nodes", community, namespace_id);
debug!("===============rocksdb init_user_directory({}) nodes", nodes);
let key_exists: bool = self.db.get(&nodes).map_err(|_| DataStoreError::OperationFailed)?.is_some();
debug!("===============rocksdb init_user_directory({}) key_exists?", key_exists);

let next_fileid_key = format!("{}/{}_next_fileid", hash_tag, namespace_id);
let next_fileid_key = format!("{}/{}_next_fileid", community, namespace_id);
let fileid = self.incr(&next_fileid_key).await?;

let system_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
let epoch_seconds = system_time.as_secs();
let epoch_nseconds = system_time.subsec_nanos();

// Add to sorted set (equivalent to Redis ZADD)
let nodes_key = format!("{}/{}_nodes:{}", hash_tag, namespace_id, mount_path);
let nodes_key = format!("{}/{}_nodes:{}", community, namespace_id, mount_path);
self.db.put(nodes_key.as_bytes(), score.to_string().as_bytes())
.map_err(|_| DataStoreError::OperationFailed)?;

Expand All @@ -110,26 +110,26 @@ impl DataStore for RocksDBDataStore {
];

// Use hset_multiple instead of individual puts
let key = format!("{}{}", hash_tag, mount_path);
let key = format!("{}{}", community, mount_path);
self.hset_multiple(&key, &hash_fields).await?;

// Set path to id mapping
let path_to_id_key = format!("{}{}_path_to_id", hash_tag, path);
let path_to_id_key = format!("{}{}_path_to_id", community, path);
self.db.put(
format!("{}:{}", path_to_id_key, mount_path).as_bytes(),
fileid_str.as_bytes()
).map_err(|_| DataStoreError::OperationFailed)?;

// Set id to path mapping
let id_to_path_key = format!("{}{}_id_to_path", hash_tag, path);
let id_to_path_key = format!("{}{}_id_to_path", community, path);
self.db.put(
format!("{}:{}", id_to_path_key, fileid_str).as_bytes(),
mount_path.as_bytes()
).map_err(|_| DataStoreError::OperationFailed)?;

if fileid == 1 {
self.db.put(
format!("{}{}_next_fileid", hash_tag, path).as_bytes(),
format!("{}{}_next_fileid", community, path).as_bytes(),
b"1"
).map_err(|_| DataStoreError::OperationFailed)?;
}
Expand Down Expand Up @@ -317,7 +317,7 @@ impl DataStore for RocksDBDataStore {
let mut results = Vec::new();

// The key format should match what we use in zadd
// In zadd we use: format!("{}/{}_nodes:{}", hash_tag, namespace_id, mount_path)
// In zadd we use: format!("{}/{}_nodes:{}", community, namespace_id, mount_path)
let prefix = format!("{}", key);

// Iterate over all entries with this prefix
Expand Down
4 changes: 2 additions & 2 deletions src/kernel/vfs/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -232,8 +232,8 @@ pub trait NFSFileSystem: Sync {
}

async fn get_id_from_path(&self, path: &str, data_store: &dyn DataStore) -> Result<fileid3, nfsstat3> {
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let key = format!("{}/{}_path_to_id", hash_tag, namespace_id);
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
let key = format!("{}/{}_path_to_id", community, namespace_id);

let id_str = data_store.hget(&key, path)
.await
Expand Down
50 changes: 25 additions & 25 deletions src/sharesfs/directories.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,17 +18,17 @@ use std::os::unix::ffi::OsStrExt;
use tracing::debug;
impl SharesFS {
pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Result<(), nfsstat3> {
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
//Rename the metadata hashkey
let _ = self.data_store.rename(
&format!("{}{}", hash_tag, from_path),
&format!("{}{}", hash_tag, to_path)
&format!("{}{}", community, from_path),
&format!("{}{}", community, to_path)
).await.map_err(|_| nfsstat3::NFS3ERR_IO);
//Rename entries in hashset
debug!("rename_directory_file {:?} {:?}", from_path, to_path);

// Create a pattern to match all keys under the old path
let pattern = format!("{}{}{}", hash_tag, from_path, "/*");
let pattern = format!("{}{}{}", community, from_path, "/*");

// Retrieve all keys matching the pattern
debug!("Retrieve all keys matching the pattern {:?}", pattern);
Expand All @@ -52,7 +52,7 @@ pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Res
.map_err(|_| nfsstat3::NFS3ERR_IO);
}
//Rename entries in sorted set (_nodes)
let key = format!("{}/{}_nodes", hash_tag, namespace_id);
let key = format!("{}/{}_nodes", community, namespace_id);

// Retrieve all members of the sorted set with their scores
debug!("Retrieve all members of the sorted set with their scores {:?}", key);
Expand Down Expand Up @@ -130,8 +130,8 @@ pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Res
}
}
//Rename entries in path_to_id and id_to_path hash
let path_to_id_key = format!("{}/{}_path_to_id", hash_tag, namespace_id);
let id_to_path_key = format!("{}/{}_id_to_path", hash_tag, namespace_id);
let path_to_id_key = format!("{}/{}_path_to_id", community, namespace_id);
let id_to_path_key = format!("{}/{}_id_to_path", community, namespace_id);

// Retrieve all the members of path_to_id hash
debug!("Retrieve all the members of path_to_id hash for key {:?}", path_to_id_key);
Expand Down Expand Up @@ -208,7 +208,7 @@ pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Res
return Err(nfsstat3::NFS3ERR_IO);
}

let _ = self.data_store.hset_multiple(&format!("{}{}", hash_tag, to_path),
let _ = self.data_store.hset_multiple(&format!("{}{}", community, to_path),
&[
("change_time_secs", &epoch_seconds.to_string()),
("change_time_nsecs", &epoch_nseconds.to_string()),
Expand Down Expand Up @@ -267,7 +267,7 @@ pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Res
return Err(nfsstat3::NFS3ERR_IO); // Replace with appropriate nfsstat3 error
}

let _ = self.data_store.hset_multiple(&format!("{}{}", hash_tag, new_directory_path),
let _ = self.data_store.hset_multiple(&format!("{}{}", community, new_directory_path),
&[
("change_time_secs", &epoch_seconds.to_string()),
("change_time_nsecs", &epoch_nseconds.to_string()),
Expand All @@ -287,16 +287,16 @@ pub async fn rename_directory_file(&self, from_path: &str, to_path: &str) -> Res

pub async fn remove_directory_file(&self, path: &str) -> Result<(), nfsstat3> {

let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
let pattern = format!("{}/*", path);
let sorted_set_key = format!("{}/{}_nodes", hash_tag, namespace_id);
let sorted_set_key = format!("{}/{}_nodes", community, namespace_id);
let match_found = self.get_member_keys(&pattern, &sorted_set_key).await?;
if match_found {
return Err(nfsstat3::NFS3ERR_NOTEMPTY);
}
debug!("remove_directory_file {:?}", path);
let dir_id = self.data_store.hget(
&format!("{}/{}_path_to_id", hash_tag, namespace_id),
&format!("{}/{}_path_to_id", community, namespace_id),
path
).await;

Expand All @@ -306,29 +306,29 @@ pub async fn remove_directory_file(&self, path: &str) -> Result<(), nfsstat3> {
};
// Remove the directory
// Remove the node from the sorted set
debug!("Remove the node from the sorted set {:?}", format!("{}/{}_nodes", hash_tag, namespace_id));
debug!("Remove the node from the sorted set {:?}", format!("{}/{}_nodes", community, namespace_id));
let _ = self.data_store.zrem(
&format!("{}/{}_nodes", hash_tag, namespace_id),
&format!("{}/{}_nodes", community, namespace_id),
path
).await.map_err(|_| nfsstat3::NFS3ERR_IO);

// Delete the metadata hash associated with the node
debug!("Delete the metadata hash associated with the node {:?}", format!("{}{}", hash_tag, path));
let _ = self.data_store.delete(&format!("{}{}", hash_tag, path))
debug!("Delete the metadata hash associated with the node {:?}", format!("{}{}", community, path));
let _ = self.data_store.delete(&format!("{}{}", community, path))
.await
.map_err(|_| nfsstat3::NFS3ERR_IO);

// Remove the directory from the path-to-id mapping
debug!("Remove the directory from the path-to-id mapping {:?}", format!("{}/{}_path_to_id", hash_tag, namespace_id));
debug!("Remove the directory from the path-to-id mapping {:?}", format!("{}/{}_path_to_id", community, namespace_id));
let _ = self.data_store.hdel(
&format!("{}/{}_path_to_id", hash_tag, namespace_id),
&format!("{}/{}_path_to_id", community, namespace_id),
path
).await.map_err(|_| nfsstat3::NFS3ERR_IO);

// Remove the directory from the id-to-path mapping
debug!("Remove the directory from the id-to-path mapping {:?}", format!("{}/{}_id_to_path", hash_tag, namespace_id));
debug!("Remove the directory from the id-to-path mapping {:?}", format!("{}/{}_id_to_path", community, namespace_id));
let _ = self.data_store.hdel(
&format!("{}/{}_id_to_path", hash_tag, namespace_id),
&format!("{}/{}_id_to_path", community, namespace_id),
&value
).await.map_err(|_| nfsstat3::NFS3ERR_IO);

Expand All @@ -337,8 +337,8 @@ pub async fn remove_directory_file(&self, path: &str) -> Result<(), nfsstat3> {
}

pub async fn handle_mkdir(&self, dirid: fileid3, dirname: &filename3) -> Result<(fileid3, fattr3), nfsstat3> {
let (namespace_id, hash_tag) = SharesFS::get_namespace_id_and_hash_tag().await;
let key1 = format!("{}/{}_id_to_path", hash_tag, namespace_id);
let (namespace_id, community) = SharesFS::get_namespace_id_and_community().await;
let key1 = format!("{}/{}_id_to_path", community, namespace_id);

// Get parent directory path from the share store
let parent_path: String = self.data_store.hget(
Expand All @@ -361,11 +361,11 @@ pub async fn remove_directory_file(&self, path: &str) -> Result<(), nfsstat3> {

debug!("mkdir: {:?}", new_dir_path);

// let key2 = format!("{}/{}_path_to_id", hash_tag, namespace_id);
// let key2 = format!("{}/{}_path_to_id", community, namespace_id);

// Check if directory already exists
let exists: bool = match self.data_store.zscore(
&format!("{}/{}_nodes", hash_tag, namespace_id),
&format!("{}/{}_nodes", community, namespace_id),
&new_dir_path
).await {
Ok(Some(_)) => true,
Expand All @@ -381,7 +381,7 @@ pub async fn remove_directory_file(&self, path: &str) -> Result<(), nfsstat3> {
}

// Create new directory ID
let key = format!("{}/{}_next_fileid", hash_tag, namespace_id);
let key = format!("{}/{}_next_fileid", community, namespace_id);

let new_dir_id: fileid3 = match self.data_store.incr(&key).await {
Ok(id) => {
Expand Down
Loading

0 comments on commit cc7c852

Please sign in to comment.