diff --git a/config.js b/config.js index 7faa54bbde..0a73d84e23 100644 --- a/config.js +++ b/config.js @@ -717,6 +717,8 @@ config.NSFS_WHITELIST = []; // NSFS_RESTORE_ENABLED can override internal autodetection and will force // the use of restore for all objects. config.NSFS_RESTORE_ENABLED = false; +config.NSFS_HEALTH_ENDPOINT_RETRY_COUNT = 3; +config.NSFS_HEALTH_ENDPOINT_RETRY_DELAY = 10; //Quota config.QUOTA_LOW_THRESHOLD = 80; diff --git a/docs/non_containerized_NSFS.md b/docs/non_containerized_NSFS.md index 4a923a6d8e..6800c3bbf6 100644 --- a/docs/non_containerized_NSFS.md +++ b/docs/non_containerized_NSFS.md @@ -226,7 +226,7 @@ NSFS Health status can be fetched using the command line. Run `--help` to get al NOTE - health script execution requires root permissions. ``` - sudo node usr/local/noobaa-core/src/cmd/health [--https_port,--all_account_details, --all_bucket_details] + sudo node usr/local/noobaa-core/src/cmd/health [--https_port, --all_account_details, --all_bucket_details] ``` output: @@ -244,12 +244,14 @@ NOTE - health script execution requires root permissions. { "name": "nsfs", "service_status": "active", - "pid": "1204" + "pid": "1204", + "error_type": "PERSISTENT" }, { "name": "rsyslog", "service_status": "inactive", - "pid": "0" + "pid": "0", + "error_type": "PERSISTENT" } ], "endpoint": { @@ -258,41 +260,50 @@ NOTE - health script execution requires root permissions. "response_code": 200, "response_message": "Endpoint running successfuly." 
}, - "total_fork_count": 0, - "running_workers": [] - } - }, - "invalid_accounts": [ - { - "name": "naveen", - "storage_path": "/tmp/nsfs_root_invalid/", - "code": "STORAGE_NOT_EXIST" - } - ], - "valid_accounts": [ - { - "name": "naveen", - "storage_path": "/tmp/nsfs_root" - } - ], - "invalid_buckets": [ - { - "name": "bucket1.json", - "config_path": "/etc/noobaa.conf.d/buckets/bucket1.json", - "code": "INVALID_CONFIG" + "total_fork_count": 1, + "running_workers": [ + "1" + ] }, - { - "name": "bucket3", - "storage_path": "/tmp/nsfs_root/bucket3", - "code": "STORAGE_NOT_EXIST" - } - ], - "valid_buckets": [ - { - "name": "bucket2", - "storage_path": "/tmp/nsfs_root/bucket2" - } - ] + "error_type": "TEMPORARY" + }, + "accounts_status": { + "invalid_accounts": [ + { + "name": "naveen", + "storage_path": "/tmp/nsfs_root_invalid/", + "code": "STORAGE_NOT_EXIST" + } + ], + "valid_accounts": [ + { + "name": "naveen", + "storage_path": "/tmp/nsfs_root" + } + ], + "error_type": "PERSISTENT" + }, + "buckets_status": { + "invalid_buckets": [ + { + "name": "bucket1.json", + "config_path": "/etc/noobaa.conf.d/buckets/bucket1.json", + "code": "INVALID_CONFIG" + }, + { + "name": "bucket3", + "storage_path": "/tmp/nsfs_root/bucket3", + "code": "STORAGE_NOT_EXIST" + } + ], + "valid_buckets": [ + { + "name": "bucket2", + "storage_path": "/tmp/nsfs_root/bucket2" + } + ], + "error_type": "PERSISTENT" + } } } ``` @@ -320,6 +331,8 @@ NOTE - health script execution requires root permissions. `valid_buckets`: List all the valid buckets if `all_bucket_details` flag is `true`. +`error_type` : This property could have two values, `PERSISTENT` and `TEMPORARY`, It means the retry could fix the issue or not. For `TEMPORARY` error types multiple retries are initiated from Noobaa side before updating the status with failed status. Right now only Noobaa endpoint has the error type `TEMPORARY`. 
+ In this health output, `bucket2`'s storage path is invalid and the directory mentioned in `new_buckets_path` for `user1` is missing or not accessible. Endpoint curl command returns an error response(`"endpoint_response":404`) if one or more buckets point to an invalid bucket storage path. ### Health Error Codes diff --git a/src/cmd/health.js b/src/cmd/health.js index 77f948c548..9bb0e52fe9 100644 --- a/src/cmd/health.js +++ b/src/cmd/health.js @@ -89,6 +89,11 @@ const fork_response_code = { }, }; +const health_errors_types = { + PERSISTENT: 'PERSISTENT', + TEMPORARY: 'TEMPORARY', +}; + //suppress aws sdk related commands. process.env.AWS_SDK_JS_SUPPRESS_MAINTENANCE_MODE_MESSAGE = '1'; @@ -129,24 +134,61 @@ class NSFSHealth { name: NSFS_SERVICE, service_status: service_status, pid: pid, + error_type: health_errors_types.PERSISTENT, }, { name: RSYSLOG_SERVICE, service_status: rsyslog.service_status, pid: rsyslog.pid, + error_type: health_errors_types.PERSISTENT, }], endpoint: { - endpoint_state + endpoint_state, + error_type: health_errors_types.TEMPORARY, }, - invalid_accounts: account_details.invalid_storages, - valid_accounts: account_details.valid_storages, - invalid_buckets: bucket_details.invalid_storages, - valid_buckets: bucket_details.valid_storages, + accounts_status: { + invalid_accounts: account_details.invalid_storages, + valid_accounts: account_details.valid_storages, + error_type: health_errors_types.PERSISTENT, + }, + buckets_status: { + invalid_buckets: bucket_details.invalid_storages, + valid_buckets: bucket_details.valid_storages, + error_type: health_errors_types.PERSISTENT, + } } }; + if (!this.all_account_details) { + delete health.checks.accounts_status; + } + if (!this.all_bucket_details) { + delete health.checks.buckets_status; + } return health; } + async get_endpoint_response() { + let endpoint_state; + try { + await P.retry({ + attempts: config.NSFS_HEALTH_ENDPOINT_RETRY_COUNT, + delay_ms: config.NSFS_HEALTH_ENDPOINT_RETRY_DELAY, + func: async 
() => { + endpoint_state = await this.get_endpoint_fork_response(); + if (endpoint_state.response.response_code === fork_response_code.NOT_RUNNING.response_code) { + throw new Error('Noobaa endpoint is not running, all the retries failed'); + } + } + }); + } catch (err) { + console.log('Error while pinging endpoint host :' + HOSTNAME + ', port ' + this.https_port, err); + return { + response: fork_response_code.NOT_RUNNING, + }; + } + return endpoint_state; + } + async get_error_code(nsfs_status, pid, rsyslog_status, endpoint_response_code) { if (nsfs_status !== "active" || pid === "0") { return health_errors.NSFS_SERVICE_FAILED; @@ -188,7 +230,7 @@ class NSFSHealth { } } - async get_endpoint_response() { + async get_endpoint_fork_response() { let url_path = '/total_fork_count'; const worker_ids = []; let total_fork_count = 0; @@ -257,24 +299,35 @@ class NSFSHealth { }; } - async get_bucket_storage_status(config_root) { +    async get_bucket_storage_status(config_root) { const bucket_details = await this.get_storage_status(config_root, 'bucket', this.all_bucket_details); return bucket_details; } - async get_account_storage_status(config_root) { +    async get_account_storage_status(config_root) { const account_details = await this.get_storage_status(config_root, 'account', this.all_account_details); return account_details; } - async get_storage_status(config_root, type, all_details) { +    async get_storage_status(config_root, type, all_details) { const fs_context = this.get_root_fs_context(); - const entries = await nb_native().fs.readdir(fs_context, this.get_config_path(config_root, type)); - const config_files = entries.filter(entree => !native_fs_utils.isDirectory(entree) && entree.name.endsWith('.json')); + const config_root_type_path = this.get_config_path(config_root, type); const invalid_storages = []; const valid_storages = []; + //check for account and buckets dir paths + try { + await nb_native().fs.stat(fs_context, config_root_type_path); + } catch 
(err) { + dbg.log1(`Config root path missing ${type} folder in ${config_root_type_path}`); + return { + invalid_storages: invalid_storages, + valid_storages: valid_storages + }; + } + const entries = await nb_native().fs.readdir(fs_context, config_root_type_path); + const config_files = entries.filter(entree => !native_fs_utils.isDirectory(entree) && entree.name.endsWith('.json')); for (const config_file of config_files) { - const config_file_path = path.join(this.get_config_path(config_root, type), config_file.name); + const config_file_path = path.join(config_root_type_path, config_file.name); let config_data; let storage_path; try { @@ -315,7 +368,7 @@ class NSFSHealth { }; } - get_config_path(config_root, type) { +get_config_path(config_root, type) { return path.join(config_root, type === 'bucket' ? '/buckets' : '/accounts'); } } diff --git a/src/deploy/standalone/noobaa_rsyslog.conf b/src/deploy/standalone/noobaa_rsyslog.conf index eb9a3def19..3d101fb9a6 100644 --- a/src/deploy/standalone/noobaa_rsyslog.conf +++ b/src/deploy/standalone/noobaa_rsyslog.conf @@ -6,8 +6,7 @@ #### MODULES #### # The imjournal module bellow is now used as a message source instead of imuxsock. -module(load="imuxsock" # provides support for local system logging (e.g. via logger command) - SysSock.Use="off") # Turn off message reception via local log socket; +# $ModLoad imuxsock # Turn off message reception via local log socket; #$ModLoad imjournal # provides access to the systemd journal #$ModLoad imklog # reads kernel messages (the same are read from journald) #$ModLoad immark # provides --MARK-- message capability @@ -24,17 +23,17 @@ module(load="imuxsock" # provides support for local system logging (e.g. 
via #### GLOBAL DIRECTIVES #### # Where to place auxiliary files -global(workDirectory="/var/lib/rsyslog") +$WorkDirectory /var/lib/rsyslog # Use default timestamp format -module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +# $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat # File syncing capability is disabled by default. This feature is usually not required, # not useful and an extreme performance hit #$ActionFileEnableSync on # Include all config files in /etc/rsyslog.d/ -include(file="/etc/rsyslog.d/*.conf" mode="optional") +# $IncludeConfig /etc/rsyslog.d/*.conf # Turn off message reception via local log socket; # local messages are retrieved through imjournal now. diff --git a/src/test/unit_tests/test_nc_nsfs_health.js b/src/test/unit_tests/test_nc_nsfs_health.js index f44716eb30..f99e4083ea 100644 --- a/src/test/unit_tests/test_nc_nsfs_health.js +++ b/src/test/unit_tests/test_nc_nsfs_health.js @@ -30,14 +30,16 @@ mocha.describe('nsfs nc health', function() { const config_root = path.join(tmp_fs_path, 'config_root_nsfs_health'); const root_path = path.join(tmp_fs_path, 'root_path_nsfs_health/'); + const config_root_invalid = path.join(tmp_fs_path, 'config_root_nsfs_health_invalid'); const accounts_schema_dir = 'accounts'; const buckets_schema_dir = 'buckets'; - let health; + let Health; mocha.before(async () => { await P.all(_.map([accounts_schema_dir, buckets_schema_dir], async dir => fs_utils.create_fresh_path(`${config_root}/${dir}`))); await fs_utils.create_fresh_path(root_path); + await fs_utils.create_fresh_path(config_root_invalid); }); mocha.after(async () => { fs_utils.folder_delete(`${config_root}`); @@ -52,14 +54,14 @@ mocha.describe('nsfs nc health', function() { const bucket1 = { name: bucket_name, path: new_buckets_path + '/bucket1' }; mocha.before(async () => { const https_port = 6443; - health = new NSFSHealth({ config_root, https_port }); + Health = new NSFSHealth({ config_root, https_port }); await 
fs_utils.create_fresh_path(new_buckets_path); await fs_utils.file_must_exist(new_buckets_path); await fs_utils.create_fresh_path(new_buckets_path + '/bucket1'); await fs_utils.file_must_exist(new_buckets_path + '/bucket1'); await write_config_file(config_root, accounts_schema_dir, acount_name, account1); await write_config_file(config_root, buckets_schema_dir, bucket_name, bucket1); - const get_service_memory_usage = sinon.stub(health, "get_service_memory_usage"); + const get_service_memory_usage = sinon.stub(Health, "get_service_memory_usage"); get_service_memory_usage.onFirstCall().returns(Promise.resolve(100)); }); @@ -71,127 +73,182 @@ mocha.describe('nsfs nc health', function() { }); mocha.it('Health all condition is success', async function() { - const get_service_state = sinon.stub(health, "get_service_state"); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 100 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - health.all_account_details = true; - health.all_bucket_details = true; - const health_status = await health.nc_nsfs_health(); + Health.all_account_details = true; + Health.all_bucket_details = true; + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'OK'); - assert.strictEqual(health_status.checks.invalid_buckets.length, 0); - assert.strictEqual(health_status.checks.valid_accounts.length, 1); - assert.strictEqual(health_status.checks.valid_accounts[0].name, 'account1'); - assert.strictEqual(health_status.checks.valid_buckets.length, 1); - 
assert.strictEqual(health_status.checks.valid_buckets[0].name, 'bucket1'); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets.length, 0); + assert.strictEqual(health_status.checks.accounts_status.valid_accounts.length, 1); + assert.strictEqual(health_status.checks.accounts_status.valid_accounts[0].name, 'account1'); + assert.strictEqual(health_status.checks.buckets_status.valid_buckets.length, 1); + assert.strictEqual(health_status.checks.buckets_status.valid_buckets[0].name, 'bucket1'); }); mocha.it('NSFS service is inactive', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); - const get_service_state = sinon.stub(health, "get_service_state"); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'inactive', pid: 0 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'NOTOK'); assert.strictEqual(health_status.error.error_code, 'NOOBAA_NSFS_SERVICE_FAILED'); }); mocha.it('NSFS rsyslog service is inactive', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); - const get_service_state = sinon.stub(health, "get_service_state"); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ 
service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ service_status: 'inactive', pid: 0 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'NOTOK'); assert.strictEqual(health_status.error.error_code, 'RSYSLOG_SERVICE_FAILED'); }); mocha.it('NSFS endpoint return error response is inactive', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); - const get_service_state = sinon.stub(health, "get_service_state"); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'MISSING_FORKS', total_fork_count: 3, running_workers: ['1', '3']}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'NOTOK'); assert.strictEqual(health_status.error.error_code, 'NSFS_ENDPOINT_FORK_MISSING'); }); mocha.it('NSFS account with invalid storage path', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); const 
account_invalid = { name: 'account_invalid', nsfs_account_config: { new_buckets_path: new_buckets_path + '/invalid' } }; await write_config_file(config_root, accounts_schema_dir, account_invalid.name, account_invalid); - const get_service_state = sinon.stub(health, "get_service_state"); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'OK'); - assert.strictEqual(health_status.checks.invalid_accounts.length, 1); - assert.strictEqual(health_status.checks.invalid_accounts[0].name, 'account_invalid'); - fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account_invalid.name + '.json')); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts.length, 1); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts[0].name, 'account_invalid'); + await fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account_invalid.name + '.json')); }); mocha.it('NSFS bucket with invalid storage path', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); const bucket_invalid = { name: 'bucket_invalid', path: new_buckets_path + '/bucket1/invalid' }; await write_config_file(config_root, buckets_schema_dir, bucket_invalid.name, bucket_invalid); - const get_service_state = 
sinon.stub(health, "get_service_state"); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'OK'); - assert.strictEqual(health_status.checks.invalid_buckets.length, 1); - assert.strictEqual(health_status.checks.invalid_buckets[0].name, 'bucket_invalid'); - fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid.name + '.json')); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets.length, 1); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets[0].name, 'bucket_invalid'); + await fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid.name + '.json')); }); mocha.it('NSFS invalid bucket schema json', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); const bucket_invalid_schema = { name: 'bucket_invalid_schema', path: new_buckets_path }; await write_config_file(config_root, buckets_schema_dir, bucket_invalid_schema.name, bucket_invalid_schema, "invalid"); - const get_service_state = sinon.stub(health, "get_service_state"); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ 
service_status: 'active', pid: 2000 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'OK'); - assert.strictEqual(health_status.checks.invalid_buckets.length, 1); - assert.strictEqual(health_status.checks.invalid_buckets[0].name, 'bucket_invalid_schema.json'); - fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid_schema.name + '.json')); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets.length, 1); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets[0].name, 'bucket_invalid_schema.json'); + await fs_utils.file_delete(path.join(config_root, buckets_schema_dir, bucket_invalid_schema.name + '.json')); }); mocha.it('NSFS invalid account schema json', async function() { - health.get_service_state.restore(); - health.get_endpoint_response.restore(); + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); const account_invalid_schema = { name: 'account_invalid_schema', path: new_buckets_path }; await write_config_file(config_root, accounts_schema_dir, account_invalid_schema.name, account_invalid_schema, "invalid"); - const get_service_state = sinon.stub(health, "get_service_state"); + const get_service_state = sinon.stub(Health, "get_service_state"); get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 1000 })) .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 2000 })); - const get_endpoint_response = sinon.stub(health, "get_endpoint_response"); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); 
get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); - const health_status = await health.nc_nsfs_health(); + const health_status = await Health.nc_nsfs_health(); assert.strictEqual(health_status.status, 'OK'); - assert.strictEqual(health_status.checks.invalid_accounts.length, 1); - assert.strictEqual(health_status.checks.invalid_accounts[0].name, 'account_invalid_schema.json'); - fs_utils.file_delete(path.join(config_root, buckets_schema_dir, account_invalid_schema.name + '.json')); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts.length, 1); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts[0].name, 'account_invalid_schema.json'); + await fs_utils.file_delete(path.join(config_root, accounts_schema_dir, account_invalid_schema.name + '.json')); + }); + + mocha.it('Health all condition is success, all_account_details is false', async function() { + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + Health.all_account_details = false; + Health.all_bucket_details = true; + const get_service_state = sinon.stub(Health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 100 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await Health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets.length, 0); + assert.strictEqual(health_status.checks.buckets_status.valid_buckets.length, 1); + assert.strictEqual(health_status.checks.buckets_status.valid_buckets[0].name, 'bucket1'); + 
assert.strictEqual(health_status.checks.accounts_status, undefined); + }); + + mocha.it('Health all condition is success, all_bucket_details is false', async function() { + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + Health.all_account_details = true; + Health.all_bucket_details = false; + const get_service_state = sinon.stub(Health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 100 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await Health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.buckets_status, undefined); + assert.strictEqual(health_status.checks.accounts_status.valid_accounts.length, 1); + assert.strictEqual(health_status.checks.accounts_status.valid_accounts[0].name, 'account1'); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts.length, 0); + }); + + mocha.it('Config root path without bucket and account folders', async function() { + Health.get_service_state.restore(); + Health.get_endpoint_response.restore(); + Health.all_account_details = true; + Health.all_bucket_details = true; + Health.config_root = config_root_invalid; + const get_service_state = sinon.stub(Health, "get_service_state"); + get_service_state.onFirstCall().returns(Promise.resolve({ service_status: 'active', pid: 100 })) + .onSecondCall().returns(Promise.resolve({ service_status: 'active', pid: 200 })); + const get_endpoint_response = sinon.stub(Health, "get_endpoint_response"); + get_endpoint_response.onFirstCall().returns(Promise.resolve({response: {response_code: 'RUNNING', total_fork_count: 0}})); + const health_status = await 
Health.nc_nsfs_health(); + assert.strictEqual(health_status.status, 'OK'); + assert.strictEqual(health_status.checks.buckets_status.valid_buckets.length, 0); + assert.strictEqual(health_status.checks.buckets_status.invalid_buckets.length, 0); + assert.strictEqual(health_status.checks.accounts_status.valid_accounts.length, 0); + assert.strictEqual(health_status.checks.accounts_status.invalid_accounts.length, 0); }); }); });