diff --git a/README.md b/README.md
index 71e1bbe..74cd9b1 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,62 @@ discovery_types: |
[ "crawler", "archive" ]
```
+### `tests`
+
+A list of tests to run during a scan.
+
+_Example:_
+
+```yaml
+tests: |
+ [ "common_files", "id_enumeration" ]
+```
+
+_Recommended tests:_
+
+| Test name | Description | Value | Detectable vulnerabilities |
+| --------- | ----------- | ----- | -------------------------- |
+| **Broken JWT Authentication** | Tests for secure implementation of JSON Web Token (JWT) in the application | `jwt` | - [Broken JWT Authentication](https://docs.brightsec.com/docs/broken-jwt-authentication) |
+| **Broken SAML Authentication** | Tests for secure implementation of SAML authentication in the application | `broken_saml_auth` | - [Broken SAML Authentication](https://docs.brightsec.com/docs/broken-saml-authentication) |
+| **Brute Force Login** | Tests for availability of commonly used credentials | `brute_force_login` | - [Brute Force Login](https://docs.brightsec.com/docs/brute-force-login) |
+| **Business Constraint Bypass** | Tests if the limit on the number of items retrievable via an API call is configured properly | `business_constraint_bypass` | - [Business Constraint Bypass](https://docs.brightsec.com/docs/business-constraint-bypass) |
+| **Client-Side XSS** _(DOM Cross-Site Scripting)_ | Tests if various application DOM parameters are vulnerable to JavaScript injections | `dom_xss` | - [Reflective Cross-site scripting (rXSS)](https://docs.brightsec.com/docs/reflective-cross-site-scripting-rxss) <br> - [Persistent Cross-site scripting (pXSS)](https://docs.brightsec.com/docs/persistent-cross-site-scripting-pxss) |
+| **Common Files Exposure** | Tests if common files that should not be accessible are accessible | `common_files` | - [Exposed Common File](https://docs.brightsec.com/docs/exposed-common-file) |
+| **Cookie Security Check** | Tests if the application uses and implements cookies with secure attributes | `cookie_security` | - [Sensitive Cookie in HTTPS Session Without Secure Attribute](https://docs.brightsec.com/docs/sensitive-cookie-in-https-session-without-secure-attribute) <br> - [Sensitive Cookie Without HttpOnly Flag](https://docs.brightsec.com/docs/sensitive-cookie-without-httponly-flag) <br> - [Sensitive Cookie Weak Session ID](https://docs.brightsec.com/docs/sensitive-cookie-weak-session-id) |
+| **Cross-Site Request Forgery (CSRF)** | Tests application forms for vulnerable cross-site filling and submitting | `csrf` | - [Unauthorized Cross-Site Request Forgery (CSRF)](https://docs.brightsec.com/docs/unauthorized-cross-site-request-forgery-csrf) <br> - [Authorized Cross-Site Request Forgery (CSRF)](https://docs.brightsec.com/docs/authorized-cross-site-request-forgery-csrf) |
+| **Cross-Site Scripting (XSS)** | Tests if various application parameters are vulnerable to JavaScript injections | `xss` | - [Reflective Cross-Site Scripting (rXSS)](https://docs.brightsec.com/docs/reflective-cross-site-scripting-rxss) <br> - [Persistent Cross-Site Scripting (pXSS)](https://docs.brightsec.com/docs/persistent-cross-site-scripting-pxss) |
+| **Default Login Location** | Tests if login form location in the target application is easy to guess and accessible | `default_login_location` | - [Default Login Location](https://docs.brightsec.com/docs/default-login-location) |
+| **Directory Listing** | Tests if server-side directory listing is possible | `directory_listing` | - [Directory Listing](https://docs.brightsec.com/docs/directory-listing) |
+| **Email Header Injection** | Tests if it is possible to send emails to other addresses through the target application mailing server, which can lead to spam and phishing | `email_injection` | - [Email Header Injection](https://docs.brightsec.com/docs/email-header-injection) |
+| **Exposed AWS S3 Buckets Details** _(Open Buckets)_ | Tests if exposed AWS S3 links lead to anonymous read access to the bucket | `open_buckets` | - [Exposed AWS S3 Buckets Details](https://docs.brightsec.com/docs/open-bucket) |
+| **Exposed Database Details** _(Open Database)_ | Tests if exposed database connection strings are open to public connections | `open_database` | - [Exposed Database Details](https://docs.brightsec.com/docs/open-database) <br> - [Exposed Database Connection String](https://docs.brightsec.com/docs/exposed-database-connection-string) |
+| **Full Path Disclosure (FPD)** | Tests if various application parameters are vulnerable to exposure of errors that include full webroot path | `full_path_disclosure` | - [Full Path Disclosure](https://docs.brightsec.com/docs/full-path-disclosure) |
+| **Headers Security Check** | Tests for proper Security Headers configuration | `header_security` | - [Misconfigured Security Headers](https://docs.brightsec.com/docs/misconfigured-security-headers) <br> - [Missing Security Headers](https://docs.brightsec.com/docs/missing-security-headers) <br> - [Insecure Content Secure Policy Configuration](https://docs.brightsec.com/docs/insecure-content-secure-policy-configuration) |
+| **HTML Injection** | Tests if various application parameters are vulnerable to HTML injection | `html_injection` | - [HTML Injection](https://docs.brightsec.com/docs/html-injection) |
+| **Improper Assets Management** | Tests if older or development versions of API endpoints are exposed and can be used to get unauthorized access to data and privileges | `improper_asset_management` | - [Improper Assets Management](https://docs.brightsec.com/docs/improper-assets-management) |
+| **Insecure HTTP Method** _(HTTP Method Fuzzer)_ | Tests enumeration of possible HTTP methods for vulnerabilities | `http_method_fuzzing` | - [Insecure HTTP Method](https://docs.brightsec.com/docs/insecure-http-method) |
+| **Insecure TLS Configuration** | Tests SSL/TLS ciphers and configurations for vulnerabilities | `insecure_tls_configuration` | - [Insecure TLS Configuration](https://docs.brightsec.com/docs/insecure-tls-configuration) |
+| **Known JavaScript Vulnerabilities** _(JavaScript Vulnerabilities Scanning)_ | Tests for known JavaScript component vulnerabilities | `retire_js` | - [JavaScript Component with Known Vulnerabilities](https://docs.brightsec.com/docs/javascript-component-with-known-vulnerabilities) |
+| **Known WordPress Vulnerabilities** _(WordPress Scan)_ | Tests for known WordPress vulnerabilities and tries to enumerate a list of users | `wordpress` | - [WordPress Component with Known Vulnerabilities](https://docs.brightsec.com/docs/wordpress-component-with-known-vulnerabilities) |
+| **LDAP Injection** | Tests if various application parameters are vulnerable to unauthorized LDAP access | `ldapi` | - [LDAP Injection](https://docs.brightsec.com/docs/ldap-injection) <br> - [LDAP Error](https://docs.brightsec.com/docs/ldap-error) |
+| **Local File Inclusion (LFI)** | Tests if various application parameters are vulnerable to loading of unauthorized local system resources | `lfi` | - [Local File Inclusion (LFI)](https://docs.brightsec.com/docs/local-file-inclusion-lfi) |
+| **Mass Assignment** | Tests if it is possible to create requests with additional parameters to gain privilege escalation | `mass_assignment` | - [Mass Assignment](https://docs.brightsec.com/docs/mass-assignment) |
+| **OS Command Injection** | Tests if various application parameters are vulnerable to Operating System (OS) command injection | `osi` | - [OS Command Injection](https://docs.brightsec.com/docs/os-command-injection) |
+| **Prototype Pollution** | Tests if it is possible to inject properties into existing JavaScript objects | `proto_pollution` | - [Prototype Pollution](https://docs.brightsec.com/docs/prototype-pollution) |
+| **Remote File Inclusion (RFI)** | Tests if various application parameters are vulnerable to loading of unauthorized remote system resources | `rfi` | - [Remote File Inclusion (RFI)](https://docs.brightsec.com/docs/remote-file-inclusion-rfi) |
+| **Secret Tokens Leak** | Tests for exposure of secret API tokens or keys in the target application | `secret_tokens` | - [Secret Tokens Leak](https://docs.brightsec.com/docs/secret-tokens-leak) |
+| **Server Side Template Injection (SSTI)** | Tests if various application parameters are vulnerable to server-side code execution | `ssti` | - [Server Side Template Injection (SSTI)](https://docs.brightsec.com/docs/server-side-template-injection-ssti) |
+| **Server Side Request Forgery (SSRF)** | Tests if various application parameters are vulnerable to internal resources access | `ssrf` | - [Server Side Request Forgery (SSRF)](https://docs.brightsec.com/docs/server-side-request-forgery-ssrf) |
+| **SQL Injection (SQLI)** | Tests if various application parameters are vulnerable to SQL injection | `sqli` | - [SQL Injection: Blind Boolean Based](https://docs.brightsec.com/docs/sql-injection-blind-boolean-based) <br> - [SQL Injection: Blind Time Based](https://docs.brightsec.com/docs/sql-injection-blind-time-based) <br> - [SQL Injection](https://docs.brightsec.com/docs/sql-injection) <br> - [SQL Database Error Message in Response](https://docs.brightsec.com/docs/sql-database-error-message-in-response) |
+| **Unrestricted File Upload** | Tests if file upload mechanisms are validated properly and deny the upload of malicious content | `file_upload` | - [Unrestricted File Upload](https://docs.brightsec.com/docs/unrestricted-file-upload) |
+| **Unsafe Date Range** _(Date Manipulation)_ | Tests if date ranges are set and validated properly | `date_manipulation` | - [Unsafe Date Range](https://docs.brightsec.com/docs/unsafe-date-range) |
+| **Unsafe Redirect** _(Unvalidated Redirect)_ | Tests if various application parameters are vulnerable to injection of a malicious link which can redirect a user without validation | `unvalidated_redirect` | - [Unsafe Redirect](https://docs.brightsec.com/docs/unsafe-redirect) |
+| **User ID Enumeration** | Tests if it is possible to collect valid user ID data by interacting with the target application | `id_enumeration` | - [Enumerable Integer-Based ID](https://docs.brightsec.com/docs/enumerable-integer-based-id) |
+| **Version Control System Data Leak** | Tests if it is possible to access Version Control System (VCS) resources | `version_control_systems` | - [Version Control System Data Leak](https://docs.brightsec.com/docs/version-control-system-data-leak) |
+| **XML External Entity Injection** | Tests if various XML parameters are vulnerable to XML parsing of unauthorized external entities | `xxe` | - [XML External Entity Injection](https://docs.brightsec.com/docs/xml-external-entity-injection) |
+
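+For reference, a workflow step that combines this option with the other inputs documented above might look like the sketch below. The action reference and the target URL are placeholders, and authentication inputs are omitted for brevity:
+
+```yaml
+steps:
+  - name: Start a scan
+    uses: NeuraLegion/run-scan@v1 # placeholder reference; use the actual action version
+    with:
+      name: GitHub Scan
+      discovery_types: |
+        [ "crawler" ]
+      crawler_urls: |
+        [ "https://example.com" ]
+      tests: |
+        [ "common_files", "id_enumeration" ]
+```
+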
### `file_id`
**Required** if the discovery type is set to `archive` or `oas`. ID of a HAR-file or an OpenAPI schema you want to use for a scan. You can get the ID of an uploaded HAR-file or an OpenAPI schema in the **Storage** section on [app.neuralegion.com](https://app.neuralegion.com/login).
diff --git a/action.yml b/action.yml
index ef39707..8b105aa 100644
--- a/action.yml
+++ b/action.yml
@@ -38,6 +38,9 @@ inputs:
description: 'Scan Name'
default: 'GitHub Scan'
required: false
+ tests:
+ description: 'A list of tests which you want to run during a scan.'
+ required: false
outputs:
url:
diff --git a/src/config.ts b/src/config.ts
new file mode 100644
index 0000000..e5b7f01
--- /dev/null
+++ b/src/config.ts
@@ -0,0 +1,120 @@
+import { Discovery, validateDiscovery } from './discovery';
+import { TestType, validateTests } from './tests';
+import { URL } from 'url';
+
+export interface RequestExclusion {
+ patterns?: string[];
+ methods?: string[];
+}
+
+export interface Exclusions {
+ params?: string[];
+ requests?: RequestExclusion[];
+}
+
+export interface Config {
+ name: string;
+ discoveryTypes: Discovery[];
+ exclusions?: Exclusions;
+ module?: string;
+ crawlerUrls?: string[];
+ fileId?: string;
+ hostsFilter?: string[];
+ tests?: TestType[];
+}
+
+const invalidUrlProtocols: ReadonlySet<string> = new Set([
+ 'javascript:',
+ 'file:',
+ 'data:',
+ 'mailto:',
+ 'ftp:',
+ 'blob:',
+ 'about:',
+ 'ssh:',
+ 'tel:',
+ 'view-source:',
+ 'ws:',
+ 'wss:'
+]);
+
+export const isValidUrl = (url: string) => {
+ try {
+ const { protocol } = new URL(url);
+
+ return !invalidUrlProtocols.has(protocol);
+ } catch {
+ return false;
+ }
+};
+
+function validateCrawlerUrls(
+ crawlerUrls: string[] | undefined,
+ discoveryTypes: Discovery[]
+) {
+ if (crawlerUrls) {
+ if (!discoveryTypes.includes(Discovery.CRAWLER)) {
+ throw new Error(
+        `Invalid discovery. When specifying crawler URLs, the discovery type must be "crawler". The current discovery types are: ${discoveryTypes.join(
+ ', '
+ )}`
+ );
+ }
+
+ if (!crawlerUrls.length) {
+ throw new Error('No crawler URLs configured.');
+ }
+ } else {
+ if (discoveryTypes.includes(Discovery.CRAWLER)) {
+ throw new Error(
+        `Invalid discovery. When setting the discovery type to "crawler", crawler URLs must be provided.`
+ );
+ }
+ }
+}
+
+function validateFileId(
+ fileId: string | undefined,
+ discoveryTypes: Discovery[]
+) {
+ if (fileId) {
+ if (
+ !(
+ discoveryTypes.includes(Discovery.OAS) ||
+ discoveryTypes.includes(Discovery.ARCHIVE)
+ )
+ ) {
+ throw new Error(
+ `Invalid discovery. When specifying a file ID, the discovery type must be either "oas" or "archive". The current discovery types are: ${discoveryTypes.join(
+ ', '
+ )}`
+ );
+ }
+ } else {
+ if (
+ discoveryTypes.includes(Discovery.OAS) ||
+ discoveryTypes.includes(Discovery.ARCHIVE)
+ ) {
+ throw new Error(
+ `Invalid discovery. When setting a discovery type to either "oas" or "archive", the file ID must be provided.`
+ );
+ }
+ }
+}
+
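+// Cross-checks the scan configuration: the discovery types must be internally
+// consistent, match the supplied file ID and crawler URLs, and any requested
+// tests must be valid.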
+export const validateConfig = ({
+ fileId,
+ crawlerUrls,
+ discoveryTypes,
+ tests
+}: Config) => {
+ validateDiscovery(discoveryTypes);
+
+ validateFileId(fileId, discoveryTypes);
+
+ validateCrawlerUrls(crawlerUrls, discoveryTypes);
+
+ if (tests) {
+ validateTests(tests);
+ }
+};
diff --git a/src/discovery.ts b/src/discovery.ts
new file mode 100644
index 0000000..913d136
--- /dev/null
+++ b/src/discovery.ts
@@ -0,0 +1,49 @@
+export enum Discovery {
+ ARCHIVE = 'archive',
+ CRAWLER = 'crawler',
+ OAS = 'oas'
+}
+
+export const validateDiscovery = (discoveryTypes: Discovery[]) => {
+ if (discoveryTypes.some((x: Discovery) => !isValidDiscovery(x))) {
+ throw new Error('Unknown discovery type supplied.');
+ }
+
+ const uniqueDiscoveryTypes = new Set(discoveryTypes);
+
+ if (uniqueDiscoveryTypes.size !== discoveryTypes.length) {
+ throw new Error('Discovery contains duplicate values.');
+ }
+
+ if (uniqueDiscoveryTypes.size !== 1) {
+ disallowDiscoveryCombination(uniqueDiscoveryTypes);
+ }
+};
+
+const isValidDiscovery = (x: Discovery) => Object.values(Discovery).includes(x);
+
+const disallowDiscoveryCombination = (discoveryTypes: Set<Discovery>): void => {
+ const disallowedCombinations = getDisallowedDiscoveryCombination([
+ ...discoveryTypes
+ ]);
+
+ if (disallowedCombinations.length) {
+ const [firstInvalidCombination]: [Discovery, readonly Discovery[]][] =
+ disallowedCombinations;
+
+ throw new Error(
+ `The discovery list cannot include both ${
+ firstInvalidCombination?.[0]
+ } and any of ${firstInvalidCombination?.[1].join(', ')} simultaneously.`
+ );
+ }
+};
+
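+// Discovery types that cannot be combined: "oas" discovery cannot run together
+// with "crawler" or "archive" discovery.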
+const disallowedDiscoveryCombinations = new Map([
+ [Discovery.OAS, [Discovery.CRAWLER, Discovery.ARCHIVE]]
+]);
+
+const getDisallowedDiscoveryCombination = (discoveryTypes: Discovery[]) =>
+ [...disallowedDiscoveryCombinations].filter(
+ ([x]: [Discovery, readonly Discovery[]]) => discoveryTypes.includes(x)
+ );
diff --git a/src/index.ts b/src/index.ts
index 44804fd..0530be2 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,25 +1,8 @@
-import * as core from '@actions/core';
+import { TestType } from './tests';
+import { Discovery } from './discovery';
+import { Config, RequestExclusion, validateConfig } from './config';
import { HttpClient } from '@actions/http-client';
-
-interface RequestExclusion {
- patterns: string[];
- methods: string[];
-}
-
-interface Exclusions {
- params?: string[];
- requests?: RequestExclusion[];
-}
-
-interface NewScan {
- name: string;
- discoveryTypes: string[];
- exclusions?: Exclusions;
- module?: string;
- crawlerUrls?: string[];
- fileId?: string;
- hostsFilter?: string[];
-}
+import * as core from '@actions/core';
interface Scan {
id: string;
@@ -44,7 +27,8 @@ const fileId = core.getInput('file_id');
const crawlerUrls = getArray('crawler_urls');
const excludedParams = getArray('exclude_params');
const excludedEntryPoints = getArray('exclude_entry_points');
-const discoveryTypesIn = getArray('discovery_types');
+const tests = getArray('tests');
+const discoveryTypesIn = getArray('discovery_types');
const module_in = core.getInput('module');
const hostsFilter = getArray('hosts_filter');
const type = core.getInput('type');
@@ -81,11 +65,11 @@ const retest = async (uuid: string, scanName?: string) => {
}
};
-const create = async (scan: NewScan) => {
+const create = async (config: Config) => {
try {
const response = await client.postJson(
`${baseUrl}/api/v1/scans`,
- scan
+ config
);
if (response.statusCode < 300 && response.result) {
@@ -110,7 +94,8 @@ if (restartScanID) {
discoveryTypesIn ||
module_in ||
hostsFilter ||
- type
+ type ||
+ tests
)
) {
retest(restartScanID, name);
@@ -122,19 +107,29 @@ if (restartScanID) {
} else {
const module = module_in || 'dast';
const discoveryTypes = !discoveryTypesIn?.length
- ? ['archive']
+ ? [Discovery.ARCHIVE]
: discoveryTypesIn;
-
- create({
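+  // Drop duplicate test names before validating and sending the configuration.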
+ const uniqueTests = tests ? [...new Set(tests)] : undefined;
+ const config: Config = {
name,
discoveryTypes,
module,
crawlerUrls,
fileId,
hostsFilter,
+ tests: uniqueTests,
exclusions: {
requests: excludedEntryPoints,
params: excludedParams
}
- });
+ };
+
+ try {
+ validateConfig(config);
+ } catch (e: any) {
+ core.setFailed(e.message);
+ throw e;
+ }
+
+ create(config);
}
diff --git a/src/tests.ts b/src/tests.ts
new file mode 100644
index 0000000..0d37e93
--- /dev/null
+++ b/src/tests.ts
@@ -0,0 +1,113 @@
+import * as core from '@actions/core';
+
+export enum TestType {
+ ANGULAR_CSTI = 'angular_csti',
+ BACKUP_LOCATIONS = 'backup_locations',
+ BROKEN_SAML_AUTH = 'broken_saml_auth',
+ BRUTE_FORCE_LOGIN = 'brute_force_login',
+ BUSINESS_CONSTRAINT_BYPASS = 'business_constraint_bypass',
+ COMMON_FILES = 'common_files',
+ COOKIE_SECURITY = 'cookie_security',
+ CSRF = 'csrf',
+ CVE = 'cve_test',
+ DATE_MANIPULATION = 'date_manipulation',
+ DEFAULT_LOGIN_LOCATION = 'default_login_location',
+ DIRECTORY_LISTING = 'directory_listing',
+ DOM_XSS = 'dom_xss',
+ EMAIL_INJECTION = 'email_injection',
+ EXCESSIVE_DATA_EXPOSURE = 'excessive_data_exposure',
+ EXPOSED_COUCH_DB_APIS = 'exposed_couch_db_apis',
+ FILE_UPLOAD = 'file_upload',
+ FULL_PATH_DISCLOSURE = 'full_path_disclosure',
+ GRAPHQL_INTROSPECTION = 'graphql_introspection',
+ HEADER_SECURITY = 'header_security',
+ HRS = 'hrs',
+ HTML_INJECTION = 'html_injection',
+ HTTP_METHOD_FUZZING = 'http_method_fuzzing',
+ HTTP_RESPONSE_SPLITTING = 'http_response_splitting',
+ ID_ENUMERATION = 'id_enumeration',
+ IMPROPER_ASSET_MANAGEMENT = 'improper_asset_management',
+ INSECURE_TLS_CONFIGURATION = 'insecure_tls_configuration',
+ JWT = 'jwt',
+ LDAPI = 'ldapi',
+ LFI = 'lfi',
+ LRRL = 'lrrl',
+ MASS_ASSIGNMENT = 'mass_assignment',
+ NOSQL = 'nosql',
+ OPEN_BUCKETS = 'open_buckets',
+ OPEN_DATABASE = 'open_database',
+ OSI = 'osi',
+ PROTO_POLLUTION = 'proto_pollution',
+ RETIRE_JS = 'retire_js',
+ RFI = 'rfi',
+ S3_TAKEOVER = 'amazon_s3_takeover',
+ SECRET_TOKENS = 'secret_tokens',
+ SERVER_SIDE_JS_INJECTION = 'server_side_js_injection',
+ SQLI = 'sqli',
+ SSRF = 'ssrf',
+ SSTI = 'ssti',
+ UNVALIDATED_REDIRECT = 'unvalidated_redirect',
+ VERSION_CONTROL_SYSTEMS = 'version_control_systems',
+ WEBDAV = 'webdav',
+ WORDPRESS = 'wordpress',
+ XPATHI = 'xpathi',
+ XSS = 'xss',
+ XXE = 'xxe'
+}
+
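+// Tests that are slow or resource-intensive; selecting them only produces a
+// warning during validation, not an error.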
+export const expensiveTests: readonly TestType[] = [
+ TestType.BUSINESS_CONSTRAINT_BYPASS,
+ TestType.CVE,
+ TestType.DATE_MANIPULATION,
+ TestType.EXCESSIVE_DATA_EXPOSURE,
+ TestType.ID_ENUMERATION,
+ TestType.LRRL,
+ TestType.MASS_ASSIGNMENT,
+ TestType.RETIRE_JS,
+ // not implemented yet by the engine
+ TestType.ANGULAR_CSTI,
+ TestType.BACKUP_LOCATIONS,
+ TestType.EXPOSED_COUCH_DB_APIS,
+ TestType.HTTP_RESPONSE_SPLITTING,
+ TestType.HRS
+];
+
+export const exclusiveTests: readonly TestType[] = [TestType.LRRL];
+
+export const isValidTest = (test: TestType) =>
+ Object.values(TestType).includes(test);
+
+export const hasExpensiveTests = (tests: TestType[]) =>
+ tests.some(x => expensiveTests.includes(x));
+
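+// An exclusive test (currently only LRRL) must be the only test in the list;
+// returns true when it is combined with any other test.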
+export const hasExclusiveTests = (tests: TestType[]) =>
+ tests.some(x => exclusiveTests.includes(x)) && tests.length !== 1;
+
+export const validateTests = (uniqueTests: TestType[]): void => {
+ const invalidTests = uniqueTests.filter(x => !isValidTest(x));
+
+ if (invalidTests.length) {
+ throw new Error(
+ `${invalidTests.join(
+ ', '
+ )} tests are invalid. Please re-configure the scan.`
+ );
+ }
+
+ if (hasExclusiveTests(uniqueTests)) {
+ const chosenTests = uniqueTests.filter(x => exclusiveTests.includes(x));
+ throw new Error(
+ `${chosenTests.join(
+ ', '
+ )} tests are mutually exclusive with other tests. Please re-configure the scan.`
+ );
+ }
+
+ if (hasExpensiveTests(uniqueTests)) {
+ const chosenTests = uniqueTests.filter(x => expensiveTests.includes(x));
+ const warningMessage = `${chosenTests.join(
+ ', '
+ )} tests are expensive. Please use them with caution.`;
+ core.warning(warningMessage);
+ }
+};