diff --git a/doc/source/user_documentation.rst b/doc/source/user_documentation.rst
index 8843f4c64..d46896a0f 100644
--- a/doc/source/user_documentation.rst
+++ b/doc/source/user_documentation.rst
@@ -2547,3 +2547,281 @@ The code for neural network optimization through evolutionary algorithm used in
 .. Adding cell classes
 .. --------------------
+Running a Batch Job (Beta)
+==========================
+
+The NetPyNE batchtools subpackage provides a method of automating job submission and reporting::
+
+
+    batch<-->\               /---> configuration_0 >---\
+              \             /                           specs---\
+               \<--->dispatcher_0                                sim_0
+                \           \                           comm ---/
+                 \           \---<     results_0    <---/
+                  \
+                   \         /---> configuration_1 >---\
+                    \       /                           specs---\
+                     \<--->dispatcher_1                          sim_1
+                      \     \                           comm ---/
+                       \     \---<     results_1    <---/
+                        \
+                         \
+                          ...
+
+
+
+1. Setting up batchtools
+------------------------
+Beyond the necessary dependency installations for NetPyNE and NEURON, several additional ``pip`` installations are required.
+
+The NetPyNE installation should be handled as a development installation of the repository branch ``batch``::
+
+    git clone https://github.com/Neurosim-lab/netpyne.git
+    cd netpyne
+    git checkout batch
+    pip install -e .
+
+The batchtools installation is either a standard ``pip`` install::
+
+    pip install -U batchtk
+
+or a development install (recommended)::
+
+    git clone https://github.com/jchen6727/batchtk.git
+    cd batchtk
+    pip install -e .
+
+Ray is a dependency for batchtools, and should be installed with the following command::
+
+    pip install -U "ray[default]"
+
+2. Examples
+-----------
+Examples of NetPyNE batchtools usage can be found in the ``examples`` directory `here <https://github.com/Neurosim-lab/netpyne/tree/batch/netpyne/batchtools/examples>`_.
+
+Examples of the underlying batchtk package can be found in the ``examples`` directory of the `batchtk repository <https://github.com/jchen6727/batchtk>`_.
+
+3. Retrieving batch configuration values through the ``specs`` object
+---------------------------------------------------------------------
+Each simulation is able to retrieve relevant configurations through the ``specs`` object, and communicate with
+the dispatcher through the ``comm`` object.
+
+Importing the relevant objects::
+
+    from netpyne.batchtools import specs, comm
+    cfg = specs.SimConfig()        # create a SimConfig object
+    netParams = specs.NetParams()  # create a netParams object
+
+``netpyne.batchtools.specs`` behaves similarly to ``netpyne.sim.specs`` except in the following cases:
+
+* ``netpyne.batchtools.specs`` automatically captures relevant configuration mappings created by the ``dispatcher`` upon initialization
+
+  * these mappings can be retrieved via ``specs.get_mappings()``
+
+* the SimConfig object created by ``netpyne.batchtools.specs.SimConfig()`` will update itself with relevant configuration mappings through the ``update()`` method::
+
+    from netpyne.batchtools import specs  # import the custom batch specs
+    cfg = specs.SimConfig()               # create a SimConfig object
+    cfg.update()                          # update the cfg object with any relevant mappings for this particular batch job
+
+The ``update`` method will update the ``SimConfig`` object with the configuration mappings captured in ``specs`` (see: ``specs.get_mappings()``).
+
+This replaces the previous idiom for updating the SimConfig object with mappings from the batched job submission::
+
+    try:
+        from __main__ import cfg  # import SimConfig object with params from parent module
+    except:
+        from cfg import cfg  # if no simConfig in parent module, import directly from tut8_cfg module
+
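+With batchtools, the batch-supplied values arrive through the environment and are applied in one place. A minimal
+batch-aware configuration script might therefore look like the following (an illustrative sketch; ``foo`` is a
+placeholder parameter name, not part of the API)::
+
+    from netpyne.batchtools import specs
+
+    cfg = specs.SimConfig()      # create the batch-aware SimConfig object
+    cfg.foo = 0                  # default value, overridden if the batch supplies 'foo'
+    cfg.update()                 # apply any mappings supplied by the dispatcher
+    print(specs.get_mappings())  # inspect which values were supplied by this batch job
+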
+
+
+4. Communicating results to the ``dispatcher`` with the ``comm`` object
+------------------------------------------------------------------------
+
+Prior batched simulations relied on ``.pkl`` files to communicate data. The ``netpyne.batchtools`` subpackage
+instead uses a ``comm`` object to send custom data back to the ``dispatcher``.
+The ``comm`` object determines the method of communication based on the batch job submission type.
+
+In terms of the simulation, the following functions are available to the user:
+
+* **comm.initialize()**: establishes a connection with the batch ``dispatcher`` for sending data
+
+* **comm.send(<data>)**: sends ``<data>`` to the batch ``dispatcher``
+
+  * for ``search`` jobs, it is important to match the data sent with the metric specified in the search function
+
+* **comm.close()**: closes and cleans up the connection with the batch ``dispatcher``
+
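+Putting these calls together, the reporting portion of a simulation script might look like this (a minimal sketch;
+the ``loss`` value stands in for whatever figure of merit the simulation actually computes)::
+
+    import json
+    from netpyne.batchtools import specs, comm
+
+    cfg = specs.SimConfig()
+    cfg.update()
+
+    # ... run the simulation and compute a figure of merit ...
+    loss = 0.0  # placeholder value
+
+    comm.initialize()   # establish the connection to the dispatcher
+    if comm.is_host():  # report only from the rank 0 process
+        comm.send(json.dumps({**specs.get_mappings(), 'loss': loss}))
+    comm.close()
+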
"``sh``" will submit bach jobs through the shell on a local machine +* **comm_type**: either "``socket``" or "``filesystem``", specifying how the job should communicate with the dispatcher +* **run_config**: a dictionary of keyword: string pairs to customize the submit template, the expected keyword: string pairs are dependent on the job_type:: + + ======= + sge + ======= + queue: the queue to submit the job to (#$ -q {queue}) + cores: the number of cores to request for the job (#$ -pe smp {cores}) + vmem: the amount of memory to request for the job (#$ -l h_vmem={vmem}) + realtime: the amount of time to request for the job (#$ -l h_rt={realtime}) + command: the command to run for the job + + example: + run_config = { + 'queue': 'cpu.q', # request job to be run on the 'cpu.q' queue + 'cores': 8, # request 8 cores for the job + 'vmem': '8G', # request 8GB of memory for the job + 'realtime': '24:00:00', # set timeout of the job to 24 hours + 'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + } # set the command to be run to 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py' + + ======= + sh + ======= + command: the command to run for the job + + example: + run_config = { + 'command': 'mpiexec -n 8 nrniv -python -mpi init.py' + } # set the command to be run + +* **params**: a dictionary of config values to perform the search over. The keys of the dictionary should match the keys of the config object to be updated. Lists or numpy generators >2 values will force a grid search over the values; otherwise, a list of two values will create a uniform distribution sample space. + + **usage 1**: updating a constant value specified in the ``SimConfig`` object :: + + # take a config object with the following parameter ``foo`` + cfg = specs.SimConfig() + cfg.foo = 0 + cfg.update() + + # specify a search space for ``foo`` such that a simulation will run with: + # cfg.foo = 0 + # cfg.foo = 1 + # cfg.foo = 2 + # ... + # cfg.foo = 9 + + # using: + params = { + 'foo': range(10) + } + + **usage 2**: updating a nested object in the ``SimConfig`` object:: + + # to update a nested object, the package uses the `.` operator to specify reflection into the object. + # take a config object with the following parameter object ``foo`` + cfg = specs.SimConfig() + cfg.foo = {'bar': 0, 'baz': 0} + cfg.update() + + # specify a search space for ``foo['bar']`` with `foo.bar` such that a simulation will run: + # cfg.foo['bar'] = 0 + # cfg.foo['bar'] = 1 + # cfg.foo['bar'] = 2 + # ... + # cfg.foo['bar'] = 9 + + # using: + params = { + 'foo.bar': range(10) + } + + # this reflection works with nested objects as well... + # i.e. 
+* **algorithm**: the search algorithm (supported within ``ray.tune``)
+
+  **Supported algorithms**::
+
+    * "variant_generator": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "random": grid and random based search of the parameter space (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "axe": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "bayesopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "hyperopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "bohb": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "nevergrad": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "optuna": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "hebo": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "sigopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+    * "zoopt": optimization algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+
+* **label**: a label for the search, used for output file naming
+
+* **output_path**: the directory for storing generated files; can be a relative or absolute path
+
+* **checkpoint_path**: the directory for storing checkpoint files in case the search needs to be restored; can be a relative or absolute path
+
+* **max_concurrent**: the number of concurrent trials to run at one time; it is recommended to keep in mind the resource usage of each trial to avoid overscheduling
+
+* **batch**: whether concurrent trials should run synchronously or asynchronously
+
+* **num_samples**: the number of trials to run; for any grid search, each value in the grid will be sampled ``num_samples`` times
+
+* **metric**: the metric to optimize (this should match some key: value pair in the returned data)
+
+* **mode**: either 'min' or 'max' (whether to minimize or maximize the metric)
+
+* **algorithm_config**: additional configuration for the search algorithm (see the `optuna docs <https://optuna.readthedocs.io/en/stable/>`_)
+
+6. Performing parameter optimization searches (CA3 example)
+------------------------------------------------------------
+The ``examples`` directory `here <https://github.com/Neurosim-lab/netpyne/tree/batch/netpyne/batchtools/examples/CA3>`_ shows both a ``grid`` based search as well as an ``optuna`` based optimization.
+
+In the ``CA3`` example, we tune the ``PYR->BC`` ``NMDA`` and ``AMPA`` synaptic weights, as well as the ``BC->PYR`` ``GABA`` synaptic weight. Note how the search space is defined::
+
+    # from optuna_search.py
+    params = {'nmda.PYR->BC' : [1e-3, 1.8e-3],
+              'ampa.PYR->BC' : [0.2e-3, 0.5e-3],
+              'gaba.BC->PYR' : [0.4e-3, 1.0e-3],
+              }
+
+In ``optuna_search.py`` the two-value lists define the lower and upper bounds of the search space, while in ``grid_search.py`` the search space is defined as::
+
+    # from grid_search.py
+    params = {'nmda.PYR->BC' : numpy.linspace(1e-3, 1.8e-3, 3),
+              'ampa.PYR->BC' : numpy.linspace(0.2e-3, 0.5e-3, 3),
+              'gaba.BC->PYR' : numpy.linspace(0.4e-3, 1.0e-3, 3),
+              }
+
+which defines a ``3x3x3`` grid of specific values to search over.
+
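+Either ``params`` dictionary is then passed to ``search``. The following sketch paraphrases the call in
+``grid_search.py`` (the ``algorithm``, ``metric`` and ``mode`` arguments shown here are illustrative assumptions;
+see the example file itself for the exact call)::
+
+    from netpyne.batchtools.search import search
+
+    sge_config = {'queue': 'cpu.q',
+                  'cores': 5,
+                  'vmem': '4G',
+                  'realtime': '00:30:00',
+                  'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'}
+
+    search(job_type = 'sge',    # or 'sh' to run on the local machine
+           comm_type = 'socket',
+           label = 'grid',
+           params = params,     # the search space defined above
+           run_config = sge_config,
+           output_path = '../grid_batch',
+           checkpoint_path = '../ray',
+           algorithm = 'variant_generator',  # grid/random based search
+           metric = 'loss',     # optimize the 'loss' value reported by init.py
+           mode = 'min')
+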
+Note that ``metric`` specifies a particular string (``loss``) to report and optimize on. This value is generated and ``sent`` by the ``init.py`` simulation::
+
+    # from init.py
+    results['PYR_loss'] = (results['PYR'] - 3.33875)**2
+    results['BC_loss']  = (results['BC']  - 19.725 )**2
+    results['OLM_loss'] = (results['OLM'] - 3.470  )**2
+    results['loss'] = (results['PYR_loss'] + results['BC_loss'] + results['OLM_loss']) / 3
+    out_json = json.dumps({**inputs, **results})
+
+    print(out_json)
+    #TODO put all of this in a single function.
+    comm.send(out_json)
+    comm.close()
+
+The ``out_json`` output contains a dictionary which includes the ``loss`` metric (calculated as the MSE between observed and expected values).
+
+In a multi-objective optimization, the relevant ``PYR_loss``, ``BC_loss``, and ``OLM_loss`` components are additionally included (see ``mo_optuna_search.py``).
\ No newline at end of file
diff --git a/netpyne/batchtools/__init__.py b/netpyne/batchtools/__init__.py
index c380458d2..ff51f981a 100644
--- a/netpyne/batchtools/__init__.py
+++ b/netpyne/batchtools/__init__.py
@@ -1,11 +1,17 @@
 from netpyne.batchtools.runners import NetpyneRunner
 from batchtk.runtk import dispatchers
+
+from netpyne.batchtools import submits
 from batchtk import runtk
 
 specs = NetpyneRunner()
-
 from netpyne.batchtools.comm import Comm
+dispatchers = dispatchers  # re-export submodules at the package level for user convenience
+submits = submits
+runtk = runtk
+
+
 comm = Comm()
diff --git a/netpyne/batchtools/docs/batchtools.rst b/netpyne/batchtools/docs/batchtools.rst
index 38bf6e7c9..499340fbd 100644
--- a/netpyne/batchtools/docs/batchtools.rst
+++ b/netpyne/batchtools/docs/batchtools.rst
@@ -20,7 +20,41 @@ The NetPyNE batchtools subpackage provides a method of automating job submission
 
 ...
 
-1. Retrieving batch configuration values through the ``specs`` object
+
+
+1. Setting up batchtools
+-----
+Beyond the necessary dependency installations for NetPyNE and NEURON, several additional ``pip`` installations are required.
+
+The NetPyNE installation should be handled as a development installation of the repository branch ``batch``::
+
+    git clone https://github.com/Neurosim-lab/netpyne.git
+    cd netpyne
+    git checkout batch
+    pip install -e .
+
+The batchtools installation is either a standard ``pip`` install::
+
+    pip install -U batchtk
+
+or a development install (recommended)::
+
+    git clone https://github.com/jchen6727/batchtk.git
+    cd batchtk
+    pip install -e .
+
+Ray is a dependency for batchtools, and should be installed with the following command::
+
+    pip install -U "ray[default]"
+
+2. Examples
+-----
+Examples of NetPyNE batchtools usage can be found in the ``examples`` directory `here <https://github.com/Neurosim-lab/netpyne/tree/batch/netpyne/batchtools/examples>`_.
+
+Examples of the underlying batchtk package can be found in the ``examples`` directory of the `batchtk repository <https://github.com/jchen6727/batchtk>`_.
+
+3. Retrieving batch configuration values through the ``specs`` object
 -----
 Each simulation is able to retrieve relevant configurations through the ``specs`` object, and communicate with
 the dispatcher through the ``comm`` object.
@@ -54,7 +88,9 @@ This replaces the previous idiom for updating the SimConfig object with mappings
 
 
 
-2. Communicating results to the ``dispatcher`` with the ``comm`` object
+
+4. Communicating results to the ``dispatcher`` with the ``comm`` object
 -----
 Prior batched simulations relied on ``.pkl`` files to communicate data.
 The ``netpyne.batch`` subpackage uses a specific ``comm`` object to send custom data back
@@ -66,9 +102,12 @@ In terms of the simulation, the following functions are available to the user:
 
 * **comm.send(<data>)**: sends ``<data>`` to the batch ``dispatcher``
 
+  * for ``search`` jobs, it is important to match the data sent with the metric specified in the search function
+
 * **comm.close()**: closes and cleans up the connection with the batch ``dispatcher``
 
-3. Specifying a batch job
+
+5. Specifying a batch job
 -----
 Batch job handling is implemented with methods from ``netpyne.batchtools.search``
@@ -202,5 +241,45 @@ The basic search implemented with the ``search`` function uses ``ray.tune`` as t
 
 * **mode**: either 'min' or 'max' (whether to minimize or maximize the metric)
 
-* **algorithm_config**: additional configuration for the search algorithm (see: https://docs.ray.io/en/latest/tune/api/suggestion.html)
+* **algorithm_config**: additional configuration for the search algorithm (see the `optuna docs <https://optuna.readthedocs.io/en/stable/>`_)
+
+6. Performing parameter optimization searches (CA3 example)
+-----
+The ``examples`` directory `here <https://github.com/Neurosim-lab/netpyne/tree/batch/netpyne/batchtools/examples/CA3>`_ shows both a ``grid`` based search as well as an ``optuna`` based optimization.
+
+In the ``CA3`` example, we tune the ``PYR->BC`` ``NMDA`` and ``AMPA`` synaptic weights, as well as the ``BC->PYR`` ``GABA`` synaptic weight. Note how the search space is defined::
+
+    # from optuna_search.py
+    params = {'nmda.PYR->BC' : [1e-3, 1.8e-3],
+              'ampa.PYR->BC' : [0.2e-3, 0.5e-3],
+              'gaba.BC->PYR' : [0.4e-3, 1.0e-3],
+              }
+
+In ``optuna_search.py`` the two-value lists define the lower and upper bounds of the search space, while in ``grid_search.py`` the search space is defined as::
+
+    # from grid_search.py
+    params = {'nmda.PYR->BC' : numpy.linspace(1e-3, 1.8e-3, 3),
+              'ampa.PYR->BC' : numpy.linspace(0.2e-3, 0.5e-3, 3),
+              'gaba.BC->PYR' : numpy.linspace(0.4e-3, 1.0e-3, 3),
+              }
+
+which defines a ``3x3x3`` grid of specific values to search over.
+
+Note that ``metric`` specifies a particular string (``loss``) to report and optimize on. This value is generated and ``sent`` by the ``init.py`` simulation::
+
+    # from init.py
+    results['PYR_loss'] = (results['PYR'] - 3.33875)**2
+    results['BC_loss']  = (results['BC']  - 19.725 )**2
+    results['OLM_loss'] = (results['OLM'] - 3.470  )**2
+    results['loss'] = (results['PYR_loss'] + results['BC_loss'] + results['OLM_loss']) / 3
+    out_json = json.dumps({**inputs, **results})
+
+    print(out_json)
+    #TODO put all of this in a single function.
+    comm.send(out_json)
+    comm.close()
+
+The ``out_json`` output contains a dictionary which includes the ``loss`` metric (calculated as the MSE between observed and expected values).
+
+In a multi-objective optimization, the relevant ``PYR_loss``, ``BC_loss``, and ``OLM_loss`` components are additionally included (see ``mo_optuna_search.py``)
diff --git a/netpyne/batchtools/examples/CA3/grid_search.py b/netpyne/batchtools/examples/CA3/grid_search.py
index fdebf651b..ed1860def 100644
--- a/netpyne/batchtools/examples/CA3/grid_search.py
+++ b/netpyne/batchtools/examples/CA3/grid_search.py
@@ -23,11 +23,13 @@
         'realtime': '00:30:00',
         'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'}
 
-run_config = shell_config
-search(job_type = 'sh',
+run_config = sge_config
+
+search(job_type = 'sge',  # or 'sh' to run on the local machine
        comm_type = 'socket',
-       label = 'search',
+       label = 'grid',
        params = params,
        output_path = '../grid_batch',
        checkpoint_path = '../ray',
diff --git a/netpyne/batchtools/examples/CA3/mo_optuna_search.py b/netpyne/batchtools/examples/CA3/mo_optuna_search.py
new file mode 100644
index 000000000..25f478051
--- /dev/null
+++ b/netpyne/batchtools/examples/CA3/mo_optuna_search.py
@@ -0,0 +1,50 @@
+from netpyne.batchtools.search import ray_optuna_search
+from netpyne.batchtools import dispatchers, submits
+import batchtk
+
+from ray import tune
+
+params = {'nmda.PYR->BC' : tune.uniform(1e-3, 1.8e-3),
+          #'nmda.PYR->OLM': tune.uniform(0.4e-3, 1.0e-3),
+          #'nmda.PYR->PYR': tune.uniform(0.001e-3, 0.007e-3),
+          'ampa.PYR->BC' : tune.uniform(0.2e-3, 0.5e-3),
+          #'ampa.PYR->OLM': tune.uniform(0.2e-3, 0.5e-3),
+          #'ampa.PYR->PYR': tune.uniform(0.01e-3, 0.03e-3),
+          #'gaba.BC->BC'  : tune.uniform(1e-3, 7e-3),
+          'gaba.BC->PYR' : tune.uniform(0.4e-3, 1.0e-3),
+          #'gaba.OLM->PYR': tune.uniform(40e-3, 100e-3),
+          }
+
+# use shell_config if running directly on the machine
+shell_config = {'command': 'mpiexec -np 4 nrniv -python -mpi init.py',}
+
+# use sge_config if running on a cluster scheduled by the Sun Grid Engine
+sge_config = {
+    'queue': 'cpu.q',
+    'cores': 5,
+    'vmem': '4G',
+    'realtime': '00:30:00',
+    'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'}
+
+run_config = sge_config
+
+Dispatcher = dispatchers.INETDispatcher
+Submit = submits.SGESubmitSOCK
+metrics = ['PYR_loss', 'BC_loss', 'OLM_loss', 'loss']
+
+ray_study = ray_optuna_search(
+    dispatcher_constructor = Dispatcher,
+    submit_constructor = Submit,
+    params = params,
+    run_config = run_config,
+    max_concurrent = 3,
+    output_path = '../mo_batch',
+    checkpoint_path = '../ray',
+    label = 'mo_search',
+    num_samples = 15,
+    metric = metrics,
+    mode = ['min', 'min', 'min', 'min'],)  # one mode per metric; each loss is minimized
+
+results = {
+    metric: ray_study.results.get_best_result(metric, 'min') for metric in metrics
+}
diff --git a/netpyne/batchtools/examples/CA3/optuna_search.py b/netpyne/batchtools/examples/CA3/optuna_search.py
index 30cf45f6f..3ee09881d 100644
--- a/netpyne/batchtools/examples/CA3/optuna_search.py
+++ b/netpyne/batchtools/examples/CA3/optuna_search.py
@@ -22,11 +22,12 @@
         'realtime': '00:30:00',
         'command': 'mpiexec -n $NSLOTS -hosts $(hostname) nrniv -python -mpi init.py'}
 
-run_config = shell_config
-search(job_type = 'sh',
+run_config = sge_config
+
+search(job_type = 'sge',  # or 'sh' to run on the local machine
        comm_type = 'socket',
-       label = 'search',
+       label = 'optuna',
        params = params,
        output_path = '../optuna_batch',
        checkpoint_path = '../ray',
diff --git a/netpyne/batchtools/examples/jupyter/batchtools.ipynb b/netpyne/batchtools/examples/jupyter/batchtools.ipynb
new file mode 100644
index 000000000..22fe3489f
--- /dev/null
+++ b/netpyne/batchtools/examples/jupyter/batchtools.ipynb
@@ -0,0 +1,314 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Jupyter Tutorial: The NetPyNE batchtools subpackage\n",
+    "How to use the `specs` and `comm` objects to communicate with the `batchtools` `dispatcher`\n"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "89ec6ca2392a9a0d"
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "For each individual `sim`, communication with the `batchtools` `dispatcher` occurs through the `specs` and `comm` objects"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "be50f40d8e61a944"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "from netpyne.batchtools import specs, comm"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "6f321aedb7faf945",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "The `specs` object is an instantiation of a custom class extending the `batchtk` `Runner` ..."
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "5f2f08f0b5e582c3"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "help(type(specs))"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "29fa261236494bc3",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "From this `specs` object, we can similarly call `specs.NetParams` and `specs.SimConfig` to create the NetPyNE objects..."
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "64ead24451bbad4a"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "help(specs.NetParams)\n",
+    "help(specs.SimConfig)"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "43d263d080800019",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "The `batchtools` job submission tool uses `environmental variables` to pass values to our `config` object created by `specs.SimConfig`; these `environmental variables` are captured during the `specs` object creation, which occurs during the batchtools `import` (from the `batchtools` `__init__.py`):\n",
+    "```\n",
+    "from netpyne.batchtools.runners import NetpyneRunner\n",
+    "specs = NetpyneRunner()\n",
+    "```"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "710cc6084bd7af02"
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Let's `export` some `environmental variables` to pass values to our `config` object. When this is handled by the `batchtools` subpackage, this occurs automatically..."
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "52704684f5e80f3c"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "%env STRRUNTK0 =foo.bar=baz\n",
+    "%env FLOATRUNTK1 =float_val=7.7\n",
+    "from netpyne.batchtools import NetpyneRunner\n",
+    "specs = NetpyneRunner()"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "50de117ff7f43aa6",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "One way of retrieving these values is by calling `specs.get_mappings()`"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "fac14e517044b980"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "print(specs.get_mappings())"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "257fad390f4abce",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Now, let's create our `config` object using the `specs.SimConfig()` constructor.\n",
+    "This `config` object will hold a `dictionary` such that the initial values are `foo['bar']` = `not_baz` and `float_val` = `3.3`"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "92d41061bb828744"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "cfg = specs.SimConfig()\n",
+    "cfg.foo = {'bar': 'not_baz', 'qux': 'quux'}\n",
+    "cfg.float_val = 3.3\n",
+    "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n",
+    "print(\"cfg.float_val = {}\".format(cfg.float_val))"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "ca121d6ab30c3e7b",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Finally, calling the `cfg.update_cfg()` method will overwrite the original values with our environment values (`baz` and `7.7`)...\n",
+    "\n",
+    "In NetPyNE, this was originally handled with the\n",
+    "```\n",
+    "try:\n",
+    "    from __main__ import cfg\n",
+    "except:\n",
+    "    from cfg import cfg\n",
+    "```\n",
+    "API idiom in the `netParams.py` file...\n",
+    "\n",
+    "as well as the\n",
+    "```\n",
+    "cfg, netParams = sim.readCmdLineArgs(simConfigDefault='src/cfg.py', netParamsDefault='src/netParams.py')\n",
+    "```\n",
+    "API idiom in the `init.py` file...\n",
+    "\n",
+    "Using the `batchtools` subpackage, we can treat the `cfg` as an object and pass it between scripts via `import` statements...\n",
+    "In `netParams.py`...\n",
+    "```\n",
+    "from cfg import cfg\n",
+    "cfg.update()\n",
+    "```\n",
+    "In `init.py`...\n",
+    "```\n",
+    "from netParams import cfg, netParams\n",
+    "sim.createSimulateAnalyze(simConfig=cfg, netParams=netParams)\n",
+    "```"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "6ea43f729d0685d4"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "print(\"prior to cfg.update()\")\n",
+    "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n",
+    "print(\"cfg.float_val = {}\".format(cfg.float_val))\n",
+    "print()\n",
+    "cfg.update()  # call update_cfg to update values in the cfg object with values assigned by batch\n",
+    "print(\"after the cfg.update()\")\n",
+    "print(\"cfg.foo['bar'] = {}\".format(cfg.foo['bar']))\n",
+    "print(\"cfg.float_val = {}\".format(cfg.float_val))"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "a9426b6e6594961",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Finally, the `comm` object is used to report to the monitoring `dispatcher` object.\n",
+    "The means of communication depends on which `dispatcher` object is instantiated, and is communicated through environment variables.\n",
+    "In this case, since there is no `dispatcher` object, the `comm` methods will simply perform `pass` operations"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "65bbb0ef2c76295a"
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "comm.initialize()  # initializes comm object, establishing channel to communicate with the host dispatcher object"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "e9141d91d6e02aa3",
+   "execution_count": null
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "print(comm.is_host())  # returns a boolean indicating whether the calling process is the 0th ranked parallel context, similar to sim.pc.rank == 0"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "5ed6a524bd8a3e0b",
+   "execution_count": null
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "comm.send('message')  # sends 'message' to the `dispatcher object`"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "1966edbf32649352",
+   "execution_count": null
+  },
+  {
+   "cell_type": "code",
+   "outputs": [],
+   "source": [
+    "comm.close()  # finalizes communication, closes any resources used to communicate with the `dispatcher object`"
+   ],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "34f021af4127363c",
+   "execution_count": null
+  },
+  {
+   "cell_type": "markdown",
+   "source": [],
+   "metadata": {
+    "collapsed": false
+   },
+   "id": "648746fff96b8a72"
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/netpyne/batchtools/search.py b/netpyne/batchtools/search.py
index 724e4ee6e..694a19a48 100644
--- a/netpyne/batchtools/search.py
+++ b/netpyne/batchtools/search.py
@@ -28,8 +28,8 @@ def ray_optuna_search(dispatcher_constructor: Callable, # constructor for the di
                       max_concurrent: Optional[int] = 1, # number of concurrent trials to run at one time
                       batch: Optional[bool] = True, # whether concurrent trials should run synchronously or asynchronously
                       num_samples: Optional[int] = 1, # number of trials to run
-                      metric: Optional[str] = "loss", # metric to optimize (this should match some key: value pair in the returned data
-                      mode: Optional[str] = "min", # either 'min' or 'max' (whether to minimize or maximize the metric
+                      metric: Optional[str|list|tuple] = "loss", # metric (or list of metrics) to optimize (this should match some key: value pair in the returned data)
+                      mode: Optional[str|list|tuple] = "min", # either 'min' or 'max' (or a list of 'min'/'max' entries, one per metric)
                       optuna_config: Optional[dict] = None, # additional configuration for the optuna search algorithm
                       ) -> namedtuple('Study', ['algo', 'results']):
     """