From 63eec838ade9632b3aeaf807009c16d81d18b63b Mon Sep 17 00:00:00 2001 From: CBroz1 Date: Thu, 21 Dec 2023 14:32:46 -0600 Subject: [PATCH] print -> logger --- pyproject.toml | 2 +- src/spyglass/common/common_behav.py | 26 ++-- src/spyglass/common/common_device.py | 34 +++--- src/spyglass/common/common_dio.py | 15 ++- src/spyglass/common/common_ephys.py | 114 +++++++++++------- src/spyglass/common/common_filter.py | 18 +-- src/spyglass/common/common_interval.py | 4 +- src/spyglass/common/common_lab.py | 10 +- src/spyglass/common/common_nwbfile.py | 24 ++-- src/spyglass/common/common_position.py | 12 +- src/spyglass/common/common_ripple.py | 4 +- src/spyglass/common/common_sensors.py | 19 +-- src/spyglass/common/common_session.py | 32 ++--- src/spyglass/common/common_subject.py | 6 +- src/spyglass/common/common_task.py | 64 ++++++---- src/spyglass/common/populate_all_common.py | 48 ++++---- .../common/prepopulate/prepopulate.py | 44 ++++--- src/spyglass/data_import/insert_sessions.py | 9 +- src/spyglass/decoding/clusterless.py | 1 - src/spyglass/decoding/sorted_spikes.py | 12 +- src/spyglass/decoding/visualization.py | 3 +- .../decoding/visualization_2D_view.py | 7 +- .../figurl_views/SpikeSortingRecordingView.py | 13 +- src/spyglass/figurl_views/SpikeSortingView.py | 24 ++-- .../prepare_spikesortingview_data.py | 10 +- src/spyglass/lfp/analysis/v1/lfp_band.py | 7 +- src/spyglass/lfp/v1/lfp.py | 23 ++-- .../v1/lfp_artifact_difference_detection.py | 15 +-- src/spyglass/linearization/v0/main.py | 4 +- src/spyglass/linearization/v1/main.py | 5 +- src/spyglass/lock/file_lock.py | 4 +- src/spyglass/position/position_merge.py | 26 ++-- src/spyglass/position/v1/dlc_utils.py | 2 +- src/spyglass/ripple/v1/ripple.py | 6 +- src/spyglass/sharing/sharing_kachery.py | 14 +-- src/spyglass/spikesorting/curation_figurl.py | 27 +---- src/spyglass/spikesorting/imported.py | 6 +- .../spikesorting/merged_sorting_extractor.py | 4 +- src/spyglass/spikesorting/sortingview.py | 18 ++- .../spikesorting/sortingview_helper_fn.py | 29 ++--- .../spikesorting/spikesorting_artifact.py | 15 ++- .../spikesorting/spikesorting_curation.py | 12 +- .../spikesorting/spikesorting_populator.py | 49 ++++---- .../spikesorting/spikesorting_recording.py | 26 ++-- .../spikesorting/spikesorting_sorting.py | 25 ++-- src/spyglass/spikesorting/v1/artifact.py | 10 +- .../spikesorting/v1/figurl_curation.py | 7 +- .../spikesorting/v1/metric_curation.py | 16 ++- src/spyglass/spikesorting/v1/recording.py | 11 +- src/spyglass/spikesorting/v1/sorting.py | 10 +- src/spyglass/utils/__init__.py | 3 +- src/spyglass/utils/database_settings.py | 10 +- src/spyglass/utils/dj_helper_fn.py | 11 +- src/spyglass/utils/dj_merge_tables.py | 56 ++++++--- src/spyglass/utils/dj_mixin.py | 9 +- src/spyglass/utils/logging.py | 32 +++++ src/spyglass/utils/nwb_helper_fn.py | 28 +++-- 57 files changed, 589 insertions(+), 486 deletions(-) create mode 100644 src/spyglass/utils/logging.py diff --git a/pyproject.toml b/pyproject.toml index 76acb9ca3..22a397e17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,7 +48,7 @@ dependencies = [ "numpy<1.24", "ipympl", "tqdm", - "pubnub<6.4.0", + "pubnub<6.4.0", # TODO: remove this when sortingview is updated "pynwb>=2.2.0,<3", "hdmf>=3.4.6", "datajoint>=0.13.6", diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index b551ce1b2..1ae20b0cf 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -14,7 +14,7 @@ from spyglass.common.common_session import 
Session # noqa: F401 from spyglass.common.common_task import TaskEpoch from spyglass.settings import video_dir -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from spyglass.utils.nwb_helper_fn import ( get_all_spatial_series, get_data_interface, @@ -174,7 +174,7 @@ def fetch1_dataframe(self): id_rp = [(n["id"], n["raw_position"]) for n in self.fetch_nwb()] if len(set(rp.interval for _, rp in id_rp)) > 1: - print("WARNING: loading DataFrame with multiple intervals.") + logger.warn("Loading DataFrame with multiple intervals.") df_list = [ pd.DataFrame( @@ -270,7 +270,7 @@ def make(self, key): "associated_files" ) or nwbf.processing.get("associated files") if associated_files is None: - print( + logger.info( "Unable to import StateScriptFile: no processing module named " + '"associated_files" found in {nwb_file_name}.' ) @@ -280,7 +280,7 @@ def make(self, key): if not isinstance( associated_file_obj, ndx_franklab_novela.AssociatedFiles ): - print( + logger.info( f"Data interface {associated_file_obj.name} within " + '"associated_files" processing module is not ' + "of expected type ndx_franklab_novela.AssociatedFiles\n" @@ -293,7 +293,7 @@ def make(self, key): epoch_list = associated_file_obj.task_epochs.split(",") # only insert if this is the statescript file - print(associated_file_obj.description) + logger.info(associated_file_obj.description) if ( "statescript".upper() in associated_file_obj.description.upper() or "state_script".upper() @@ -306,7 +306,7 @@ def make(self, key): key["file_object_id"] = associated_file_obj.object_id self.insert1(key) else: - print("not a statescript file") + logger.info("not a statescript file") @schema @@ -347,7 +347,7 @@ def _no_transaction_make(self, key, verbose=True): ) if videos is None: - print(f"No video data interface found in {nwb_file_name}\n") + logger.warn(f"No video data interface found in {nwb_file_name}\n") return else: videos = videos.time_series @@ -388,7 +388,7 @@ def _no_transaction_make(self, key, verbose=True): is_found = True if not is_found and verbose: - print( + logger.info( f"No video found corresponding to file {nwb_file_name}, " + f"epoch {interval_list_name}" ) @@ -471,7 +471,7 @@ def _no_transaction_make(self, key): # Skip populating if no pos interval list names if len(pos_intervals) == 0: - print(f"NO POS INTERVALS FOR {key}; {no_pop_msg}") + logger.error(f"NO POS INTERVALS FOR {key}; {no_pop_msg}") return valid_times = (IntervalList & key).fetch1("valid_times") @@ -508,7 +508,7 @@ def _no_transaction_make(self, key): # Check that each pos interval was matched to only one epoch if len(matching_pos_intervals) != 1: - print( + logger.error( f"Found {len(matching_pos_intervals)} pos intervals for {key}; " + f"{no_pop_msg}\n{matching_pos_intervals}" ) @@ -517,7 +517,7 @@ def _no_transaction_make(self, key): # Insert into table key["position_interval_name"] = matching_pos_intervals[0] self.insert1(key, allow_direct_insert=True) - print( + logger.info( "Populated PosIntervalMap for " + f'{nwb_file_name}, {key["interval_list_name"]}' ) @@ -569,7 +569,7 @@ def convert_epoch_interval_name_to_position_interval_name( ) if len(pos_query) == 0: - print(f"No position intervals found for {key}") + logger.info(f"No position intervals found for {key}") return [] if len(pos_query) == 1: @@ -596,7 +596,7 @@ def get_interval_list_name_from_epoch(nwb_file_name: str, epoch: int) -> str: ).fetch("interval_list_name") if len(interval_names) != 1: - print( + logger.info( f"Found 
{len(interval_names)} interval list names found for " + f"{nwb_file_name} epoch {epoch}" ) diff --git a/src/spyglass/common/common_device.py b/src/spyglass/common/common_device.py index 26a0dbec5..223862c81 100644 --- a/src/spyglass/common/common_device.py +++ b/src/spyglass/common/common_device.py @@ -1,10 +1,10 @@ import datajoint as dj import ndx_franklab_novela +from spyglass.common.errors import PopulateException from spyglass.utils.dj_mixin import SpyglassMixin - -from ..utils.nwb_helper_fn import get_nwb_file -from .errors import PopulateException +from spyglass.utils.logging import logger +from spyglass.utils.nwb_helper_fn import get_nwb_file schema = dj.schema("common_device") @@ -14,7 +14,6 @@ class DataAcquisitionDeviceSystem(SpyglassMixin, dj.Manual): definition = """ # Known data acquisition device system names. data_acquisition_device_system: varchar(80) - --- """ @@ -23,7 +22,6 @@ class DataAcquisitionDeviceAmplifier(SpyglassMixin, dj.Manual): definition = """ # Known data acquisition device amplifier names. data_acquisition_device_amplifier: varchar(80) - --- """ @@ -84,12 +82,12 @@ def insert_from_nwbfile(cls, nwbf, config): cls._add_device(new_device_dict) if ndx_devices: - print( + logger.info( "Inserted or referenced data acquisition device(s): " + f"{ndx_devices.keys()}" ) else: - print("No conforming data acquisition device metadata found.") + logger.warn("No conforming data acquisition device metadata found.") @classmethod def get_all_device_names(cls, nwbf, config) -> tuple: @@ -158,7 +156,7 @@ def _add_device(cls, new_device_dict): ).tolist() if name not in all_values: # no entry with the same name exists, prompt user to add a new entry - print( + logger.info( f"\nData acquisition device '{name}' was not found in the " f"database. The current values are: {all_values}. " "Please ensure that the device you want to add does not already" @@ -216,7 +214,7 @@ def _add_system(cls, system): "data_acquisition_device_system" ).tolist() if system not in all_values: - print( + logger.info( f"\nData acquisition device system '{system}' was not found in" f" the database. The current values are: {all_values}. " "Please ensure that the system you want to add does not already" @@ -267,7 +265,7 @@ def _add_amplifier(cls, amplifier): "data_acquisition_device_amplifier" ).tolist() if amplifier not in all_values: - print( + logger.info( f"\nData acquisition device amplifier '{amplifier}' was not " f"found in the database. The current values are: {all_values}. " "Please ensure that the amplifier you want to add does not " @@ -337,9 +335,9 @@ def insert_from_nwbfile(cls, nwbf): cls.insert1(device_dict, skip_duplicates=True) device_name_list.append(device_dict["camera_name"]) if device_name_list: - print(f"Inserted camera devices {device_name_list}") + logger.info(f"Inserted camera devices {device_name_list}") else: - print("No conforming camera device metadata found.") + logger.warn("No conforming camera device metadata found.") return device_name_list @@ -442,7 +440,7 @@ def insert_from_nwbfile(cls, nwbf, config): # the ones in the database query = Probe & {"probe_id": new_probe_dict["probe_id"]} if len(query) > 0: - print( + logger.info( f"Probe ID '{new_probe_dict['probe_id']}' already exists in" " the database. Spyglass will use that and not create a new" " Probe, Shanks, or Electrodes." 
@@ -457,9 +455,9 @@ def insert_from_nwbfile(cls, nwbf, config): cls.Electrode.insert1(electrode, skip_duplicates=True) if all_probes_types: - print(f"Inserted probes {all_probes_types}") + logger.info(f"Inserted probes {all_probes_types}") else: - print("No conforming probe metadata found.") + logger.warn("No conforming probe metadata found.") return all_probes_types @@ -579,7 +577,7 @@ def _add_probe_type(cls, new_probe_type_dict): probe_type = new_probe_type_dict["probe_type"] all_values = ProbeType.fetch("probe_type").tolist() if probe_type not in all_values: - print( + logger.info( f"\nProbe type '{probe_type}' was not found in the database. " f"The current values are: {all_values}. " "Please ensure that the probe type you want to add does not " @@ -663,7 +661,7 @@ def create_from_nwbfile( query = ProbeType & {"probe_type": probe_type} if len(query) == 0: - print( + logger.warn( f"No ProbeType found with probe_type '{probe_type}'. Aborting." ) return @@ -727,7 +725,7 @@ def create_from_nwbfile( ] if not device_found: - print( + logger.warn( "No electrodes in the NWB file were associated with a device " + f"named '{nwb_device_name}'." ) diff --git a/src/spyglass/common/common_dio.py b/src/spyglass/common/common_dio.py index cd50d015b..93a087116 100644 --- a/src/spyglass/common/common_dio.py +++ b/src/spyglass/common/common_dio.py @@ -4,13 +4,12 @@ import pandas as pd import pynwb -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..utils.nwb_helper_fn import get_data_interface, get_nwb_file -from .common_ephys import Raw -from .common_interval import IntervalList -from .common_nwbfile import Nwbfile -from .common_session import Session # noqa: F401 +from spyglass.common.common_ephys import Raw +from spyglass.common.common_interval import IntervalList +from spyglass.common.common_nwbfile import Nwbfile +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.nwb_helper_fn import get_data_interface, get_nwb_file schema = dj.schema("common_dio") @@ -36,7 +35,7 @@ def make(self, key): nwbf, "behavioral_events", pynwb.behavior.BehavioralEvents ) if behav_events is None: - print( + logger.warn( "No conforming behavioral events data interface found in " + f"{nwb_file_name}\n" ) diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index ef9f4ee65..dd4004f16 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -6,10 +6,19 @@ import pandas as pd import pynwb -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..utils.dj_helper_fn import fetch_nwb # dj_replace -from ..utils.nwb_helper_fn import ( +from spyglass.common.common_device import Probe # noqa: F401 +from spyglass.common.common_filter import FirFilterParameters +from spyglass.common.common_interval import interval_list_censor # noqa: F401 +from spyglass.common.common_interval import ( + IntervalList, + interval_list_contains_ind, + interval_list_intersect, +) +from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile +from spyglass.common.common_region import BrainRegion # noqa: F401 +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.nwb_helper_fn import ( estimate_sampling_rate, get_config, get_data_interface, @@ -17,17 +26,6 @@ get_nwb_file, get_valid_intervals, ) -from .common_device import Probe # noqa: F401 -from .common_filter import FirFilterParameters -from 
.common_interval import interval_list_censor  # noqa: F401
-from .common_interval import (
-    IntervalList,
-    interval_list_contains_ind,
-    interval_list_intersect,
-)
-from .common_nwbfile import AnalysisNwbfile, Nwbfile
-from .common_region import BrainRegion  # noqa: F401
-from .common_session import Session  # noqa: F401
 
 schema = dj.schema("common_ephys")
@@ -127,9 +125,13 @@ def make(self, key):
             key["filtering"] = elect_data.filtering
             key["impedance"] = elect_data.get("imp")
 
-            # rough check of whether the electrodes table was created by rec_to_nwb and has
-            # the appropriate custom columns used by rec_to_nwb
-            # TODO this could be better resolved by making an extension for the electrodes table
+            # rough check of whether the electrodes table was created by
+            # rec_to_nwb and has the appropriate custom columns used by
+            # rec_to_nwb
+
+            # TODO this could be better resolved by making an extension for the
+            # electrodes table
+
             if (
                 isinstance(elect_data.group.device, ndx_franklab_novela.Probe)
                 and "probe_shank" in elect_data
@@ -145,14 +147,18 @@ def make(self, key):
                 )
                 key["original_reference_electrode"] = elect_data.ref_elect_id
 
-            # override with information from the config YAML based on primary key (electrode id)
+            # override with information from the config YAML based on primary
+            # key (electrode id)
+
             if elect_id in electrode_config_dicts:
                 # check whether the Probe.Electrode being referenced exists
                 query = Probe.Electrode & electrode_config_dicts[elect_id]
                 if len(query) == 0:
                     warnings.warn(
-                        f"No Probe.Electrode exists that matches the data: {electrode_config_dicts[elect_id]}. "
-                        f"The config YAML for Electrode with electrode_id {elect_id} will be ignored."
+                        "No Probe.Electrode exists that matches the data: "
+                        + f"{electrode_config_dicts[elect_id]}. "
+                        "The config YAML for Electrode with electrode_id "
+                        + f"{elect_id} will be ignored."
                     )
                 else:
                     key.update(electrode_config_dicts[elect_id])
@@ -174,7 +180,7 @@ def create_from_config(cls, nwb_file_name: str):
         if "Electrode" not in config:
             return
 
-        # map electrode id to dictionary of electrode information from config YAML
+        # map electrode id to dict of electrode information from config YAML
         electrode_dicts = {
             electrode_dict["electrode_id"]: electrode_dict
             for electrode_dict in config["Electrode"]
@@ -183,8 +189,9 @@ def create_from_config(cls, nwb_file_name: str):
         electrodes = nwbf.electrodes.to_dataframe()
         for nwbfile_elect_id, elect_data in electrodes.iterrows():
             if nwbfile_elect_id in electrode_dicts:
-                # use the information in the electrodes table to start and then add (or overwrite) values from the
-                # config YAML
+                # use the information in the electrodes table to start and then
+                # add (or overwrite) values from the config YAML
+
                 key = dict()
                 key["nwb_file_name"] = nwb_file_name
                 key["name"] = str(nwbfile_elect_id)
@@ -205,16 +212,20 @@ def create_from_config(cls, nwb_file_name: str):
                 query = Electrode & {"electrode_id": nwbfile_elect_id}
                 if len(query):
                     cls.update1(key)
-                    print(f"Updated Electrode with ID {nwbfile_elect_id}.")
+                    logger.info(
+                        f"Updated Electrode with ID {nwbfile_elect_id}."
+                    )
                 else:
                     cls.insert1(
                         key, skip_duplicates=True, allow_direct_insert=True
                     )
-                    print(f"Inserted Electrode with ID {nwbfile_elect_id}.")
+                    logger.info(
+                        f"Inserted Electrode with ID {nwbfile_elect_id}."
+                    )
             else:
                 warnings.warn(
-                    f"Electrode ID {nwbfile_elect_id} exists in the NWB file but has no corresponding "
-                    "config YAML entry."
+ f"Electrode ID {nwbfile_elect_id} exists in the NWB file " + + "but has no corresponding config YAML entry." ) @@ -252,7 +263,7 @@ def make(self, key): if rawdata.rate is not None: sampling_rate = rawdata.rate else: - print("Estimating sampling rate...") + logger.info("Estimating sampling rate...") # NOTE: Only use first 1e6 timepoints to save time sampling_rate = estimate_sampling_rate( np.asarray(rawdata.timestamps[: int(1e6)]), 1.5, verbose=True @@ -276,11 +287,15 @@ def make(self, key): ) IntervalList().insert1(interval_dict, skip_duplicates=True) - # now insert each of the electrodes as an individual row, but with the same nwb_object_id + # now insert each of the electrodes as an individual row, but with the + # same nwb_object_id + key["raw_object_id"] = rawdata.object_id key["sampling_rate"] = sampling_rate - print(f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz') - print( + logger.info( + f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz' + ) + logger.info( f'Number of valid intervals:\t{len(interval_dict["valid_times"])}' ) key["interval_list_name"] = raw_interval_name @@ -289,8 +304,10 @@ def make(self, key): self.insert1(key, skip_duplicates=True) def nwb_object(self, key): - # TODO return the nwb_object; FIX: this should be replaced with a fetch call. Note that we're using the raw file - # so we can modify the other one. + # TODO return the nwb_object; FIX: this should be replaced with a fetch + # call. Note that we're using the raw file so we can modify the other + # one. + nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) @@ -319,8 +336,9 @@ def make(self, key): # TODO: change name when nwb file is changed sample_count = get_data_interface(nwbf, "sample_count") if sample_count is None: - print( - f'Unable to import SampleCount: no data interface named "sample_count" found in {nwb_file_name}.' + logger.info( + "Unable to import SampleCount: no data interface named " + + f'"sample_count" found in {nwb_file_name}.' ) return key["sample_count_object_id"] = sample_count.object_id @@ -388,7 +406,9 @@ class LFP(SpyglassMixin, dj.Imported): """ def make(self, key): - # get the NWB object with the data; FIX: change to fetch with additional infrastructure + # get the NWB object with the data; FIX: change to fetch with + # additional infrastructure + rawdata = Raw().nwb_object(key) sampling_rate, interval_list_name = (Raw() & key).fetch1( "sampling_rate", "interval_list_name" @@ -409,8 +429,9 @@ def make(self, key): if interval[1] - interval[0] > min_interval_length: valid.append(count) valid_times = valid_times[valid] - print( - f"LFP: found {len(valid)} of {count+1} intervals > {min_interval_length} sec long." + logger.info( + f"LFP: found {len(valid)} of {count+1} intervals > " + + f"{min_interval_length} sec long." 
)
 
         # target 1 KHz sampling rate
         decimation = sampling_rate // 1000
 
         # get the LFP filter that matches the raw data
         filter = (
             FirFilterParameters()
             & {"filter_name": "LFP 0-400 Hz"}
             & {"filter_sampling_rate": sampling_rate}
         ).fetch(as_dict=True)
 
-        # there should only be one filter that matches, so we take the first of the dictionaries
+        # there should only be one filter that matches, so we take the first of
+        # the dictionaries
+
         key["filter_name"] = filter[0]["filter_name"]
         key["filter_sampling_rate"] = filter[0]["filter_sampling_rate"]
 
         filter_coeff = filter[0]["filter_coeff"]
         if len(filter_coeff) == 0:
-            print(
-                f"Error in LFP: no filter found with data sampling rate of {sampling_rate}"
+            logger.error(
+                "Error in LFP: no filter found with data sampling rate of "
+                + f"{sampling_rate}"
             )
             return None
         # get the list of selected LFP Channels from LFPElectrode
@@ -453,7 +477,9 @@ def make(self, key):
             decimation,
         )
 
-        # now that the LFP is filtered and in the file, add the file to the AnalysisNwbfile table
+        # now that the LFP is filtered and in the file, add the file to the
+        # AnalysisNwbfile table
+
         AnalysisNwbfile().add(key["nwb_file_name"], lfp_file_name)
 
         key["analysis_file_name"] = lfp_file_name
@@ -739,7 +765,7 @@ def make(self, key):
         filter_coeff = filter[0]["filter_coeff"]
 
         if len(filter_coeff) == 0:
-            print(
+            logger.error(
                 f"Error in LFPBand: no filter found with data sampling rate of {lfp_band_sampling_rate}"
             )
             return None
diff --git a/src/spyglass/common/common_filter.py b/src/spyglass/common/common_filter.py
index fadae95a9..0472c6e18 100644
--- a/src/spyglass/common/common_filter.py
+++ b/src/spyglass/common/common_filter.py
@@ -9,9 +9,8 @@
 import pynwb
 import scipy.signal as signal
 
-from spyglass.utils.dj_mixin import SpyglassMixin
-
-from ..utils.nwb_helper_fn import get_electrode_indices
+from spyglass.utils import SpyglassMixin, logger
+from spyglass.utils.nwb_helper_fn import get_electrode_indices
 
 schema = dj.schema("common_filter")
 
@@ -90,14 +89,16 @@ def add_filter(
         # high pass: [low stop low pass]
         # band pass: [low_stop low_pass high_pass high_stop].
if filter_type not in VALID_FILTERS: - print( + logger.error( FILTER_ERR + f"{filter_type} not valid type: {VALID_FILTERS.keys()}" ) return None if not len(band_edges) == VALID_FILTERS[filter_type]: - print(FILTER_N_ERR.format(filter_name, VALID_FILTERS[filter_type])) + logger.error( + FILTER_N_ERR.format(filter_name, VALID_FILTERS[filter_type]) + ) return None gsp = _import_ghostipy() @@ -352,7 +353,7 @@ def filter_data_nwb( # Filter and write the output dataset ts_offset = 0 - print("Filtering data") + logger.info("Filtering data") for ii, (start, stop) in enumerate(indices): # Calc size of timestamps + data, check if < 90% of RAM interval_samples = stop - start @@ -361,7 +362,7 @@ def filter_data_nwb( + n_electrodes * data_on_disk[0][0].itemsize ) if req_mem < MEM_USE_LIMIT * psutil.virtual_memory().available: - print(f"Interval {ii}: loading data into memory") + logger.info(f"Interval {ii}: loading data into memory") timestamps = np.asarray( timestamps_on_disk[start:stop], dtype=timestamps_on_disk[0].dtype, @@ -382,7 +383,7 @@ def filter_data_nwb( input_index_bounds = [0, interval_samples - 1] else: - print(f"Interval {ii}: leaving data on disk") + logger.info(f"Interval {ii}: leaving data on disk") data = data_on_disk timestamps = timestamps_on_disk extracted_ts = timestamps[start:stop:decimation] @@ -495,7 +496,6 @@ def filter_data( for ii, (start, stop) in enumerate(indices): extracted_ts = timestamps[start:stop:decimation] - # print(f"Diffs {np.diff(extracted_ts)}") new_timestamps[ ts_offset : ts_offset + len(extracted_ts) ] = extracted_ts diff --git a/src/spyglass/common/common_interval.py b/src/spyglass/common/common_interval.py index bf921ded1..2ad12ad34 100644 --- a/src/spyglass/common/common_interval.py +++ b/src/spyglass/common/common_interval.py @@ -6,7 +6,7 @@ import numpy as np import pandas as pd -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from .common_session import Session # noqa: F401 @@ -47,7 +47,7 @@ def insert_from_nwbfile(cls, nwbf, *, nwb_file_name): table. """ if nwbf.epochs is None: - print("No epochs found in NWB file.") + logger.info("No epochs found in NWB file.") return epochs = nwbf.epochs.to_dataframe() diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index ed2e3a132..e60def6bd 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -1,7 +1,7 @@ """Schema for institution, lab team/name/members. Session-independent.""" import datajoint as dj -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from ..utils.nwb_helper_fn import get_nwb_file from .common_nwbfile import Nwbfile @@ -54,7 +54,7 @@ def insert_from_nwbfile(cls, nwbf): nwbf = get_nwb_file(nwb_file_abspath) if nwbf.experimenter is None: - print("No experimenter metadata found.\n") + logger.info("No experimenter metadata found.\n") return for experimenter in nwbf.experimenter: @@ -196,7 +196,7 @@ def create_new_team( LabMember.LabMemberInfo() & {"lab_member_name": team_member} ).fetch("google_user_name") if not query: - print( + logger.info( f"Please add the Google user ID for {team_member} in " + "LabMember.LabMemberInfo to help manage permissions." ) @@ -228,7 +228,7 @@ def insert_from_nwbfile(cls, nwbf): The NWB file with institution information. 
""" if nwbf.institution is None: - print("No institution metadata found.\n") + logger.info("No institution metadata found.\n") return cls.insert1( @@ -252,7 +252,7 @@ def insert_from_nwbfile(cls, nwbf): The NWB file with lab name information. """ if nwbf.lab is None: - print("No lab metadata found.\n") + logger.info("No lab metadata found.\n") return cls.insert1(dict(lab_name=nwbf.lab), skip_duplicates=True) diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index b7b98e848..b5eabab97 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -12,9 +12,9 @@ from hdmf.common import DynamicTable from spyglass.settings import analysis_dir, raw_dir -from spyglass.utils.dj_mixin import SpyglassMixin -from ..utils.dj_helper_fn import get_child_tables -from ..utils.nwb_helper_fn import get_electrode_indices, get_nwb_file +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.dj_helper_fn import get_child_tables +from spyglass.utils.nwb_helper_fn import get_electrode_indices, get_nwb_file schema = dj.schema("common_nwbfile") @@ -196,7 +196,7 @@ def create(self, nwb_file_name): analysis_file_name = self.__get_new_file_name(nwb_file_name) # write the new file - print(f"Writing new NWB file {analysis_file_name}") + logger.info(f"Writing new NWB file {analysis_file_name}") analysis_file_abs_path = AnalysisNwbfile.get_abs_path( analysis_file_name ) @@ -262,7 +262,7 @@ def copy(cls, nwb_file_name): original_nwb_file_name = query.fetch("nwb_file_name")[0] analysis_file_name = cls.__get_new_file_name(original_nwb_file_name) # write the new file - print(f"Writing new NWB file {analysis_file_name}...") + logger.info(f"Writing new NWB file {analysis_file_name}...") analysis_file_abs_path = AnalysisNwbfile.get_abs_path( analysis_file_name ) @@ -429,7 +429,9 @@ def add_units( ) # sort by unit_ids and apply that sorting to values to ensure that things go in the right order metric_values = metric_values[np.argsort(unit_ids)] - print(f"Adding metric {metric} : {metric_values}") + logger.info( + f"Adding metric {metric} : {metric_values}" + ) nwbf.add_unit_column( name=metric, description=f"{metric} metric", @@ -542,7 +544,7 @@ def add_units_waveforms( # If metrics were specified, add one column per metric if metrics is not None: for metric_name, metric_dict in metrics.items(): - print(f"Adding metric {metric_name} : {metric_dict}") + logger.info(f"Adding metric {metric_name} : {metric_dict}") metric_data = metric_dict.values().to_list() nwbf.add_unit_column( name=metric_name, @@ -586,7 +588,7 @@ def add_units_metrics(self, analysis_file_name, metrics): nwbf.add_unit(id=id) for metric_name, metric_dict in metrics.items(): - print(f"Adding metric {metric_name} : {metric_dict}") + logger.info(f"Adding metric {metric_name} : {metric_dict}") metric_data = list(metric_dict.values()) nwbf.add_unit_column( name=metric_name, description=metric_name, data=metric_data @@ -651,7 +653,7 @@ class NwbfileKachery(SpyglassMixin, dj.Computed): def make(self, key): import kachery_client as kc - print(f'Linking {key["nwb_file_name"]} and storing in kachery...') + logger.info(f'Linking {key["nwb_file_name"]} and storing in kachery...') key["nwb_file_uri"] = kc.link_file( Nwbfile().get_abs_path(key["nwb_file_name"]) ) @@ -669,7 +671,9 @@ class AnalysisNwbfileKachery(SpyglassMixin, dj.Computed): def make(self, key): import kachery_client as kc - print(f'Linking {key["analysis_file_name"]} and storing in kachery...') + logger.info( + f'Linking 
{key["analysis_file_name"]} and storing in kachery...' + ) key["analysis_file_uri"] = kc.link_file( AnalysisNwbfile().get_abs_path(key["analysis_file_name"]) ) diff --git a/src/spyglass/common/common_position.py b/src/spyglass/common/common_position.py index 4ed5bfd70..ea661a29d 100644 --- a/src/spyglass/common/common_position.py +++ b/src/spyglass/common/common_position.py @@ -27,8 +27,8 @@ from spyglass.common.common_interval import IntervalList # noqa F401 from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.settings import raw_dir, video_dir +from spyglass.utils import SpyglassMixin, logger from spyglass.utils.dj_helper_fn import deprecated_factory -from spyglass.utils.dj_mixin import SpyglassMixin schema = dj.schema("common_position") @@ -85,7 +85,7 @@ class IntervalPositionInfo(SpyglassMixin, dj.Computed): """ def make(self, key): - print(f"Computing position for: {key}") + logger.info(f"Computing position for: {key}") analysis_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) @@ -187,7 +187,7 @@ def generate_pos_components( **time_comments, ) else: - print( + logger.info( "No video frame index found. Assuming all camera frames " + "are present." ) @@ -513,7 +513,7 @@ class PositionVideo(SpyglassMixin, dj.Computed): def make(self, key): M_TO_CM = 100 - print("Loading position data...") + logger.info("Loading position data...") raw_position_df = ( RawPosition() & { @@ -530,7 +530,7 @@ def make(self, key): } ).fetch1_dataframe() - print("Loading video data...") + logger.info("Loading video data...") epoch = ( int( key["interval_list_name"] @@ -577,7 +577,7 @@ def make(self, key): position_time = np.asarray(position_info_df.index) cm_per_pixel = nwb_video.device.meters_per_pixel * M_TO_CM - print("Making video...") + logger.info("Making video...") self.make_video( f"{video_dir}/{video_filename}", centroids, diff --git a/src/spyglass/common/common_ripple.py b/src/spyglass/common/common_ripple.py index 9f862900f..c3f31ea5f 100644 --- a/src/spyglass/common/common_ripple.py +++ b/src/spyglass/common/common_ripple.py @@ -8,7 +8,7 @@ from spyglass.common import IntervalList # noqa from spyglass.common import IntervalPositionInfo, LFPBand, LFPBandSelection from spyglass.common.common_nwbfile import AnalysisNwbfile -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("common_ripple") @@ -138,7 +138,7 @@ class RippleTimes(SpyglassMixin, dj.Computed): """ def make(self, key): - print(f"Computing ripple times for: {key}") + logger.info(f"Computing ripple times for: {key}") ripple_params = ( RippleParameters & {"ripple_param_name": key["ripple_param_name"]} ).fetch1("ripple_param_dict") diff --git a/src/spyglass/common/common_sensors.py b/src/spyglass/common/common_sensors.py index b90aa5a71..829d72da4 100644 --- a/src/spyglass/common/common_sensors.py +++ b/src/spyglass/common/common_sensors.py @@ -3,12 +3,12 @@ import datajoint as dj import pynwb -from ..utils.dj_mixin import SpyglassMixin -from ..utils.nwb_helper_fn import get_data_interface, get_nwb_file -from .common_ephys import Raw -from .common_interval import IntervalList # noqa: F401 -from .common_nwbfile import Nwbfile -from .common_session import Session # noqa: F401 +from spyglass.common.common_ephys import Raw +from spyglass.common.common_interval import IntervalList # noqa: F401 +from spyglass.common.common_nwbfile import Nwbfile +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.utils import SpyglassMixin, 
logger
+from spyglass.utils.nwb_helper_fn import get_data_interface, get_nwb_file
 
 schema = dj.schema("common_sensors")
@@ -33,11 +33,14 @@ def make(self, key):
             nwbf, "analog", pynwb.behavior.BehavioralEvents
         )
         if sensor is None:
-            print(f"No conforming sensor data found in {nwb_file_name}\n")
+            logger.info(f"No conforming sensor data found in {nwb_file_name}\n")
             return
         key["sensor_data_object_id"] = sensor.time_series["analog"].object_id
-        # the valid times for these data are the same as the valid times for the raw ephys data
+
+        # the valid times for these data are the same as the valid times for
+        # the raw ephys data
+
         key["interval_list_name"] = (
             Raw & {"nwb_file_name": nwb_file_name}
         ).fetch1("interval_list_name")
diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py
index 37ca3e61b..a237bc03c 100644
--- a/src/spyglass/common/common_session.py
+++ b/src/spyglass/common/common_session.py
@@ -9,7 +9,7 @@
 from spyglass.common.common_nwbfile import Nwbfile
 from spyglass.common.common_subject import Subject
 from spyglass.settings import config, debug_mode
-from spyglass.utils.dj_mixin import SpyglassMixin
+from spyglass.utils import SpyglassMixin, logger
 from spyglass.utils.nwb_helper_fn import get_config, get_nwb_file
 
 schema = dj.schema("common_session")
@@ -72,30 +72,27 @@ def make(self, key):
         # then, they are linked to the session via fields of Session (e.g., Subject, Institution, Lab) or part
         # tables (e.g., Experimenter, DataAcquisitionDevice).
 
-        print("Institution...")
+        logger.info("Institution...")
         Institution().insert_from_nwbfile(nwbf)
 
-        print("Lab...")
+        logger.info("Lab...")
         Lab().insert_from_nwbfile(nwbf)
 
-        print("LabMember...")
+        logger.info("LabMember...")
         LabMember().insert_from_nwbfile(nwbf)
 
-        print("Subject...")
+        logger.info("Subject...")
         Subject().insert_from_nwbfile(nwbf)
 
         if not debug_mode:  # TODO: remove when demo files agree on device
-            print("Populate DataAcquisitionDevice...")
+            logger.info("Populate DataAcquisitionDevice...")
             DataAcquisitionDevice.insert_from_nwbfile(nwbf, config)
-            print()
 
-        print("Populate CameraDevice...")
+        logger.info("Populate CameraDevice...")
         CameraDevice.insert_from_nwbfile(nwbf)
-        print()
 
-        print("Populate Probe...")
+        logger.info("Populate Probe...")
         Probe.insert_from_nwbfile(nwbf, config)
-        print()
 
         if nwbf.subject is not None:
             subject_id = nwbf.subject.subject_id
@@ -117,16 +117,16 @@ def make(self, key):
                 skip_duplicates=True,
             )
 
-        print("Skipping Apparatus for now...")
+        logger.info("Skipping Apparatus for now...")
         # Apparatus().insert_from_nwbfile(nwbf)
 
         # interval lists depend on Session (as a primary key) but users may want to add these manually so this is
         # a manual table that is also populated from NWB files
-        print("IntervalList...")
+        logger.info("IntervalList...")
         IntervalList().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name)
 
-        # print('Unit...')
+        # logger.info('Unit...')
         # Unit().insert_from_nwbfile(nwbf, nwb_file_name=nwb_file_name)
 
         self._add_data_acquisition_device_part(nwb_file_name, nwbf, config)
@@ -144,7 +144,7 @@ def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config):
                 "data_acquisition_device_name": device_name
             }
             if len(query) == 0:
-                print(
+                logger.warn(
                     f"DataAcquisitionDevice with name {device_name} does not exist. "
                     "Cannot link Session with DataAcquisitionDevice in Session.DataAcquisitionDevice."
) @@ -162,7 +162,7 @@ def _add_experimenter_part(self, nwb_file_name, nwbf): # ensure that the foreign key exists and do nothing if not query = LabMember & {"lab_member_name": name} if len(query) == 0: - print( + logger.warn( f"LabMember with name {name} does not exist. " "Cannot link Session with LabMember in Session.Experimenter." ) diff --git a/src/spyglass/common/common_subject.py b/src/spyglass/common/common_subject.py index ff8f7cee9..053909374 100644 --- a/src/spyglass/common/common_subject.py +++ b/src/spyglass/common/common_subject.py @@ -1,6 +1,6 @@ import datajoint as dj -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("common_subject") @@ -19,10 +19,10 @@ class Subject(SpyglassMixin, dj.Manual): @classmethod def insert_from_nwbfile(cls, nwbf): - """Get the subject information from the NWBFile and insert it into the Subject table.""" + """Get the subject info from the NWBFile, insert into the Subject.""" sub = nwbf.subject if sub is None: - print("No subject metadata found.\n") + logger.warn("No subject metadata found.\n") return subject_dict = dict() subject_dict["subject_id"] = sub.subject_id diff --git a/src/spyglass/common/common_task.py b/src/spyglass/common/common_task.py index c471aeb6f..0dffa4ac5 100644 --- a/src/spyglass/common/common_task.py +++ b/src/spyglass/common/common_task.py @@ -2,13 +2,12 @@ import ndx_franklab_novela import pynwb -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..utils.nwb_helper_fn import get_nwb_file -from .common_device import CameraDevice -from .common_interval import IntervalList -from .common_nwbfile import Nwbfile -from .common_session import Session # noqa: F401 +from spyglass.common.common_device import CameraDevice # noqa: F401 +from spyglass.common.common_interval import IntervalList +from spyglass.common.common_nwbfile import Nwbfile +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.nwb_helper_fn import get_nwb_file schema = dj.schema("common_task") @@ -34,7 +33,7 @@ def insert_from_nwbfile(cls, nwbf): """ tasks_mod = nwbf.processing.get("tasks") if tasks_mod is None: - print(f"No tasks processing module found in {nwbf}\n") + logger.warn(f"No tasks processing module found in {nwbf}\n") return for task in tasks_mod.data_interfaces.values(): if cls.check_task_table(task): @@ -102,20 +101,23 @@ def make(self, key): nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) camera_names = dict() - # the tasks refer to the camera_id which is unique for the NWB file but not for CameraDevice schema, so we - # need to look up the right camera + + # the tasks refer to the camera_id which is unique for the NWB file but + # not for CameraDevice schema, so we need to look up the right camera # map camera ID (in camera name) to camera_name + for device in nwbf.devices.values(): if isinstance(device, ndx_franklab_novela.CameraDevice): # get the camera ID camera_id = int(str.split(device.name)[1]) camera_names[camera_id] = device.camera_name - # find the task modules and for each one, add the task to the Task schema if it isn't there - # and then add an entry for each epoch + # find the task modules and for each one, add the task to the Task + # schema if it isn't there and then add an entry for each epoch + tasks_mod = nwbf.processing.get("tasks") if tasks_mod is None: - print(f"No tasks processing module found in {nwbf}\n") + logger.warn(f"No 
tasks processing module found in {nwbf}\n")
             return
 
         for task in tasks_mod.data_interfaces.values():
@@ -124,8 +126,9 @@ def make(self, key):
                     Task.insert_from_task_table(task)
                 key["task_name"] = task.task_name[0]
 
-                # get the CameraDevice used for this task (primary key is camera name so we need
-                # to map from ID to name)
+                # get the CameraDevice used for this task (primary key is
+                # camera name so we need to map from ID to name)
+
                 camera_ids = task.camera_id[0]
                 valid_camera_ids = [
                     camera_id
@@ -138,20 +141,25 @@ def make(self, key):
                     for camera_id in valid_camera_ids
                 ]
             else:
-                print(
-                    f"No camera device found with ID {camera_ids} in NWB file {nwbf}\n"
+                logger.warn(
+                    f"No camera device found with ID {camera_ids} in NWB "
+                    + f"file {nwbf}\n"
                 )
             # Add task environment
             if hasattr(task, "task_environment"):
                 key["task_environment"] = task.task_environment[0]
 
-            # get the interval list for this task, which corresponds to the matching epoch for the raw data.
-            # Users should define more restrictive intervals as required for analyses
+            # get the interval list for this task, which corresponds to the
+            # matching epoch for the raw data. Users should define more
+            # restrictive intervals as required for analyses
+
             session_intervals = (
                 IntervalList() & {"nwb_file_name": nwb_file_name}
             ).fetch("interval_list_name")
             for epoch in task.task_epochs[0]:
-                # TODO in beans file, task_epochs[0] is 1x2 dset of ints, so epoch would be an int
+                # TODO in beans file, task_epochs[0] is 1x2 dset of ints,
+                # so epoch would be an int
+
                 key["epoch"] = epoch
                 target_interval = str(epoch).zfill(2)
                 for interval in session_intervals:
@@ -175,11 +183,13 @@ def update_entries(cls, restrict={}):
             cls.update1(row=row)
 
     @classmethod
-    def check_task_table(cls, task_table):
-        """Check whether the pynwb DynamicTable containing task metadata conforms to the expected format.
+    def check_task_table(cls, task_table) -> bool:
+        """Check whether the pynwb DynamicTable containing task metadata
+        conforms to the expected format.
 
-        The table should be an instance of pynwb.core.DynamicTable and contain the columns 'task_name',
-        'task_description', 'camera_id', 'and 'task_epochs'.
+        The table should be an instance of pynwb.core.DynamicTable and contain
+        the columns 'task_name', 'task_description', 'camera_id', and
+        'task_epochs'.
 
         Parameters
         ----------
@@ -189,10 +199,12 @@ def check_task_table(cls, task_table):
 
         Returns
         -------
         bool
-            Whether the DynamicTable conforms to the expected format for loading data into the TaskEpoch table.
+            Whether the DynamicTable conforms to the expected format for
+            loading data into the TaskEpoch table.
+        
""" - # TODO this could be more strict and check data types, but really it should be schematized + # TODO this could be more strict and check data types, but really it + # should be schematized return ( Task.check_task_table(task_table) and hasattr(task_table, "camera_id") diff --git a/src/spyglass/common/populate_all_common.py b/src/spyglass/common/populate_all_common.py index 8c9ca1ee5..018a6fd68 100644 --- a/src/spyglass/common/populate_all_common.py +++ b/src/spyglass/common/populate_all_common.py @@ -1,59 +1,63 @@ -from spyglass.spikesorting.imported import ImportedSpikeSorting -from spyglass.utils.dj_mixin import SpyglassMixin - -from .common_behav import ( +from spyglass.common.common_behav import ( PositionSource, RawPosition, StateScriptFile, VideoFile, ) -from .common_dio import DIOEvents -from .common_ephys import Electrode, ElectrodeGroup, Raw, SampleCount -from .common_nwbfile import Nwbfile -from .common_session import Session -from .common_task import TaskEpoch +from spyglass.common.common_dio import DIOEvents +from spyglass.common.common_ephys import ( + Electrode, + ElectrodeGroup, + Raw, + SampleCount, +) +from spyglass.common.common_nwbfile import Nwbfile +from spyglass.common.common_session import Session +from spyglass.common.common_task import TaskEpoch +from spyglass.spikesorting.imported import ImportedSpikeSorting +from spyglass.utils import logger def populate_all_common(nwb_file_name): # Insert session one by one fp = [(Nwbfile & {"nwb_file_name": nwb_file_name}).proj()] - print("Populate Session...") + logger.info("Populate Session...") Session.populate(fp) # If we use Kachery for data sharing we can uncomment the following two lines. TBD - # print('Populate NwbfileKachery...') + # logger.info('Populate NwbfileKachery...') # NwbfileKachery.populate() - print("Populate ElectrodeGroup...") + logger.info("Populate ElectrodeGroup...") ElectrodeGroup.populate(fp) - print("Populate Electrode...") + logger.info("Populate Electrode...") Electrode.populate(fp) - print("Populate Raw...") + logger.info("Populate Raw...") Raw.populate(fp) - print("Populate SampleCount...") + logger.info("Populate SampleCount...") SampleCount.populate(fp) - print("Populate DIOEvents...") + logger.info("Populate DIOEvents...") DIOEvents.populate(fp) # sensor data (from analog ProcessingModule) is temporarily removed from NWBFile # to reduce file size while it is not being used. 
add it back in by commenting out
     # the removal code in spyglass/data_import/insert_sessions.py when ready
-    # print('Populate SensorData')
+    # logger.info('Populate SensorData')
     # SensorData.populate(fp)
 
-    print("Populate TaskEpochs")
+    logger.info("Populate TaskEpochs")
     TaskEpoch.populate(fp)
 
-    print("Populate StateScriptFile")
+    logger.info("Populate StateScriptFile")
     StateScriptFile.populate(fp)
 
-    print("Populate VideoFile")
+    logger.info("Populate VideoFile")
     VideoFile.populate(fp)
 
-    print("RawPosition...")
+    logger.info("RawPosition...")
     PositionSource.insert_from_nwbfile(nwb_file_name)
     RawPosition.populate(fp)
 
-    print("Populate ImportedSpikeSorting...")
+    logger.info("Populate ImportedSpikeSorting...")
     ImportedSpikeSorting.populate(fp)
diff --git a/src/spyglass/common/prepopulate/prepopulate.py b/src/spyglass/common/prepopulate/prepopulate.py
index 39896bc20..e77e68ef5 100644
--- a/src/spyglass/common/prepopulate/prepopulate.py
+++ b/src/spyglass/common/prepopulate/prepopulate.py
@@ -5,7 +5,9 @@
 
 import datajoint as dj
 import yaml
-from ...settings import base_dir
+
+from spyglass.settings import base_dir
+from spyglass.utils import logger
 
 
 def prepopulate_default():
@@ -27,20 +29,24 @@ def populate_from_yaml(yaml_path: str):
     for table_name, table_entries in d.items():
         table_cls = _get_table_cls(table_name)
         for entry_dict in table_entries:
-            # test whether an entity with the primary key(s) already exists in the table
+            # test whether an entity with the primary key(s) already exists
             if not issubclass(table_cls, (dj.Manual, dj.Lookup, dj.Part)):
                 raise ValueError(
-                    f"Prepopulate YAML ('{yaml_path}') contains table '{table_name}' that cannot be "
-                    "prepopulated. Only Manual and Lookup tables can be prepopulated."
+                    f"Prepopulate YAML ('{yaml_path}') contains table "
+                    + f"'{table_name}' that cannot be prepopulated. Only Manual "
+                    + "and Lookup tables can be prepopulated."
                 )
             if hasattr(table_cls, "fetch_add"):
-                # if the table has defined a fetch_add method, use that instead of insert1. this is useful for
-                # tables where the primary key is an ID that auto-increments.
-                # first check whether an entry exists with the same information.
+                # if the table has defined a fetch_add method, use that instead
+                # of insert1. this is useful for tables where the primary key
+                # is an ID that auto-increments. first check whether an entry
+                # exists with the same information.
+
                 query = table_cls & entry_dict
                 if not query:
-                    print(
-                        f"Populate: Populating table {table_cls.__name__} with data {entry_dict} using fetch_add."
+                    logger.info(
+                        f"Populate: Populating table {table_cls.__name__} with "
+                        + f"data {entry_dict} using fetch_add."
                     )
                     table_cls.fetch_add(**entry_dict)
                 continue
@@ -51,25 +57,31 @@ def populate_from_yaml(yaml_path: str):
                 if k in table_cls.primary_key
             }
             if not primary_key_values:
-                print(
-                    f"Populate: No primary key provided in data {entry_dict} for table {table_cls.__name__}"
+                logger.warn(
+                    f"Populate: No primary key provided in data {entry_dict} "
+                    + f"for table {table_cls.__name__}"
                 )
                 continue
             if primary_key_values not in table_cls.fetch(
                 *table_cls.primary_key, as_dict=True
             ):
-                print(
-                    f"Populate: Populating table {table_cls.__name__} with data {entry_dict} using insert1."
+                logger.info(
+                    f"Populate: Populating table {table_cls.__name__} with data"
+                    + f" {entry_dict} using insert1."
                 )
                 table_cls.insert1(entry_dict)
             else:
-                logging.info(
-                    f"Populate: Entry in {table_cls.__name__} with primary keys {primary_key_values} already exists."
+                logger.warn(
+                    f"Populate: Entry in {table_cls.__name__} with primary keys"
+                    + f" {primary_key_values} already exists."
                 )
 
 
 def _get_table_cls(table_name):
-    """Get the spyglass.common class associated with a given table name. Also works for part tables one level deep."""
+    """Get the spyglass.common class associated with a given table name.
+
+    Also works for part tables one level deep."""
+
     if "." in table_name:  # part table
         master_table_name = table_name[0 : table_name.index(".")]
         part_table_name = table_name[table_name.index(".") + 1 :]
diff --git a/src/spyglass/data_import/insert_sessions.py b/src/spyglass/data_import/insert_sessions.py
index caee7682e..c862fe85b 100644
--- a/src/spyglass/data_import/insert_sessions.py
+++ b/src/spyglass/data_import/insert_sessions.py
@@ -6,9 +6,10 @@
 
 import pynwb
 
-from ..common import Nwbfile, get_raw_eseries, populate_all_common
-from ..settings import debug_mode, raw_dir
-from ..utils.nwb_helper_fn import get_nwb_copy_filename
+from spyglass.common import Nwbfile, get_raw_eseries, populate_all_common
+from spyglass.settings import debug_mode, raw_dir
+from spyglass.utils import logger
+from spyglass.utils.nwb_helper_fn import get_nwb_copy_filename
 
 
 def insert_sessions(nwb_file_names: Union[str, List[str]]):
@@ -83,7 +84,7 @@ def copy_nwb_link_raw_ephys(nwb_file_name, out_nwb_file_name):
     str
         The absolute path of the new NWB file.
     """
-    print(
+    logger.info(
         f"Creating a copy of NWB file {nwb_file_name} "
         + f"with link to raw ephys data: {out_nwb_file_name}"
     )
diff --git a/src/spyglass/decoding/clusterless.py b/src/spyglass/decoding/clusterless.py
index 951a8d233..3a2e29458 100644
--- a/src/spyglass/decoding/clusterless.py
+++ b/src/spyglass/decoding/clusterless.py
@@ -57,7 +57,6 @@
     SpikeSorting,
     SpikeSortingSelection,
 )
-from spyglass.utils.dj_helper_fn import fetch_nwb
 from spyglass.utils.dj_mixin import SpyglassMixin
 
 schema = dj.schema("decoding_clusterless")
diff --git a/src/spyglass/decoding/sorted_spikes.py b/src/spyglass/decoding/sorted_spikes.py
index 502406a9f..604a9c6ee 100644
--- a/src/spyglass/decoding/sorted_spikes.py
+++ b/src/spyglass/decoding/sorted_spikes.py
@@ -24,13 +24,13 @@
 from replay_trajectory_classification.initial_conditions import (
     UniformInitialConditions,
 )
-from spyglass.common.common_interval import IntervalList
-from spyglass.common.common_nwbfile import AnalysisNwbfile
-from spyglass.common.common_position import IntervalPositionInfo
-from spyglass.utils.dj_helper_fn import fetch_nwb
+
 from spyglass.common.common_behav import (
     convert_epoch_interval_name_to_position_interval_name,
 )
+from spyglass.common.common_interval import IntervalList
+from spyglass.common.common_nwbfile import AnalysisNwbfile
+from spyglass.common.common_position import IntervalPositionInfo
 from spyglass.decoding.core import (
     convert_valid_times_to_slice,
     get_valid_ephys_position_times_by_epoch,
@@ -40,7 +40,7 @@
     restore_classes,
 )
 from spyglass.spikesorting.spikesorting_curation import CuratedSpikeSorting
-from spyglass.utils.dj_mixin import SpyglassMixin
+from spyglass.utils import SpyglassMixin, logger
 
 schema = dj.schema("decoding_sortedspikes")
@@ -411,7 +411,7 @@ def get_data_for_multiple_epochs(
     environment_labels = []
 
     for epoch in epoch_names:
-        print(epoch)
+        logger.info(epoch)
         data.append(
             get_decoding_data_for_epoch(
                 nwb_file_name,
diff --git a/src/spyglass/decoding/visualization.py b/src/spyglass/decoding/visualization.py
index d1ef17940..f7f660e43 100644
--- a/src/spyglass/decoding/visualization.py
+++ 
b/src/spyglass/decoding/visualization.py @@ -12,6 +12,7 @@ from spyglass.decoding.visualization_1D_view import create_1D_decode_view from spyglass.decoding.visualization_2D_view import create_2D_decode_view +from spyglass.utils import logger def make_single_environment_movie( @@ -243,7 +244,7 @@ def setup_subplots( mosaic[-1].append(env_name) else: mosaic[-1].append(env_name) - print("\n") + logger.info("\n") mosaic.append(["multiunit"] * len(env_names)) diff --git a/src/spyglass/decoding/visualization_2D_view.py b/src/spyglass/decoding/visualization_2D_view.py index 39b7a4277..52338ea78 100644 --- a/src/spyglass/decoding/visualization_2D_view.py +++ b/src/spyglass/decoding/visualization_2D_view.py @@ -19,8 +19,10 @@ def create_static_track_animation( compute_real_time_rate: bool = False, head_dir=None, ): - # float32 gives about 7 digits of decimal precision; we want 3 digits right of the decimal. - # So need to compress-store the timestamp if the start is greater than say 5000. + # float32 gives about 7 digits of decimal precision; we want 3 digits right + # of the decimal. So need to compress-store the timestamp if the start is + # greater than say 5000. + first_timestamp = 0 if timestamps[0] > 5000: first_timestamp = timestamps[0] @@ -41,7 +43,6 @@ def create_static_track_animation( # TODO: Better approach for accommodating further data streams } if head_dir is not None: - # print(f'Loading head direction: {head_dir}') data["headDirection"] = head_dir.astype(np.float32) if compute_real_time_rate: median_delta_t = np.median(np.diff(timestamps)) diff --git a/src/spyglass/figurl_views/SpikeSortingRecordingView.py b/src/spyglass/figurl_views/SpikeSortingRecordingView.py index 7c844fdc9..5fe0f9df5 100644 --- a/src/spyglass/figurl_views/SpikeSortingRecordingView.py +++ b/src/spyglass/figurl_views/SpikeSortingRecordingView.py @@ -8,9 +8,8 @@ from sortingview.SpikeSortingView import create_raw_traces_plot from sortingview.SpikeSortingView.Figure import Figure -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..spikesorting.spikesorting_recording import SpikeSortingRecording +from spyglass.spikesorting.spikesorting_recording import SpikeSortingRecording +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("figurl_view_spike_sorting_recording") @@ -33,12 +32,12 @@ def make(self, key): recording_path = rec["recording_path"] # Load the SI recording extractor - print("Loading recording") + logger.info("Loading recording") recording: si.BaseRecording = si.load_extractor(recording_path) # Raw traces (sample) # Extract the first 1 second of traces - print("Extracting traces") + logger.info("Extracting traces") traces: np.array = recording.get_traces( start_frame=0, end_frame=int(recording.get_sampling_frequency() * 1) ).astype(np.float32) @@ -50,11 +49,11 @@ def make(self, key): ) # Electrode geometry - print("Electrode geometry") + logger.info("Electrode geometry") f2 = create_electrode_geometry(recording) label = f"{nwb_file_name}:{sort_group_id}:{sort_interval_name}" - print(label) + logger.info(label) # Mountain layout F = create_mountain_layout(figures=[f1, f2], label=label) diff --git a/src/spyglass/figurl_views/SpikeSortingView.py b/src/spyglass/figurl_views/SpikeSortingView.py index f6056b214..b96fd0f96 100644 --- a/src/spyglass/figurl_views/SpikeSortingView.py +++ b/src/spyglass/figurl_views/SpikeSortingView.py @@ -5,7 +5,7 @@ SpikeSortingView as SortingViewSpikeSortingView, ) -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import 
SpyglassMixin, logger from ..spikesorting import SpikeSorting, SpikeSortingRecording from .prepare_spikesortingview_data import prepare_spikesortingview_data @@ -39,7 +39,7 @@ def make(self, key): with kc.TemporaryDirectory() as tmpdir: fname = f"{tmpdir}/spikesortingview.h5" - print("Preparing spikesortingview data") + logger.info("Preparing spikesortingview data") prepare_spikesortingview_data( recording=recording, sorting=sorting, @@ -50,21 +50,21 @@ def make(self, key): output_file_name=fname, ) - print("Creating view object") + logger.info("Creating view object") X = SortingViewSpikeSortingView(fname) - print("Creating summary") + logger.info("Creating summary") f1 = X.create_summary() # f2 = X.create_units_table(unit_ids=X.unit_ids, unit_metrics=unit_metrics) - print("Creating autocorrelograms") + logger.info("Creating autocorrelograms") f3 = X.create_autocorrelograms(unit_ids=X.unit_ids) - print("Creating raster plot") + logger.info("Creating raster plot") f4 = X.create_raster_plot(unit_ids=X.unit_ids) - print("Creating average waveforms") + logger.info("Creating average waveforms") f5 = X.create_average_waveforms(unit_ids=X.unit_ids) - print("Creating spike amplitudes") + logger.info("Creating spike amplitudes") f6 = X.create_spike_amplitudes(unit_ids=X.unit_ids) - print("Creating electrode geometry") + logger.info("Creating electrode geometry") f7 = X.create_electrode_geometry() # f8 = X.create_live_cross_correlograms() @@ -76,16 +76,16 @@ def make(self, key): sorter = sorting_record["sorter"] sorter_params_name = sorting_record["sorter_params_name"] label = f"{nwb_file_name}:{sort_group_id}:{sort_interval_name}:{sorter}:{sorter_params_name}" - print(label) + logger.info(label) - print("Creating mountain layout") + logger.info("Creating mountain layout") mountain_layout = X.create_mountain_layout( figures=[f1, f3, f4, f5, f6, f7], label=label, sorting_curation_uri=sorting_curation_uri, ) - print("Making URL") + logger.info("Making URL") url = mountain_layout.url() # Insert row into table diff --git a/src/spyglass/figurl_views/prepare_spikesortingview_data.py b/src/spyglass/figurl_views/prepare_spikesortingview_data.py index 601c277fd..46138b696 100644 --- a/src/spyglass/figurl_views/prepare_spikesortingview_data.py +++ b/src/spyglass/figurl_views/prepare_spikesortingview_data.py @@ -6,6 +6,8 @@ import numpy as np import spikeinterface as si +from spyglass.utils import logger + def prepare_spikesortingview_data( *, @@ -70,7 +72,7 @@ def prepare_spikesortingview_data( something_missing = True if not something_missing: break - print(f"Initial pass: segment {iseg}") + logger.info(f"Initial pass: segment {iseg}") start_frame = iseg * num_frames_per_segment end_frame = min(start_frame + num_frames_per_segment, num_frames) start_frame_with_padding = max(start_frame - snippet_len[0], 0) @@ -131,7 +133,7 @@ def prepare_spikesortingview_data( ) for iseg in range(num_segments): - print(f"Segment {iseg} of {num_segments}") + logger.info(f"Segment {iseg} of {num_segments}") start_frame = iseg * num_frames_per_segment end_frame = min(start_frame + num_frames_per_segment, num_frames) start_frame_with_padding = max(start_frame - snippet_len[0], 0) @@ -196,13 +198,13 @@ def prepare_spikesortingview_data( subsampled_spike_trains_concat = np.concatenate( all_subsampled_spike_trains, dtype=np.int32 ) - # print('Extracting spike snippets') + # logger.info('Extracting spike snippets') spike_snippets_concat = extract_spike_snippets( traces=traces_with_padding, times=subsampled_spike_trains_concat - 
start_frame_with_padding, snippet_len=snippet_len, ) - # print('Collecting spike snippets') + # logger.info('Collecting spike snippets') index = 0 for ii, unit_id in enumerate(unit_ids): channel_neighborhood = unit_channel_neighborhoods[str(unit_id)] diff --git a/src/spyglass/lfp/analysis/v1/lfp_band.py b/src/spyglass/lfp/analysis/v1/lfp_band.py index ddc40928a..4db1dd755 100644 --- a/src/spyglass/lfp/analysis/v1/lfp_band.py +++ b/src/spyglass/lfp/analysis/v1/lfp_band.py @@ -15,7 +15,7 @@ from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.lfp.lfp_electrode import LFPElectrodeGroup from spyglass.lfp.lfp_merge import LFPOutput -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from spyglass.utils.nwb_helper_fn import get_electrode_indices schema = dj.schema("lfp_band_v1") @@ -275,8 +275,9 @@ def make(self, key): filter_coeff = filter[0]["filter_coeff"] if len(filter_coeff) == 0: - print( - f"Error in LFPBand: no filter found with data sampling rate of {lfp_band_sampling_rate}" + logger.error( + "LFPBand: no filter found with data " + + f"sampling rate of {lfp_band_sampling_rate}" ) return None diff --git a/src/spyglass/lfp/v1/lfp.py b/src/spyglass/lfp/v1/lfp.py index 9f8b2b86c..2d7e32bf0 100644 --- a/src/spyglass/lfp/v1/lfp.py +++ b/src/spyglass/lfp/v1/lfp.py @@ -16,7 +16,7 @@ from spyglass.lfp.lfp_electrode import LFPElectrodeGroup # from spyglass.utils.dj_helper_fn import fetch_nwb # dj_replace -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("lfp_v1") @@ -91,10 +91,10 @@ def make(self, key): raw_valid_times, min_length=MIN_LFP_INTERVAL_DURATION, ) - print( - f"LFP: found {len(valid_times)} intervals > {MIN_LFP_INTERVAL_DURATION} sec long." + logger.info( + f"LFP: found {len(valid_times)} intervals > " + + f"{MIN_LFP_INTERVAL_DURATION} sec long." ) - # target user-specified sampling rate decimation = sampling_rate // key["target_sampling_rate"] @@ -107,14 +107,17 @@ def make(self, key): } # not key['filter_sampling_rate']? 
).fetch(as_dict=True)[0] - # there should only be one filter that matches, so we take the first of the dictionaries + # there should only be one filter that matches, so we take the first of + # the dictionaries + key["filter_name"] = filter["filter_name"] key["filter_sampling_rate"] = filter["filter_sampling_rate"] filter_coeff = filter["filter_coeff"] if len(filter_coeff) == 0: - print( - f"Error in LFP: no filter found with data sampling rate of {sampling_rate}" + logger.error( + "LFP: no filter found with data sampling rate of " + + f"{sampling_rate}" ) return None # get the list of selected LFP Channels from LFPElectrode @@ -137,14 +140,16 @@ def make(self, key): decimation, ) - # now that the LFP is filtered and in the file, add the file to the AnalysisNwbfile table + # now that the LFP is filtered and in the file, add the file to the + # AnalysisNwbfile table + AnalysisNwbfile().add(key["nwb_file_name"], lfp_file_name) key["analysis_file_name"] = lfp_file_name key["lfp_object_id"] = lfp_object_id key["lfp_sampling_rate"] = sampling_rate // decimation - # finally, we need to censor the valid times to account for the downsampling + # need to censor the valid times to account for the downsampling lfp_valid_times = interval_list_censor(valid_times, timestamp_interval) # add an interval list for the LFP valid times, skipping duplicates diff --git a/src/spyglass/lfp/v1/lfp_artifact_difference_detection.py b/src/spyglass/lfp/v1/lfp_artifact_difference_detection.py index 54a5eaf1a..a8cfcd4d9 100644 --- a/src/spyglass/lfp/v1/lfp_artifact_difference_detection.py +++ b/src/spyglass/lfp/v1/lfp_artifact_difference_detection.py @@ -10,6 +10,7 @@ interval_from_inds, interval_list_intersect, ) +from spyglass.utils import logger from spyglass.utils.nwb_helper_fn import get_valid_intervals @@ -66,7 +67,7 @@ def difference_artifact_detector( # numpy array before referencing check for referencing flag if referencing == 1: - print("referencing activated. may be set to -1") + logger.info("referencing activated. 
may be set to -1")

     # valid_timestamps = recording.timestamps
     valid_timestamps = timestamps

@@ -79,7 +80,7 @@ def difference_artifact_detector(
             [valid_timestamps[0], valid_timestamps[-1]]
         )
         artifact_times_empty = np.asarray([])
-        print("Amplitude threshold is None, skipping artifact detection")
+        logger.info("Amplitude threshold is None, skipping artifact detection")
         return recording_interval, artifact_times_empty

     # verify threshold parameters
@@ -99,8 +100,8 @@ def difference_artifact_detector(
     # compute the number of electrodes that have to be above threshold
     nelect_above_1st = np.ceil(proportion_above_thresh_1st * recording.shape[1])
     nelect_above_2nd = np.ceil(proportion_above_thresh_2nd * recording.shape[1])
-    print("num tets 1", nelect_above_1st, "num tets 2", nelect_above_2nd)
-    print("data shape", recording.shape)
+    logger.info(f"num tets 1 {nelect_above_1st}, num tets 2 {nelect_above_2nd}")
+    logger.info(f"data shape {recording.shape}")

     # find the artifact occurrences using one or both thresholds, across
     # channels
@@ -127,7 +128,7 @@ def difference_artifact_detector(
     above_thresh_1st = np.where(artifact_times_all_5 >= nelect_above_1st)[0]

     # second, find artifacts with large baseline change
-    print("thresh", amplitude_thresh_2nd, "window", local_window)
+    logger.info(f"thresh {amplitude_thresh_2nd}, window {local_window}")

     big_artifacts = np.zeros(
         (recording.shape[1], above_thresh_1st.shape[0])
@@ -164,7 +165,7 @@ def difference_artifact_detector(
     ]
     artifact_frames = above_thresh.copy()
-    print("detected ", artifact_frames.shape[0], " artifacts")
+    logger.info(f"detected {artifact_frames.shape[0]} artifacts")

     # Convert to s to remove from either side of each detected artifact
     half_removal_window_s = removal_window_ms / 1000 * 0.5
@@ -174,7 +175,7 @@ def difference_artifact_detector(
             [[valid_timestamps[0], valid_timestamps[-1]]]
         )
         artifact_times_empty = np.asarray([])
-        print("No artifacts detected.")
+        logger.info("No artifacts detected.")
         return recording_interval, artifact_times_empty

     artifact_intervals = interval_from_inds(artifact_frames)
diff --git a/src/spyglass/linearization/v0/main.py b/src/spyglass/linearization/v0/main.py
index 5a88079d2..f08694689 100644
--- a/src/spyglass/linearization/v0/main.py
+++ b/src/spyglass/linearization/v0/main.py
@@ -9,7 +9,7 @@
 from spyglass.common.common_nwbfile import AnalysisNwbfile  # noqa F401
 from spyglass.common.common_position import IntervalPositionInfo  # noqa F401
-from spyglass.utils.dj_mixin import SpyglassMixin
+from spyglass.utils import SpyglassMixin, logger

 schema = dj.schema("common_position")

@@ -119,7 +119,7 @@ class IntervalLinearizedPosition(SpyglassMixin, dj.Computed):
     """

     def make(self, key):
-        print(f"Computing linear position for: {key}")
+        logger.info(f"Computing linear position for: {key}")

         key["analysis_file_name"] = AnalysisNwbfile().create(
             key["nwb_file_name"]
diff --git a/src/spyglass/linearization/v1/main.py b/src/spyglass/linearization/v1/main.py
index 53a050f61..ae47477cb 100644
--- a/src/spyglass/linearization/v1/main.py
+++ b/src/spyglass/linearization/v1/main.py
@@ -12,8 +12,7 @@
 from spyglass.common.common_nwbfile import AnalysisNwbfile
 from spyglass.position.position_merge import PositionOutput
-from spyglass.utils.dj_helper_fn import fetch_nwb
-from spyglass.utils.dj_mixin import SpyglassMixin
+from spyglass.utils import SpyglassMixin, logger

 schema = dj.schema("position_linearization_v1")

@@ -116,7 +115,7 @@ class LinearizedPositionV1(SpyglassMixin, dj.Computed):

     def make(self, key):
         orig_key = 
copy.deepcopy(key) - print(f"Computing linear position for: {key}") + logger.info(f"Computing linear position for: {key}") position_nwb = PositionOutput.fetch_nwb( {"merge_id": key["pos_merge_id"]} diff --git a/src/spyglass/lock/file_lock.py b/src/spyglass/lock/file_lock.py index bfb3c7821..c20927520 100644 --- a/src/spyglass/lock/file_lock.py +++ b/src/spyglass/lock/file_lock.py @@ -2,7 +2,7 @@ import datajoint as dj -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("file_lock") @@ -23,7 +23,7 @@ def populate_from_lock_file(self): if os.path.exists(os.getenv("NWB_LOCK_FILE")): lock_file = open(os.getenv("NWB_LOCK_FILE"), "r") for line in lock_file: - print(line) + logger.info(line) key = {"nwb_file_name": line.strip()} self.insert1(key, skip_duplicates="True") lock_file.close() diff --git a/src/spyglass/position/position_merge.py b/src/spyglass/position/position_merge.py index 1c1c98efe..69a42f72c 100644 --- a/src/spyglass/position/position_merge.py +++ b/src/spyglass/position/position_merge.py @@ -7,14 +7,18 @@ import pandas as pd from datajoint.utils import to_camel_case -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common.common_position import IntervalPositionInfo as CommonPos -from ..utils.dj_merge_tables import _Merge -from .v1.dlc_utils import check_videofile, get_video_path, make_video -from .v1.position_dlc_pose_estimation import DLCPoseEstimationSelection -from .v1.position_dlc_selection import DLCPosV1 -from .v1.position_trodes_position import TrodesPosV1 +from spyglass.common.common_position import IntervalPositionInfo as CommonPos +from spyglass.position.v1.dlc_utils import ( + check_videofile, + get_video_path, + make_video, +) +from spyglass.position.v1.position_dlc_pose_estimation import ( + DLCPoseEstimationSelection, +) +from spyglass.position.v1.position_dlc_selection import DLCPosV1 +from spyglass.position.v1.position_trodes_position import TrodesPosV1 +from spyglass.utils import SpyglassMixin, _Merge, logger schema = dj.schema("position_merge") @@ -142,7 +146,7 @@ def make(self, key): M_TO_CM = 100 output_dir = (PositionVideoSelection & key).fetch1("output_dir") - print("Loading position data...") + logger.info("Loading position data...") # raw_position_df = ( # RawPosition() # & { @@ -207,7 +211,7 @@ def make(self, key): pos_df[["orientation"]] ) - print("Loading video data...") + logger.info("Loading video data...") epoch = int("".join(filter(str.isdigit, key["interval_list_name"]))) + 1 ( @@ -242,7 +246,7 @@ def make(self, key): # centroids = {'red': np.asarray(raw_position_df[['xloc', 'yloc']]), # 'green': np.asarray(raw_position_df[['xloc2', 'yloc2']])} - print("Making video...") + logger.info("Making video...") make_video( video_path, diff --git a/src/spyglass/position/v1/dlc_utils.py b/src/spyglass/position/v1/dlc_utils.py index 67606e684..6fc7dc741 100644 --- a/src/spyglass/position/v1/dlc_utils.py +++ b/src/spyglass/position/v1/dlc_utils.py @@ -156,7 +156,7 @@ def _set_permissions(directory, mode, username: str, groupname: str = None): os.chmod(os.path.join(dirpath, filename), mode) -class OutputLogger: +class OutputLogger: # TODO: migrate to spyglass.utils.logger """ A class to wrap a logging.Logger object in order to provide context manager capabilities. 
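
Regarding the TODO above: a minimal sketch of how OutputLogger's stdout capture
could later be rebuilt on the shared spyglass.utils.logger. This is not part of
the patch; the redirect_stdout approach and the name log_stdout are assumptions
for illustration only.

    import contextlib
    import io
    import logging

    logger = logging.getLogger("spyglass")  # stand-in for spyglass.utils.logger

    @contextlib.contextmanager
    def log_stdout(level=logging.INFO):
        """Capture print() output from wrapped code and re-emit via the logger."""
        buffer = io.StringIO()
        with contextlib.redirect_stdout(buffer):
            yield
        for line in buffer.getvalue().splitlines():
            if line.strip():  # skip blank spacing lines instead of logging them
                logger.log(level, line)
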
diff --git a/src/spyglass/ripple/v1/ripple.py b/src/spyglass/ripple/v1/ripple.py
index c12f94373..ef7483f01 100644
--- a/src/spyglass/ripple/v1/ripple.py
+++ b/src/spyglass/ripple/v1/ripple.py
@@ -12,7 +12,7 @@
 from spyglass.common.common_nwbfile import AnalysisNwbfile
 from spyglass.lfp.analysis.v1.lfp_band import LFPBandSelection, LFPBandV1
 from spyglass.position import PositionOutput
-from spyglass.utils.dj_mixin import SpyglassMixin
+from spyglass.utils import SpyglassMixin, logger
 from spyglass.utils.nwb_helper_fn import get_electrode_indices

 schema = dj.schema("ripple_v1")

@@ -96,7 +96,7 @@ def set_lfp_electrodes(
                 .loc[:, LFPBandSelection.LFPBandElectrode.primary_key]
             )
         except KeyError as err:
-            print(err)
+            logger.debug(err)
             raise KeyError(
                 "Attempting to use electrode_ids that aren't in the associated"
                 " LFPBand filtered dataset."
@@ -160,7 +160,7 @@ def make(self, key):
             "nwb_file_name", "target_interval_list_name"
         )

-        print(f"Computing ripple times for: {key}")
+        logger.info(f"Computing ripple times for: {key}")
         ripple_params = (
             RippleParameters & {"ripple_param_name": key["ripple_param_name"]}
         ).fetch1("ripple_param_dict")
diff --git a/src/spyglass/sharing/sharing_kachery.py b/src/spyglass/sharing/sharing_kachery.py
index 0d7405b46..e3b9111ec 100644
--- a/src/spyglass/sharing/sharing_kachery.py
+++ b/src/spyglass/sharing/sharing_kachery.py
@@ -4,8 +4,8 @@
 import kachery_cloud as kcl
 from datajoint.errors import DataJointError

-from spyglass.utils.dj_mixin import SpyglassMixin
 from spyglass.settings import config
+from spyglass.utils import SpyglassMixin, logger

 from ..common.common_lab import Lab  # noqa: F401
 from ..common.common_nwbfile import AnalysisNwbfile
@@ -141,7 +141,7 @@ def make(self, key):

         # linked_key = copy.deepcopy(key)

-        print(f'Linking {key["analysis_file_name"]} in kachery-cloud...')
+        logger.info(f'Linking {key["analysis_file_name"]} in kachery-cloud...')
         # set the kachery zone
         KacheryZone.set_zone(key)

@@ -149,11 +149,11 @@ def make(self, key):
         key["analysis_file_uri"] = kcl.link_file(
             AnalysisNwbfile().get_abs_path(key["analysis_file_name"])
         )
-        print(
-            os.environ[kachery_zone_envar], os.environ[kachery_cloud_dir_envar]
-        )
-        print(AnalysisNwbfile().get_abs_path(key["analysis_file_name"]))
-        print(kcl.load_file(key["analysis_file_uri"]))
+        logger.info(
+            f"{os.environ[kachery_zone_envar]} {os.environ[kachery_cloud_dir_envar]}"
+        )
+        logger.info(AnalysisNwbfile().get_abs_path(key["analysis_file_name"]))
+        logger.info(kcl.load_file(key["analysis_file_uri"]))
         self.insert1(key)

         # we also need to insert any linked files
@@ -185,7 +185,7 @@ def download_file(analysis_file_name: str) -> bool:
     for uri, kachery_zone_name in zip(fetched_list[0], fetched_list[1]):
         if len(uri) == 0:
             return False
-        print("uri:", uri)
+        logger.info(f"uri: {uri}")
         if kachery_download_file(
             uri=uri,
             dest=AnalysisNwbfile.get_abs_path(analysis_file_name),
@@ -199,7 +199,7 @@ def download_file(analysis_file_name: str) -> bool:
         ).fetch(as_dict=True)
         for file in linked_files:
             uri = file["linked_file_uri"]
-            print(f"attempting to download linked file uri {uri}")
+            logger.info(f"attempting to download linked file uri {uri}")
             linked_file_path = (
                 os.environ["SPYGLASS_BASE_DIR"]
                 + file["linked_file_rel_path"]
diff --git a/src/spyglass/spikesorting/curation_figurl.py b/src/spyglass/spikesorting/curation_figurl.py
index 069de6dd5..8ba0d3cfb 100644
--- a/src/spyglass/spikesorting/curation_figurl.py
+++ b/src/spyglass/spikesorting/curation_figurl.py
@@ -6,11 +6,10 @@
 import spikeinterface as si
 from sortingview.SpikeSortingView import SpikeSortingView

-from spyglass.utils.dj_mixin import 
SpyglassMixin - -from .spikesorting_curation import Curation -from .spikesorting_recording import SpikeSortingRecording -from .spikesorting_sorting import SpikeSorting +from spyglass.spikesorting.spikesorting_curation import Curation +from spyglass.spikesorting.spikesorting_recording import SpikeSortingRecording +from spyglass.spikesorting.spikesorting_sorting import SpikeSorting +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_curation_figurl") @@ -116,7 +115,7 @@ def _generate_the_figurl( sorting_label: str, new_curation_uri: str, ): - print("Preparing spikesortingview data") + logger.info("Preparing spikesortingview data") X = SpikeSortingView.create( recording=R, sorting=S, @@ -125,22 +124,6 @@ def _generate_the_figurl( max_num_snippets_per_segment=100, channel_neighborhood_size=7, ) - # create a fake unit similarity matrix (for future reference) - # similarity_scores = [] - # for u1 in X.unit_ids: - # for u2 in X.unit_ids: - # similarity_scores.append( - # vv.UnitSimilarityScore( - # unit_id1=u1, - # unit_id2=u2, - # similarity=similarity_matrix[(X.unit_ids==u1),(X.unit_ids==u2)] - # ) - # ) - # Create the similarity matrix view - # unit_similarity_matrix_view = vv.UnitSimilarityMatrix( - # unit_ids=X.unit_ids, - # similarity_scores=similarity_scores - # ) # Assemble the views in a layout # You can replace this with other layouts diff --git a/src/spyglass/spikesorting/imported.py b/src/spyglass/spikesorting/imported.py index 68cc9a9a4..06a8be26b 100644 --- a/src/spyglass/spikesorting/imported.py +++ b/src/spyglass/spikesorting/imported.py @@ -2,8 +2,8 @@ import pynwb from spyglass.common.common_nwbfile import Nwbfile -from spyglass.common.common_session import Session -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_imported") @@ -27,4 +27,4 @@ def make(self, key): key["object_id"] = nwbfile.units.object_id self.insert1(key, skip_duplicates=True) else: - print("No units found in NWB file") + logger.warn("No units found in NWB file") diff --git a/src/spyglass/spikesorting/merged_sorting_extractor.py b/src/spyglass/spikesorting/merged_sorting_extractor.py index e2140bfdc..2ce11d8d1 100644 --- a/src/spyglass/spikesorting/merged_sorting_extractor.py +++ b/src/spyglass/spikesorting/merged_sorting_extractor.py @@ -3,6 +3,8 @@ import numpy as np import spikeinterface as si +from spyglass.utils import logger + class MergedSortingExtractor(si.BaseSorting): extractor_name = "MergedSortingExtractor" @@ -77,7 +79,7 @@ def __init__( } for new_sorting_segment in sorting_segment_list: self.add_sorting_segment(new_sorting_segment) - print(self) + logger.info(self) class MergedSortingSegment(si.BaseSortingSegment): diff --git a/src/spyglass/spikesorting/sortingview.py b/src/spyglass/spikesorting/sortingview.py index 42eb66684..73a6fb1cf 100644 --- a/src/spyglass/spikesorting/sortingview.py +++ b/src/spyglass/spikesorting/sortingview.py @@ -1,17 +1,15 @@ import datajoint as dj import sortingview as sv -import spikeinterface as si -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common.common_lab import LabMember, LabTeam -from .sortingview_helper_fn import ( +from spyglass.common.common_lab import LabMember, LabTeam +from spyglass.spikesorting.sortingview_helper_fn import ( _create_spikesortingview_workspace, _generate_url, ) -from .spikesorting_curation import Curation -from .spikesorting_recording import 
SpikeSortingRecording -from .spikesorting_sorting import SpikeSorting +from spyglass.spikesorting.spikesorting_curation import Curation +from spyglass.spikesorting.spikesorting_recording import SpikeSortingRecording +from spyglass.spikesorting.spikesorting_sorting import SpikeSorting +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_sortingview") @@ -72,7 +70,7 @@ def make(self, key: dict): LabMember.LabMemberInfo & {"lab_member_name": team_member} ).fetch("google_user_name") if len(google_user_id) != 1: - print( + logger.warn( f"Google user ID for {team_member} does not exist or more than one ID detected;\ permission not given to {team_member}, skipping..." ) @@ -110,8 +108,6 @@ def make(self, key: dict): # generate URLs and add to key url = self.url_trythis(key) - # url = _generate_url(key) - # print("URL:", url) key["curation_url"] = url key["curation_jot"] = "not ready yet" diff --git a/src/spyglass/spikesorting/sortingview_helper_fn.py b/src/spyglass/spikesorting/sortingview_helper_fn.py index 177c8a831..735f462bf 100644 --- a/src/spyglass/spikesorting/sortingview_helper_fn.py +++ b/src/spyglass/spikesorting/sortingview_helper_fn.py @@ -8,7 +8,10 @@ import spikeinterface as si from sortingview.SpikeSortingView import SpikeSortingView -from .merged_sorting_extractor import MergedSortingExtractor +from spyglass.spikesorting.merged_sorting_extractor import ( + MergedSortingExtractor, +) +from spyglass.utils import logger def _create_spikesortingview_workspace( @@ -69,7 +72,7 @@ def _generate_url( unit_metrics: Union[List[Any], None] = None, ) -> Tuple[str, str]: # moved figURL creation to function called trythis_URL in sosrtingview.py - print("Preparing spikesortingview data") + logger.info("Preparing spikesortingview data") X = SpikeSortingView.create( recording=recording, sorting=sorting, @@ -79,25 +82,9 @@ def _generate_url( channel_neighborhood_size=7, ) - # create a fake unit similarity matrix - # similarity_scores = [] - # for u1 in X.unit_ids: - # for u2 in X.unit_ids: - # similarity_scores.append( - # vv.UnitSimilarityScore( - # unit_id1=u1, - # unit_id2=u2, - # similarity=similarity_matrix[(X.unit_ids==u1),(X.unit_ids==u2)] - # ) - # ) - # Create the similarity matrix view - # unit_similarity_matrix_view = vv.UnitSimilarityMatrix( - # unit_ids=X.unit_ids, - # similarity_scores=similarity_scores - # ) - # Assemble the views in a layout # You can replace this with other layouts + view = vv.MountainLayout( items=[ vv.MountainLayoutItem( @@ -149,7 +136,7 @@ def _generate_url( ) if initial_curation is not None: - print("found initial curation") + logger.warn("found initial curation") sorting_curation_uri = kcl.store_json(initial_curation) else: sorting_curation_uri = None @@ -160,6 +147,6 @@ def _generate_url( ) url = view.url(label=label, state=url_state) - print(url) + logger.info(url) return url diff --git a/src/spyglass/spikesorting/spikesorting_artifact.py b/src/spyglass/spikesorting/spikesorting_artifact.py index 0c21fac24..567c1b616 100644 --- a/src/spyglass/spikesorting/spikesorting_artifact.py +++ b/src/spyglass/spikesorting/spikesorting_artifact.py @@ -8,16 +8,15 @@ import spikeinterface as si from spikeinterface.core.job_tools import ChunkRecordingExecutor, ensure_n_jobs -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common.common_interval import ( +from spyglass.common.common_interval import ( IntervalList, _union_concat, interval_from_inds, interval_set_difference_inds, ) -from ..utils.nwb_helper_fn import 
get_valid_intervals -from .spikesorting_recording import SpikeSortingRecording +from spyglass.spikesorting.spikesorting_recording import SpikeSortingRecording +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.nwb_helper_fn import get_valid_intervals schema = dj.schema("spikesorting_artifact") @@ -204,7 +203,7 @@ def _get_artifact_times( [[valid_timestamps[0], valid_timestamps[-1]]] ) artifact_times_empty = np.asarray([]) - print( + logger.info( "Amplitude and zscore thresholds are both None, skipping artifact detection" ) return recording_interval, artifact_times_empty @@ -220,7 +219,7 @@ def _get_artifact_times( # detect frames that are above threshold in parallel n_jobs = ensure_n_jobs(recording, n_jobs=job_kwargs.get("n_jobs", 1)) - print(f"using {n_jobs} jobs...") + logger.info(f"using {n_jobs} jobs...") func = _compute_artifact_chunk init_func = _init_artifact_worker @@ -260,7 +259,7 @@ def _get_artifact_times( [[valid_timestamps[0], valid_timestamps[-1]]] ) artifact_times_empty = np.asarray([]) - print("No artifacts detected.") + logger.warn("No artifacts detected.") return recording_interval, artifact_times_empty # convert indices to intervals diff --git a/src/spyglass/spikesorting/spikesorting_curation.py b/src/spyglass/spikesorting/spikesorting_curation.py index 7a5794aee..fd93cfbab 100644 --- a/src/spyglass/spikesorting/spikesorting_curation.py +++ b/src/spyglass/spikesorting/spikesorting_curation.py @@ -16,7 +16,7 @@ from spyglass.common.common_interval import IntervalList from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.settings import waveform_dir -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger from .merged_sorting_extractor import MergedSortingExtractor from .spikesorting_recording import SortInterval, SpikeSortingRecording @@ -246,7 +246,7 @@ def save_sorting_nwb( AnalysisNwbfile().add(key["nwb_file_name"], analysis_file_name) if object_ids == "": - print( + logger.warn( "Sorting contains no units." "Created an empty analysis nwb file anyway." 
) @@ -318,7 +318,7 @@ def make(self, key): sorting = Curation.get_curated_sorting(key) - print("Extracting waveforms...") + logger.info("Extracting waveforms...") waveform_params = (WaveformParameters & key).fetch1("waveform_params") if "whiten" in waveform_params: if waveform_params.pop("whiten"): @@ -448,7 +448,7 @@ def get_available_metrics(self): metric_string = ("{metric_name} : {metric_doc}").format( metric_name=metric, metric_doc=metric_doc ) - print(metric_string + "\n") + logger.info(metric_string + "\n") # TODO def _validate_metrics_list(self, key): @@ -511,7 +511,7 @@ def make(self, key): Path(waveform_dir) / Path(qm_name + ".json") ) # save metrics dict as json - print(f"Computed all metrics: {qm}") + logger.info(f"Computed all metrics: {qm}") self._dump_to_json(qm, key["quality_metrics_path"]) key["analysis_file_name"] = AnalysisNwbfile().create( @@ -955,7 +955,7 @@ def make(self, key): if int(unit_id) in accepted_units } - print(f"Found {len(accepted_units)} accepted units") + logger.info(f"Found {len(accepted_units)} accepted units") # get the sorting and save it in the NWB file sorting = Curation.get_curated_sorting(key) diff --git a/src/spyglass/spikesorting/spikesorting_populator.py b/src/spyglass/spikesorting/spikesorting_populator.py index 6531c8380..8db6ee37b 100644 --- a/src/spyglass/spikesorting/spikesorting_populator.py +++ b/src/spyglass/spikesorting/spikesorting_populator.py @@ -1,15 +1,16 @@ import datajoint as dj -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common import ElectrodeGroup, IntervalList -from .curation_figurl import CurationFigurl, CurationFigurlSelection -from .spikesorting_artifact import ( +from spyglass.common import ElectrodeGroup, IntervalList +from spyglass.spikesorting.curation_figurl import ( + CurationFigurl, + CurationFigurlSelection, +) +from spyglass.spikesorting.spikesorting_artifact import ( ArtifactDetection, ArtifactDetectionSelection, ArtifactRemovedIntervalList, ) -from .spikesorting_curation import ( +from spyglass.spikesorting.spikesorting_curation import ( AutomaticCuration, AutomaticCurationSelection, CuratedSpikeSorting, @@ -20,13 +21,17 @@ Waveforms, WaveformSelection, ) -from .spikesorting_recording import ( +from spyglass.spikesorting.spikesorting_recording import ( SortGroup, SortInterval, SpikeSortingRecording, SpikeSortingRecordingSelection, ) -from .spikesorting_sorting import SpikeSorting, SpikeSortingSelection +from spyglass.spikesorting.spikesorting_sorting import ( + SpikeSorting, + SpikeSortingSelection, +) +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_sorting") @@ -113,7 +118,7 @@ def spikesorting_pipeline_populator( nwbf_dict = dict(nwb_file_name=nwb_file_name) # Define pipeline parameters if pipeline_parameters_name is not None: - print(f"Using pipeline parameters {pipeline_parameters_name}") + logger.info(f"Using pipeline parameters {pipeline_parameters_name}") ( artifact_parameters, preproc_params_name, @@ -138,14 +143,14 @@ def spikesorting_pipeline_populator( # make sort groups only if not currently available # don't overwrite existing ones! 
if not SortGroup() & nwbf_dict: - print("Generating sort groups") + logger.info("Generating sort groups") SortGroup().set_group_by_shank(nwb_file_name) # Define sort interval interval_dict = dict(**nwbf_dict, interval_list_name=interval_list_name) if sort_interval_name is not None: - print(f"Using sort interval {sort_interval_name}") + logger.info(f"Using sort interval {sort_interval_name}") if not ( SortInterval & nwbf_dict @@ -153,7 +158,7 @@ def spikesorting_pipeline_populator( ): raise KeyError(f"Sort interval {sort_interval_name} not found") else: - print(f"Generating sort interval from {interval_list_name}") + logger.info(f"Generating sort interval from {interval_list_name}") interval_list = (IntervalList & interval_dict).fetch1("valid_times")[0] sort_interval_name = interval_list_name @@ -178,7 +183,7 @@ def spikesorting_pipeline_populator( ).fetch("sort_group_id") # make spike sorting recording - print("Generating spike sorting recording") + logger.info("Generating spike sorting recording") for sort_group_id in sort_group_id_list: ssr_key = dict( **sort_dict, @@ -192,7 +197,7 @@ def spikesorting_pipeline_populator( SpikeSortingRecording.populate(interval_dict) # Artifact detection - print("Running artifact detection") + logger.info("Running artifact detection") artifact_keys = [ {**k, "artifact_params_name": artifact_parameters} for k in (SpikeSortingRecordingSelection() & interval_dict).fetch("KEY") @@ -201,7 +206,7 @@ def spikesorting_pipeline_populator( ArtifactDetection.populate(interval_dict) # Spike sorting - print("Running spike sorting") + logger.info("Running spike sorting") for artifact_key in artifact_keys: ss_key = dict( **(ArtifactDetection & artifact_key).fetch1("KEY"), @@ -214,7 +219,7 @@ def spikesorting_pipeline_populator( SpikeSorting.populate(sort_dict) # initial curation - print("Beginning curation") + logger.info("Beginning curation") for sorting_key in (SpikeSorting() & sort_dict).fetch("KEY"): if not (Curation() & sorting_key): Curation.insert_curation(sorting_key) @@ -226,7 +231,7 @@ def spikesorting_pipeline_populator( and len(auto_curation_params_name) > 0 ): # Extract waveforms - print("Extracting waveforms") + logger.info("Extracting waveforms") curation_keys = [ {**k, "waveform_params_name": waveform_params_name} for k in (Curation() & sort_dict & {"curation_id": 0}).fetch("KEY") @@ -235,7 +240,7 @@ def spikesorting_pipeline_populator( Waveforms.populate(sort_dict) # Quality Metrics - print("Calculating quality metrics") + logger.info("Calculating quality metrics") waveform_keys = [ {**k, "metric_params_name": metric_params_name} for k in (Waveforms() & sort_dict).fetch("KEY") @@ -244,7 +249,7 @@ def spikesorting_pipeline_populator( QualityMetrics().populate(sort_dict) # Automatic Curation - print("Creating automatic curation") + logger.info("Creating automatic curation") metric_keys = [ {**k, "auto_curation_params_name": auto_curation_params_name} for k in (QualityMetrics() & sort_dict).fetch("KEY") @@ -255,7 +260,7 @@ def spikesorting_pipeline_populator( # Curated Spike Sorting # get curation keys of the automatic curation to populate into curated # spike sorting selection - print("Creating curated spike sorting") + logger.info("Creating curated spike sorting") auto_key_list = (AutomaticCuration() & sort_dict).fetch( "auto_curation_key" ) @@ -269,7 +274,7 @@ def spikesorting_pipeline_populator( # Perform no automatic curation, just populate curated spike sorting # selection with the initial curation. 
Used in case of clusterless # decoding - print("Creating curated spike sorting") + logger.info("Creating curated spike sorting") curation_keys = (Curation() & sort_dict).fetch("KEY") for curation_key in curation_keys: CuratedSpikeSortingSelection.insert1( @@ -281,7 +286,7 @@ def spikesorting_pipeline_populator( if fig_url_repo: # Curation Figurl - print("Creating curation figurl") + logger.info("Creating curation figurl") sort_interval_name = interval_list_name + "_entire" gh_url = ( fig_url_repo diff --git a/src/spyglass/spikesorting/spikesorting_recording.py b/src/spyglass/spikesorting/spikesorting_recording.py index 9d188e1bf..c9b79e608 100644 --- a/src/spyglass/spikesorting/spikesorting_recording.py +++ b/src/spyglass/spikesorting/spikesorting_recording.py @@ -9,21 +9,20 @@ import spikeinterface as si import spikeinterface.extractors as se -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common.common_device import Probe, ProbeType # noqa: F401 -from ..common.common_ephys import Electrode, ElectrodeGroup -from ..common.common_interval import ( +from spyglass.common.common_device import Probe, ProbeType # noqa: F401 +from spyglass.common.common_ephys import Electrode, ElectrodeGroup +from spyglass.common.common_interval import ( IntervalList, interval_list_intersect, intervals_by_length, union_adjacent_index, ) -from ..common.common_lab import LabTeam # noqa: F401 -from ..common.common_nwbfile import Nwbfile -from ..common.common_session import Session # noqa: F401 -from ..settings import recording_dir -from ..utils.dj_helper_fn import dj_replace +from spyglass.common.common_lab import LabTeam # noqa: F401 +from spyglass.common.common_nwbfile import Nwbfile +from spyglass.common.common_session import Session # noqa: F401 +from spyglass.settings import recording_dir +from spyglass.utils import SpyglassMixin, logger +from spyglass.utils.dj_helper_fn import dj_replace schema = dj.schema("spikesorting_recording") @@ -150,7 +149,7 @@ def set_group_by_shank( if omit_ref_electrode_group and ( str(e_group) == str(reference_electrode_group) ): - print( + logger.warn( f"Omitting electrode group {e_group} from sort groups " + "because contains reference." ) @@ -164,8 +163,9 @@ def set_group_by_shank( if ( omit_unitrode and len(shank_elect) == 1 ): # omit unitrodes if indicated - print( - f"Omitting electrode group {e_group}, shank {shank} from sort groups because unitrode." + logger.warn( + f"Omitting electrode group {e_group}, shank {shank} " + + "from sort groups because unitrode." 
) continue self.insert1(sg_key) diff --git a/src/spyglass/spikesorting/spikesorting_sorting.py b/src/spyglass/spikesorting/spikesorting_sorting.py index 3cb70b986..6ab7c862b 100644 --- a/src/spyglass/spikesorting/spikesorting_sorting.py +++ b/src/spyglass/spikesorting/spikesorting_sorting.py @@ -12,16 +12,16 @@ import spikeinterface.sorters as sis from spikeinterface.sortingcomponents.peak_detection import detect_peaks -from spyglass.utils.dj_mixin import SpyglassMixin - -from ..common.common_lab import LabMember, LabTeam -from ..common.common_nwbfile import AnalysisNwbfile -from ..settings import sorting_dir, temp_dir -from .spikesorting_artifact import ArtifactRemovedIntervalList -from .spikesorting_recording import ( +from spyglass.common.common_lab import LabMember, LabTeam +from spyglass.settings import sorting_dir, temp_dir +from spyglass.spikesorting.spikesorting_artifact import ( + ArtifactRemovedIntervalList, +) +from spyglass.spikesorting.spikesorting_recording import ( SpikeSortingRecording, SpikeSortingRecordingSelection, ) +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_sorting") @@ -190,7 +190,7 @@ def make(self, key: dict): mode="zeros", ) - print(f"Running spike sorting on {key}...") + logger.info(f"Running spike sorting on {key}...") sorter, sorter_params = (SpikeSorterParameters & key).fetch1( "sorter", "sorter_params" ) @@ -229,7 +229,7 @@ def make(self, key: dict): ) key["time_of_sort"] = int(time.time()) - print("Saving sorting results...") + logger.info("Saving sorting results...") sorting_folder = Path(sorting_dir) @@ -249,7 +249,7 @@ def delete(self): current_user_name = dj.config["database.user"] entries = self.fetch() permission_bool = np.zeros((len(entries),)) - print( + logger.info( f"Attempting to delete {len(entries)} entries, checking permission..." ) @@ -275,7 +275,7 @@ def delete(self): current_user_name in datajoint_user_names ) if np.sum(permission_bool) == len(entries): - print("Permission to delete all specified entries granted.") + logger.info("Permission to delete all specified entries granted.") super().delete() else: raise Exception( @@ -286,7 +286,6 @@ def delete(self): def fetch_nwb(self, *attrs, **kwargs): raise NotImplementedError return None - # return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs) def nightly_cleanup(self): """Clean up spike sorting directories that are not in the SpikeSorting table. 
@@ -299,7 +298,7 @@ def nightly_cleanup(self): for dir in dir_names: if dir not in analysis_file_names: full_path = str(Path(sorting_dir) / dir) - print(f"removing {full_path}") + logger.info(f"removing {full_path}") shutil.rmtree(str(Path(sorting_dir) / dir)) @staticmethod diff --git a/src/spyglass/spikesorting/v1/artifact.py b/src/spyglass/spikesorting/v1/artifact.py index 23880137f..33cddbe83 100644 --- a/src/spyglass/spikesorting/v1/artifact.py +++ b/src/spyglass/spikesorting/v1/artifact.py @@ -21,7 +21,7 @@ SpikeSortingRecording, SpikeSortingRecordingSelection, ) -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_v1_artifact") @@ -92,7 +92,7 @@ def insert_selection(cls, key: dict): """ query = cls & key if query: - print("Similar row(s) already inserted.") + logger.warn("Similar row(s) already inserted.") return query.fetch(as_dict=True) key["artifact_id"] = uuid.uuid4() cls.insert1(key, skip_duplicates=True) @@ -208,7 +208,7 @@ def _get_artifact_times( # if both thresholds are None, we skip artifract detection if amplitude_thresh_uV is zscore_thresh is None: - print( + logger.info( "Amplitude and zscore thresholds are both None, " + "skipping artifact detection" ) @@ -227,7 +227,7 @@ def _get_artifact_times( # detect frames that are above threshold in parallel n_jobs = ensure_n_jobs(recording, n_jobs=job_kwargs.get("n_jobs", 1)) - print(f"Using {n_jobs} jobs...") + logger.info(f"Using {n_jobs} jobs...") func = _compute_artifact_chunk init_func = _init_artifact_worker if n_jobs == 1: @@ -267,7 +267,7 @@ def _get_artifact_times( [[valid_timestamps[0], valid_timestamps[-1]]] ) artifact_times_empty = np.asarray([]) - print("No artifacts detected.") + logger.warn("No artifacts detected.") return recording_interval, artifact_times_empty # convert indices to intervals diff --git a/src/spyglass/spikesorting/v1/figurl_curation.py b/src/spyglass/spikesorting/v1/figurl_curation.py index fb33dcb4b..f52d1a7ef 100644 --- a/src/spyglass/spikesorting/v1/figurl_curation.py +++ b/src/spyglass/spikesorting/v1/figurl_curation.py @@ -3,7 +3,6 @@ import datajoint as dj import kachery_cloud as kcl -import numpy as np import pynwb import sortingview.views as vv import spikeinterface as si @@ -12,7 +11,7 @@ from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.spikesorting.v1.curation import CurationV1, _merge_dict_to_list from spyglass.spikesorting.v1.sorting import SpikeSortingSelection -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_v1_figurl_curation") @@ -51,7 +50,7 @@ def insert_selection(cls, key: dict): if "figurl_curation_id" in key: query = cls & {"figurl_curation_id": key["figurl_curation_id"]} if query: - print("Similar row(s) already inserted.") + logger.warn("Similar row(s) already inserted.") return query.fetch(as_dict=True) key["figurl_curation_id"] = uuid.uuid4() cls.insert1(key, skip_duplicates=True) @@ -202,7 +201,7 @@ def _generate_figurl( raster_plot_subsample_max_firing_rate=50, spike_amplitudes_subsample_max_firing_rate=50, ) -> str: - print("Preparing spikesortingview data") + logger.info("Preparing spikesortingview data") recording = R sorting = S diff --git a/src/spyglass/spikesorting/v1/metric_curation.py b/src/spyglass/spikesorting/v1/metric_curation.py index 0d7affc07..73ab84feb 100644 --- a/src/spyglass/spikesorting/v1/metric_curation.py +++ b/src/spyglass/spikesorting/v1/metric_curation.py @@ 
-23,7 +23,7 @@ get_peak_offset, ) from spyglass.spikesorting.v1.sorting import SpikeSortingSelection -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_v1_metric_curation") @@ -134,7 +134,7 @@ def insert_default(cls): def show_available_metrics(self): for metric in _metric_name_to_func: metric_doc = _metric_name_to_func[metric].__doc__.split("\n")[0] - print(f"{metric} : {metric_doc}\n") + logger.info(f"{metric} : {metric_doc}\n") @schema @@ -185,9 +185,7 @@ def insert_selection(cls, key: dict): key for the inserted row """ if cls & key: - print( - "This row has already been inserted into MetricCurationSelection." - ) + logger.warn("This row has already been inserted.") return (cls & key).fetch1() key["metric_curation_id"] = uuid.uuid4() cls.insert1(key, skip_duplicates=True) @@ -240,7 +238,7 @@ def make(self, key): os.mkdir(waveforms_dir) except FileExistsError: pass - print("Extracting waveforms...") + logger.info("Extracting waveforms...") waveforms = si.extract_waveforms( recording=recording, sorting=sorting, @@ -249,7 +247,7 @@ def make(self, key): **waveform_params, ) # compute metrics - print("Computing metrics...") + logger.info("Computing metrics...") metrics = {} for metric_name, metric_param_dict in metric_params.items(): metrics[metric_name] = self._compute_metric( @@ -261,11 +259,11 @@ def make(self, key): for unit_id, value in metrics["nn_isolation"].items() } - print("Applying curation...") + logger.info("Applying curation...") labels = self._compute_labels(metrics, label_params) merge_groups = self._compute_merge_groups(metrics, merge_params) - print("Saving to NWB...") + logger.info("Saving to NWB...") ( key["analysis_file_name"], key["object_id"], diff --git a/src/spyglass/spikesorting/v1/recording.py b/src/spyglass/spikesorting/v1/recording.py index 3e279a71c..397524aef 100644 --- a/src/spyglass/spikesorting/v1/recording.py +++ b/src/spyglass/spikesorting/v1/recording.py @@ -18,7 +18,7 @@ ) from spyglass.common.common_lab import LabTeam from spyglass.common.common_nwbfile import AnalysisNwbfile, Nwbfile -from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_v1_recording") @@ -147,7 +147,7 @@ def set_group_by_shank( if omit_ref_electrode_group and ( str(e_group) == str(reference_electrode_group) ): - print( + logger.warn( f"Omitting electrode group {e_group} from sort groups " + "because contains reference." ) @@ -161,8 +161,9 @@ def set_group_by_shank( if ( omit_unitrode and len(shank_elect) == 1 ): # omit unitrodes if indicated - print( - f"Omitting electrode group {e_group}, shank {shank} from sort groups because unitrode." + logger.warn( + f"Omitting electrode group {e_group}, shank {shank} " + + "from sort groups because unitrode." 
) continue cls.insert1(sg_key, skip_duplicates=True) @@ -231,7 +232,7 @@ def insert_selection(cls, key: dict): """ query = cls & key if query: - print("Similar row(s) already inserted.") + logger.warn("Similar row(s) already inserted.") return query.fetch(as_dict=True) key["recording_id"] = uuid.uuid4() cls.insert1(key, skip_duplicates=True) diff --git a/src/spyglass/spikesorting/v1/sorting.py b/src/spyglass/spikesorting/v1/sorting.py index 8836f12de..b673e5c8f 100644 --- a/src/spyglass/spikesorting/v1/sorting.py +++ b/src/spyglass/spikesorting/v1/sorting.py @@ -14,16 +14,14 @@ from spikeinterface.sortingcomponents.peak_detection import detect_peaks from spyglass.common.common_interval import IntervalList -from spyglass.common.common_lab import LabMember, LabTeam from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.settings import temp_dir -from spyglass.spikesorting.v1.recording import ( +from spyglass.spikesorting.v1.recording import ( # noqa: F401 SpikeSortingRecording, SpikeSortingRecordingSelection, + _consolidate_intervals, ) -from spyglass.utils.dj_mixin import SpyglassMixin - -from .recording import _consolidate_intervals +from spyglass.utils import SpyglassMixin, logger schema = dj.schema("spikesorting_v1_sorting") @@ -128,7 +126,7 @@ def insert_selection(cls, key: dict): """ query = cls & key if query: - print("Similar row(s) already inserted.") + logger.info("Similar row(s) already inserted.") return query.fetch(as_dict=True) key["sorting_id"] = uuid.uuid4() cls.insert1(key, skip_duplicates=True) diff --git a/src/spyglass/utils/__init__.py b/src/spyglass/utils/__init__.py index 9dfa81f97..05f316598 100644 --- a/src/spyglass/utils/__init__.py +++ b/src/spyglass/utils/__init__.py @@ -1,4 +1,5 @@ from spyglass.utils.dj_merge_tables import _Merge from spyglass.utils.dj_mixin import SpyglassMixin +from spyglass.utils.logging import logger -__all__ = ["_Merge", "SpyglassMixin"] +__all__ = ["_Merge", "SpyglassMixin", "logger"] diff --git a/src/spyglass/utils/database_settings.py b/src/spyglass/utils/database_settings.py index dc8ecdc1f..5a634c69c 100755 --- a/src/spyglass/utils/database_settings.py +++ b/src/spyglass/utils/database_settings.py @@ -7,6 +7,8 @@ import datajoint as dj +from spyglass.utils.logging import logger + GRANT_ALL = "GRANT ALL PRIVILEGES ON " GRANT_SEL = "GRANT SELECT ON " CREATE_USR = "CREATE USER IF NOT EXISTS " @@ -34,7 +36,7 @@ def __init__( target_group : str, optional Group to which user belongs. Default is kachery-users debug : bool, optional - Default False. If True, print sql instead of running + Default False. If True, pprint sql instead of running target_database : str, optional Default is mysql. Can also be docker container id """ @@ -100,7 +102,7 @@ def _find_group(self): # Check if the group was found if not group_found: if self.debug: - print(f"All groups: {[g.gr_name for g in groups]}") + logger.info(f"All groups: {[g.gr_name for g in groups]}") sys.exit( f"Error: The target group {self.target_group} was not found." ) @@ -116,7 +118,7 @@ def _add_module_sql(self, module_name, group): def add_module(self, module_name): """Add module to database. 
Grant permissions to all users in group"""
-        print(f"Granting everyone permissions to module {module_name}")
+        logger.info(f"Granting everyone permissions to module {module_name}")
         group = self._find_group()
         file = self.write_temp_file(self._add_module_sql(module_name, group))
         self.exec(file)
@@ -141,7 +143,7 @@ def add_dj_user(self, check_exists=True):
         if check_exists:
             user_home = Path.home().parent / self.user
             if user_home.exists():
-                print("Creating database user ", self.user)
+                logger.info(f"Creating database user {self.user}")
             else:
                 sys.exit(
                     f"Error: could not find {self.user} in home dir: {user_home}"
diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py
index e0247dffb..3be9f371a 100644
--- a/src/spyglass/utils/dj_helper_fn.py
+++ b/src/spyglass/utils/dj_helper_fn.py
@@ -6,11 +6,12 @@
 import datajoint as dj
 import numpy as np

-from .nwb_helper_fn import get_nwb_file
+from spyglass.utils.logging import logger
+from spyglass.utils.nwb_helper_fn import get_nwb_file


 def deprecated_factory(classes: list, old_module: str = "") -> list:
-    """Creates a list of classes and prints a warning when instantiated
+    """Creates a list of classes and logs a warning when instantiated

     Parameters
     ---------
@@ -20,7 +21,7 @@ def deprecated_factory(classes: list, old_module: str = "") -> list:

     Returns
     ------
     list
-        list of classes that will print a warning when instantiated
+        list of classes that will log a warning when instantiated

     """
     if not isinstance(classes, list):
@@ -50,8 +51,8 @@ def _subclass_factory(

     # Define the __call__ method for the new class
     def init_override(self, *args, **kwargs):
-        print(
-            "Deprecation Warning: this class has been moved out of "
+        logger.warn(
+            "Deprecation: this class has been moved out of "
             + f"{old_module}\n"
             + f"\t{old_name} -> {new_module}.{new_class.__name__}"
             + "\nPlease use the new location."
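
For context, the deprecation pattern above amounts to generating a subclass
whose constructor logs once and then defers to the real class. A minimal
sketch, with the hypothetical name make_deprecated_alias standing in for the
package's _subclass_factory:

    import logging

    logger = logging.getLogger("spyglass")

    def make_deprecated_alias(new_class, old_name, old_module):
        """Return a subclass of new_class that logs a deprecation warning."""

        def init_override(self, *args, **kwargs):
            logger.warning(
                f"Deprecation: this class has been moved out of {old_module}\n"
                f"\t{old_name} -> {new_class.__module__}.{new_class.__name__}"
            )
            new_class.__init__(self, *args, **kwargs)

        # Build the alias class dynamically under its old name
        return type(old_name, (new_class,), {"__init__": init_override})
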
diff --git a/src/spyglass/utils/dj_merge_tables.py b/src/spyglass/utils/dj_merge_tables.py index ed783ef08..4805a8d9a 100644 --- a/src/spyglass/utils/dj_merge_tables.py +++ b/src/spyglass/utils/dj_merge_tables.py @@ -10,8 +10,8 @@ from datajoint.utils import from_camel_case, get_master, to_camel_case from IPython.core.display import HTML -from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.utils.dj_helper_fn import fetch_nwb +from spyglass.utils.logging import logger RESERVED_PRIMARY_KEY = "merge_id" RESERVED_SECONDARY_KEY = "source" @@ -39,18 +39,27 @@ def __init__(self): if not self.is_declared: # remove comments after # from each line of definition if self._remove_comments(self.definition) != merge_def: - print( - "WARNING: merge table with non-default definition\n\t" + logger.warn( + "Merge table with non-default definition\n\t" + f"Expected: {merge_def.strip()}\n\t" + f"Actual : {self.definition.strip()}" ) for part in self.parts(as_objects=True): if part.primary_key != self.primary_key: - print( - f"WARNING: unexpected primary key in {part.table_name}" + logger.warn( + f"Unexpected primary key in {part.table_name}" + f"\n\tExpected: {self.primary_key}" + f"\n\tActual : {part.primary_key}" ) + self._analysis_nwbfile = None + + @property # CB: This is a property to avoid circular import + def analysis_nwbfile(self): + if self._analysis_nwbfile is None: + from spyglass.common import AnalysisNwbfile # noqa F401 + + self._analysis_nwbfile = AnalysisNwbfile + return self._analysis_nwbfile def _remove_comments(self, definition): """Use regular expressions to remove comments and blank lines""" @@ -464,11 +473,16 @@ def merge_delete_parent( return # User can still abort del below, but yes/no is unlikly for part_parent in part_parents: - super().delete(part_parent, **kwargs) # add safemode=False? + super().delete(part_parent, **kwargs) @classmethod def fetch_nwb( - cls, restriction: str = True, multi_source=False, *attrs, **kwargs + cls, + restriction: str = True, + multi_source=False, + disable_warning=False, + *attrs, + **kwargs, ): """Return the AnalysisNwbfile file linked in the source. @@ -479,6 +493,9 @@ def fetch_nwb( multi_source: bool Return from multiple parents. Default False. """ + if not disable_warning: + _warn_on_restriction(table=cls, restriction=restriction) + part_parents = cls._merge_restrict_parents( restriction=restriction, return_empties=False, @@ -496,7 +513,7 @@ def fetch_nwb( nwbs.extend( fetch_nwb( part_parent, - (AnalysisNwbfile, "analysis_file_abs_path"), + (cls.analysis_nwbfile, "analysis_file_abs_path"), *attrs, **kwargs, ) @@ -649,8 +666,8 @@ def merge_fetch(self, restriction: str = True, *attrs, **kwargs) -> list: try: results.extend(part.fetch(*attrs, **kwargs)) except DataJointError as e: - print( - f"WARNING: {e.args[0]} Skipping " + logger.warn( + f"{e.args[0]} Skipping " + to_camel_case(part.table_name.split("__")[-1]) ) @@ -659,7 +676,7 @@ def merge_fetch(self, restriction: str = True, *attrs, **kwargs) -> list: # attrs or "KEY" called. Intercept format, merge, and then transform? if not results: - print( + logger.info( "No merge_fetch results.\n\t" + "If not restricting, try: `M.merge_fetch(True,'attr')\n\t" + "If restricting by source, use dict: " @@ -713,11 +730,9 @@ def delete_downstream_merge( List[Tuple[dj.Table, dj.Table]] Entries in merge/part tables downstream of table input. 
""" - if not disable_warning and restriction is None and table().restriction: - print( - f"Warning: ignoring table restriction: {table().restriction}.\n\t" - + "Please pass restrictions as an arg" - ) + if not disable_warning: + _warn_on_restriction(table, restriction) + if not restriction: restriction = True @@ -741,6 +756,15 @@ def delete_downstream_merge( merge_table.delete(**kwargs) +def _warn_on_restriction(table: dj.Table, restriction: str = None): + """Warn if restriction on table object differs from input restriction""" + if restriction is None and table().restriction: + logger.warn( + f"Warning: ignoring table restriction: {table().restriction}.\n\t" + + "Please pass restrictions as an arg" + ) + + def _unique_descendants( table: dj.Table, recurse_level: int = 2, diff --git a/src/spyglass/utils/dj_mixin.py b/src/spyglass/utils/dj_mixin.py index f09b00cea..f4ed29197 100644 --- a/src/spyglass/utils/dj_mixin.py +++ b/src/spyglass/utils/dj_mixin.py @@ -3,6 +3,7 @@ from datajoint.utils import user_choice from spyglass.utils.dj_helper_fn import fetch_nwb +from spyglass.utils.logging import logger class SpyglassMixin: @@ -22,7 +23,7 @@ class SpyglassMixin: Check user permissions before deleting table rows. Permission is granted to users listed as admin in LabMember table or to users on a team with with the Session experimenter(s). If the table where the delete is - executed cannot be linked to a Session, a warning is printed and the + executed cannot be linked to a Session, a warning is logged and the delete continues. If the Session has no experimenter, or if the user is not on a team with the Session experimenter(s), a PermissionError is raised. `force_permission` can be set to True to bypass permission check. @@ -182,7 +183,7 @@ def _check_delete_permission(self) -> None: sess = self._find_session(self, Session) if not sess: # Permit delete if not linked to a session - print( + logger.warn( "Could not find lab team associated with " + f"{self.__class__.__name__}." + "\nBe careful not to delete others' data." @@ -214,7 +215,7 @@ def cautious_delete(self, force_permission: bool = False, *args, **kwargs): Permission is granted to users listed as admin in LabMember table or to users on a team with with the Session experimenter(s). If the table - cannot be linked to Session, a warning is printed and the delete + cannot be linked to Session, a warning is logged and the delete continues. If the Session has no experimenter, or if the user is not on a team with the Session experimenter(s), a PermissionError is raised. 
@@ -253,7 +254,7 @@ def cautious_delete(self, force_permission: bool = False, *args, **kwargs): for merge_table, _ in merge_deletes: merge_table.delete({**kwargs, "safemode": False}) else: - print("Delete aborted.") + logger.info("Delete aborted.") return super().delete(*args, **kwargs) # Additional confirm here diff --git a/src/spyglass/utils/logging.py b/src/spyglass/utils/logging.py new file mode 100644 index 000000000..e73a4705b --- /dev/null +++ b/src/spyglass/utils/logging.py @@ -0,0 +1,32 @@ +"""Logging configuration based on datajoint/logging.py""" +import logging +import os +import sys + +logger = logging.getLogger(__name__.split(".")[0]) + +log_level = os.getenv("DJ_LOG_LEVEL", "info").upper() + +log_format = logging.Formatter( + "[%(asctime)s][%(levelname)s] Spyglass: %(message)s", datefmt="%H:%M:%S" +) +date_format = "%H:%M:%S" + +stream_handler = logging.StreamHandler() +stream_handler.setFormatter(log_format) + +logger.setLevel(level=log_level) +logger.handlers = [stream_handler] + + +def excepthook(exc_type, exc_value, exc_traceback): + if issubclass(exc_type, KeyboardInterrupt): + sys.__excepthook__(exc_type, exc_value, exc_traceback) + return + + logger.error( + "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback) + ) + + +sys.excepthook = excepthook diff --git a/src/spyglass/utils/nwb_helper_fn.py b/src/spyglass/utils/nwb_helper_fn.py index 58b5cf243..f9edb5c2f 100644 --- a/src/spyglass/utils/nwb_helper_fn.py +++ b/src/spyglass/utils/nwb_helper_fn.py @@ -10,6 +10,8 @@ import pynwb import yaml +from spyglass.utils.logging import logger + # dict mapping file path to an open NWBHDF5IO object in read mode and its NWBFile __open_nwb_files = dict() @@ -48,7 +50,7 @@ def get_nwb_file(nwb_file_path): if nwbfile is None: # check to see if the file exists if not os.path.exists(nwb_file_path): - print( + logger.info( "NWB file not found locally; checking kachery for " + f"{nwb_file_path}" ) @@ -92,7 +94,7 @@ def get_config(nwb_file_path): # NOTE use p.stem[:-1] to remove the underscore that was added to the file config_path = p.parent / (p.stem[:-1] + "_spyglass_config.yaml") if not os.path.exists(config_path): - print(f"No config found at file path {config_path}") + logger.info(f"No config found at file path {config_path}") return dict() with open(config_path, "r") as stream: d = yaml.safe_load(stream) @@ -197,9 +199,9 @@ def estimate_sampling_rate( multiplier : float or int, optional Deft verbose : bool, optional - Print sampling rate to stdout. Default, False + Log sampling rate to stdout. Default, False filename : str, optional - Filename to reference when printing or err. Default, "file" + Filename to reference when logging or err. Default, "file" Returns ------- @@ -241,7 +243,9 @@ def estimate_sampling_rate( if sampling_rate < 0: raise ValueError(f"Error calculating sampling rate. For {filename}") if verbose: - print(f"Estimated sampling rate for {filename}: {sampling_rate} Hz") + logger.info( + f"Estimated sampling rate for {filename}: {sampling_rate} Hz" + ) return sampling_rate @@ -265,7 +269,7 @@ def get_valid_intervals( a gap. Must be > 1. Default to 2.5 min_valid_len : float, optional Length of smallest valid interval. Default to 0. If greater - than interval duration, print warning and use half the total time. + than interval duration, log warning and use half the total time. 
Returns ------- @@ -279,7 +283,7 @@ def get_valid_intervals( if total_time < min_valid_len: half_total_time = total_time / 2 - print(f"WARNING: Setting minimum valid interval to {half_total_time}") + logger.warn(f"Setting minimum valid interval to {half_total_time}") min_valid_len = half_total_time # get rid of NaN elements @@ -393,9 +397,9 @@ def _get_pos_dict( epoch_groups : dict Epoch start times as keys, spatial series indices as values session_id : str, optional - Optional session ID for verbose print during sampling rate estimation + Optional session ID for verbose log during sampling rate estimation verbose : bool, optional - Default to False. Print estimated sampling rate + Default to False. Log estimated sampling rate incl_times : bool, optional Default to True. Include valid intervals. Requires additional computation not needed for RawPosition @@ -442,7 +446,7 @@ def get_all_spatial_series(nwbf, verbose=False, incl_times=True) -> dict: nwbf : pynwb.NWBFile The source NWB file object. verbose : bool - Flag representing whether to print the sampling rate. + Flag representing whether to log the sampling rate. incl_times : bool Include valid times in the output. Default, True. Set to False for only spatial series object IDs. @@ -477,7 +481,7 @@ def get_nwb_copy_filename(nwb_file_name): filename, file_extension = os.path.splitext(nwb_file_name) if filename.endswith("_"): - print(f"WARNING: File may already be a copy: {nwb_file_name}") + logger.warn(f"File may already be a copy: {nwb_file_name}") return f"{filename}_{file_extension}" @@ -495,7 +499,7 @@ def change_group_permissions( ] # Loop through nwb file directories and change group permissions for target_content in target_contents: - print( + logger.info( f"For {target_content}, changing group to {set_group_name} " + "and giving read/write/execute permissions" )
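
Usage sketch for the new src/spyglass/utils/logging.py module introduced above
(the session name below is hypothetical): the stream handler formats records as
"[HH:MM:SS][LEVEL] Spyglass: message", and the level is read from the
DJ_LOG_LEVEL environment variable, defaulting to "info".

    # $ DJ_LOG_LEVEL=debug python my_script.py
    from spyglass.utils import logger

    logger.info("Populating session %s", "example_session")
    # -> [14:32:46][INFO] Spyglass: Populating session example_session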