Monitor updates 2022 11 11 #1097

Open

wants to merge 19 commits into develop from monitor_updates_2022-11-11

Changes from 1 commit

Commits (19)
bb8af30
Fixing an error with array types in dark monitor output, and making C…
york-stsci Nov 11, 2022
0df0013
Merge branch 'spacetelescope:develop' into monitor_updates_2022-11-11
york-stsci Nov 11, 2022
302d148
Merge branch 'spacetelescope:develop' into monitor_updates_2022-11-11
york-stsci Nov 11, 2022
86a67e4
Adding more logging to bad pixel monitor
york-stsci Nov 14, 2022
a78bb9d
Hopefully speeding up and including more data
york-stsci Nov 14, 2022
c5c357f
Merge branch 'monitor_updates_2022-11-11' of https://github.com/york-…
york-stsci Nov 14, 2022
2a6be15
changed line colour to hopefully stand out a bit better
york-stsci Nov 14, 2022
47ca5b7
Actually look for if all exts are done before doing a step, in case i…
york-stsci Nov 15, 2022
8d36b76
Added file name to hover tooltips
york-stsci Nov 15, 2022
ef3c1d6
More monitoring around quitting when all the outputs are there
york-stsci Nov 16, 2022
118908f
Trying again on finding output files
york-stsci Nov 16, 2022
605fedf
More logging
york-stsci Nov 17, 2022
8f41d9e
Don't assume files are not None
york-stsci Nov 17, 2022
cccdda0
Merge branch 'develop' into monitor_updates_2022-11-11
mfixstsci Nov 17, 2022
c7c19ea
Value debugging
york-stsci Nov 17, 2022
01aa442
Merge branch 'monitor_updates_2022-11-11' of https://github.com/york-…
york-stsci Nov 17, 2022
9638ecf
Fixing typo
york-stsci Nov 17, 2022
3d31dc2
Added readnoise check for existing files, and custom maximum group limit
york-stsci Nov 18, 2022
133521b
typo
york-stsci Nov 18, 2022
Added readnoise check for existing files, and custom maximum group limit
york-stsci committed Nov 18, 2022
commit 3d31dc2bf30c711d1c89fd6a03b47a53e7cc759d
15 changes: 11 additions & 4 deletions jwql/instrument_monitors/common_monitors/readnoise_monitor.py
@@ -415,7 +415,11 @@ def process(self, file_list):
files_to_calibrate.append(file)

# Run the files through the necessary pipeline steps
-outputs = run_parallel_pipeline(files_to_calibrate, "uncal", "refpix", self.instrument)
+# NOTE: Because the readnoise monitor only needs to run as far as the "refpix"
+# pipeline stage, it avoids the greatly increased run times otherwise
+# associated with files that contain many groups, and so it can afford a
+# much higher group limit when calibrating its inputs.
+outputs = run_parallel_pipeline(files_to_calibrate, "uncal", "refpix", self.instrument, max_groups=20000)

for filename in file_list:
logging.info('\tWorking on file: {}'.format(filename))
@@ -623,9 +627,12 @@ def run(self):
# Skip processing if the file doesn't have enough groups/ints to calculate the readnoise.
# MIRI needs extra since the first five groups and the last group are omitted before calculating the readnoise.
if total_cds_frames >= 10:
-shutil.copy(uncal_filename, self.data_dir)
-logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename))
-set_permissions(output_filename)
+if os.path.isfile(os.path.join(self.data_dir, os.path.basename(uncal_filename))):
+    logging.info("\tRaw file already exists locally.")
+else:
+    shutil.copy(uncal_filename, self.data_dir)
+    logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename))
+    set_permissions(output_filename)
new_files.append(output_filename)
else:
logging.info('\tNot enough groups/ints to calculate readnoise in {}'.format(uncal_filename))
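
The second hunk above replaces an unconditional copy with an idempotent one: the monitor now skips raw files that are already staged in its working directory. A minimal standalone sketch of that guard, assuming a plain data_dir path in place of the monitor's self.data_dir and its set_permissions() helper:

import logging
import os
import shutil


def stage_uncal_file(uncal_filename, data_dir):
    """Hypothetical helper mirroring the guard added to the readnoise
    monitor: copy a raw file into data_dir unless it is already present,
    and return the local path either way."""
    output_filename = os.path.join(data_dir, os.path.basename(uncal_filename))
    if os.path.isfile(output_filename):
        logging.info("\tRaw file already exists locally.")
    else:
        shutil.copy(uncal_filename, data_dir)
        logging.info('\tCopied {} to {}'.format(uncal_filename, output_filename))
    return output_filename

Re-running the monitor over the same file list then becomes safe, since previously staged files are logged and reused rather than copied again.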
20 changes: 9 additions & 11 deletions jwql/shared_tasks/shared_tasks.py
@@ -215,7 +215,7 @@ def collect_after_task(**kwargs):


@celery_app.task(name='jwql.shared_tasks.shared_tasks.run_calwebb_detector1')
-def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, step_args={}):
+def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument, step_args={}, max_groups=1000):
"""Run the steps of ``calwebb_detector1`` on the input file, saving the result of each
step as a separate output file, then return the name-and-path of the file as reduced
in the reduction directory. Once all requested extensions have been produced, the
@@ -271,7 +271,6 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument,
set_permissions(uncal_file)

# Check for exposures with too many groups
-max_groups = config.get("max_groups", 1000)
with fits.open(uncal_file) as inf:
total_groups = inf[0].header["NINTS"] * inf[0].header["NGROUPS"]
if total_groups > max_groups:
@@ -356,7 +355,7 @@ def run_calwebb_detector1(input_file_name, short_name, ext_or_exts, instrument,


@celery_app.task(name='jwql.shared_tasks.shared_tasks.calwebb_detector1_save_jump')
-def calwebb_detector1_save_jump(input_file_name, ramp_fit=True, save_fitopt=True):
+def calwebb_detector1_save_jump(input_file_name, ramp_fit=True, save_fitopt=True, max_groups=1000):
"""Call ``calwebb_detector1`` on the provided file, running all
steps up to the ``ramp_fit`` step, and save the result. Optionally
run the ``ramp_fit`` step and save the resulting slope file as well.
@@ -409,7 +408,6 @@ def calwebb_detector1_save_jump(input_file_name, ramp_fit=True, save_fitopt=True
set_permissions(uncal_file)

# Check for exposures with too many groups
-max_groups = config.get("max_groups", 1000)
with fits.open(uncal_file) as inf:
total_groups = inf[0].header["NINTS"] * inf[0].header["NGROUPS"]
if total_groups > max_groups:
@@ -572,7 +570,7 @@ def prep_file(input_file, in_ext):
return short_name, cal_lock, os.path.join(send_path, uncal_name)


-def start_pipeline(input_file, short_name, ext_or_exts, instrument, jump_pipe=False):
+def start_pipeline(input_file, short_name, ext_or_exts, instrument, jump_pipe=False, max_groups=1000):
"""Starts the standard or save_jump pipeline for the provided file.

.. warning::
@@ -624,9 +622,9 @@ def start_pipeline(input_file, short_name, ext_or_exts, instrument, jump_pipe=Fa
ramp_fit = True
elif "fitopt" in ext:
save_fitopt = True
-result = calwebb_detector1_save_jump.delay(input_file, ramp_fit=ramp_fit, save_fitopt=save_fitopt)
+result = calwebb_detector1_save_jump.delay(input_file, ramp_fit=ramp_fit, save_fitopt=save_fitopt, max_groups=max_groups)
else:
-result = run_calwebb_detector1.delay(input_file, short_name, ext_or_exts, instrument)
+result = run_calwebb_detector1.delay(input_file, short_name, ext_or_exts, instrument, max_groups=max_groups)
return result


@@ -678,7 +676,7 @@ def retrieve_files(short_name, ext_or_exts, dest_dir):
return output_file_or_files


-def run_pipeline(input_file, in_ext, ext_or_exts, instrument, jump_pipe=False):
+def run_pipeline(input_file, in_ext, ext_or_exts, instrument, jump_pipe=False, max_groups=1000):
"""Convenience function for using the ``run_calwebb_detector1`` function on a data
file, including the following steps:

@@ -718,7 +716,7 @@ def run_pipeline(input_file, in_ext, ext_or_exts, instrument, jump_pipe=False):
retrieve_dir = os.path.dirname(input_file)
short_name, cal_lock, uncal_file = prep_file(input_file, in_ext)
uncal_name = os.path.basename(uncal_file)
-result = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe)
+result = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe, max_groups=max_groups)
logging.info("\t\tStarting with ID {}".format(result.id))
processed_path = result.get()
logging.info("\t\tPipeline Complete")
@@ -734,7 +732,7 @@ def run_pipeline(input_file, in_ext, ext_or_exts, instrument, jump_pipe=False):
return output


-def run_parallel_pipeline(input_files, in_ext, ext_or_exts, instrument, jump_pipe=False):
+def run_parallel_pipeline(input_files, in_ext, ext_or_exts, instrument, jump_pipe=False, max_groups=1000):
"""Convenience function for using the ``run_calwebb_detector1`` function on a list of
data files, breaking them into parallel celery calls, collecting the results together,
and returning the results as another list. In particular, this function will do the
@@ -800,7 +798,7 @@ def run_parallel_pipeline(input_files, in_ext, ext_or_exts, instrument, jump_pip
output_dirs[short_name] = retrieve_dir
input_file_paths[short_name] = input_file
locks[short_name] = cal_lock
-results[short_name] = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe)
+results[short_name] = start_pipeline(uncal_name, short_name, ext_or_exts, instrument, jump_pipe=jump_pipe, max_groups=max_groups)
logging.info("\tStarting {} with ID {}".format(short_name, results[short_name].id))
logging.info("Celery tasks submitted.")
logging.info("Waiting for task results")