diff --git a/CHANGELOG.md b/CHANGELOG.md index 75abfd90f..8e012f76b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ release. ### Fixed - Fixed LRO MiniRF drivers naif keywords focal to pixel and pixel to focal translations to be correct. [#569](https://github.com/DOI-USGS/ale/pull/569) +- Bugfix for position and orientation for MSL cameras (driver MslMastcamPds3NaifSpiceDriver). Validated that Nav and Mast LBL files (for both left and right sensor) produce correctly positioned and oriented CSM cameras, that are self-consistent and consistent with a prior DEM for the site. [#580](https://github.com/DOI-USGS/ale/pull/580) ### Changed - Removed the affine6p library and replaced affine6p's affine transformation with a numpy solution [#579](https://github.com/DOI-USGS/ale/pull/579) @@ -77,3 +78,6 @@ release. - Chandrayaan1_mrffr IsisLabelNaifSpice driver, tests and test data [#519](https://github.com/DOI-USGS/ale/pull/519) - MGS MOC Narrow Angle IsisLabelNaifSpice driver, tests, and test data [#517](https://github.com/DOI-USGS/ale/pull/517) - Hayabusa NIRS IsisLabelNaifSpice driver, tests and test data [#532](https://github.com/DOI-USGS/ale/pull/532) + + + diff --git a/ale/base/type_sensor.py b/ale/base/type_sensor.py index 613bb48b6..eca917801 100644 --- a/ale/base/type_sensor.py +++ b/ale/base/type_sensor.py @@ -373,9 +373,8 @@ def line_times(self): class Cahvor(): """ - Mixin for largely ground based sensors to add an - extra step in the frame chain to go from ground camera to - the Camera + Mixin for ground-based sensors to add to the position and rotation + the components going from rover frame to camera frame. """ @property @@ -396,17 +395,50 @@ def final_inst_frame(self): @property def sensor_position(self): - positions, velocities, times = super().sensor_position - positions += self.cahvor_camera_dict["C"] - if self._props.get("landed", False): - positions = np.array([[0, 0, 0]] * len(times)) - velocities = np.array([[0, 0, 0]] * len(times)) - return positions, velocities, times + """ + Find the rover position, then add the camera position relative to the + rover. The returned position is in ECEF. + + Returns + ------- + : (positions, velocities, times) + a tuple containing a list of positions, a list of velocities, and a + list of times. + """ + + # Rover position in ECEF + positions, velocities, times = super().sensor_position + + nadir = self._props.get("nadir", False) + if nadir: + # For nadir applying the rover-to-camera offset runs into + # problems, so return early. 
TBD + return positions, velocities, times + + # Rover-to-camera offset in rover frame + cam_ctr = self.cahvor_center + + # Rover-to-camera offset in ECEF + ecef_frame = self.target_frame_id + rover_frame = self.final_inst_frame + frame_chain = self.frame_chain + rover2ecef_rotation = \ + frame_chain.compute_rotation(rover_frame, ecef_frame) + cam_ctr = rover2ecef_rotation.apply_at([cam_ctr], times)[0] + + # Go from rover position to camera position + positions[0] += cam_ctr + + if self._props.get("landed", False): + positions = np.array([[0, 0, 0]] * len(times)) + velocities = np.array([[0, 0, 0]] * len(times)) + + return positions, velocities, times def compute_h_c(self): """ Computes the h_c element of a cahvor model for the conversion - to a photogrametric model + to a photogrammetric model Returns ------- @@ -418,7 +450,7 @@ def compute_h_c(self): def compute_h_s(self): """ Computes the h_s element of a cahvor model for the conversion - to a photogrametric model + to a photogrammetric model Returns ------- @@ -430,7 +462,7 @@ def compute_h_s(self): def compute_v_c(self): """ Computes the v_c element of a cahvor model for the conversion - to a photogrametric model + to a photogrammetric model Returns ------- @@ -442,7 +474,7 @@ def compute_v_c(self): def compute_v_s(self): """ Computes the v_s element of a cahvor model for the conversion - to a photogrametric model + to a photogrammetric model Returns ------- @@ -474,6 +506,18 @@ def cahvor_rotation_matrix(self): self._cahvor_rotation_matrix = np.array([H_prime, V_prime, self.cahvor_camera_dict['A']]) return self._cahvor_rotation_matrix + @property + def cahvor_center(self): + """ + Computes the cahvor center for the sensor relative to the rover frame + + Returns + ------- + : array + Cahvor center as a 1D numpy array + """ + return self.cahvor_camera_dict['C'] + @property def frame_chain(self): """ @@ -566,6 +610,6 @@ def pixel_size(self): Returns ------- : float - Focal length of a cahvor model instrument + Pixel size of a cahvor model instrument """ - return self.focal_length/self.compute_h_s() + return -self.focal_length/self.compute_h_s() diff --git a/ale/drivers/co_drivers.py b/ale/drivers/co_drivers.py index f800c06b1..a062d838d 100644 --- a/ale/drivers/co_drivers.py +++ b/ale/drivers/co_drivers.py @@ -205,7 +205,7 @@ def focal_length(self): """ NAC uses multiple filter pairs, each filter combination has a different focal length. NAIF's Cassini kernels do not contain focal lengths for NAC filters and - so we aquired updated NAC filter data from ISIS's IAK kernel. + so we acquired updated NAC filter data from ISIS's IAK kernel. """ # default focal defined by IAK kernel diff --git a/ale/drivers/lro_drivers.py b/ale/drivers/lro_drivers.py index ed2b531a9..1cb50f427 100644 --- a/ale/drivers/lro_drivers.py +++ b/ale/drivers/lro_drivers.py @@ -102,7 +102,7 @@ def odtk(self): @property def light_time_correction(self): """ - Returns the type of light time correction and abberation correction to + Returns the type of light time correction and aberration correction to use in NAIF calls. LROC is specifically set to not use light time correction because it is @@ -112,7 +112,7 @@ def light_time_correction(self): Returns ------- : str - The light time and abberation correction string for use in NAIF calls. + The light time and aberration correction string for use in NAIF calls. See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html for the different options available. 
""" diff --git a/ale/drivers/msl_drivers.py b/ale/drivers/msl_drivers.py index f61402f96..4aa009a74 100644 --- a/ale/drivers/msl_drivers.py +++ b/ale/drivers/msl_drivers.py @@ -42,10 +42,28 @@ def instrument_id(self): """ lookup = { "MAST_RIGHT": 'MASTCAM_RIGHT', - "MAST_LEFT": 'MASTCAM_LEFT' + "MAST_LEFT": 'MASTCAM_LEFT', + "NAV_RIGHT_B": 'NAVCAM_RIGHT_B', + "NAV_LEFT_B": 'NAVCAM_LEFT_B' } return self.instrument_host_id + "_" + lookup[super().instrument_id] + @property + def is_navcam(self): + """ + Returns True if the camera is a nav cam, False otherwise. + Need to handle nav cam differently as its focal length + cannot be looked up in the spice data. Use instead + a focal length in pixels computed from the CAHVOR model, + and a pixel size of 1. + + Returns + ------- + : bool + True if the camera is a nav cam, False otherwise + """ + return 'NAVCAM' in self.instrument_id + @property def cahvor_camera_dict(self): """ @@ -72,14 +90,14 @@ def cahvor_camera_dict(self): @property def final_inst_frame(self): """ - Defines MSLs last naif frame before the cahvor model frame + Defines the rover frame, relative to which the MSL cahvor camera is defined Returns ------- : int - Naif frame code for MSL_RSM_HEAD + Naif frame code for MSL_ROVER """ - return spice.bods2c("MSL_RSM_HEAD") + return spice.bods2c("MSL_ROVER") @property def sensor_frame_id(self): @@ -108,10 +126,7 @@ def focal2pixel_lines(self): : list<double> focal plane to detector lines """ - if self._props.get("landed", False): - return [0, 0, -1/self.pixel_size] - else: - return [0, 0, 1/self.pixel_size] + return [0, 0, -1/self.pixel_size] @property def focal2pixel_samples(self): @@ -123,10 +138,7 @@ def focal2pixel_samples(self): : list<double> focal plane to detector samples """ - if (self._props.get("nadir", False)): - return [0, 1/self.pixel_size, 0] - else: - return [0, -1/self.pixel_size, 0] + return [0, -1/self.pixel_size, 0] @property def sensor_model_version(self): @@ -137,3 +149,56 @@ def sensor_model_version(self): ISIS sensor model version """ return 1 + + @property + def light_time_correction(self): + """ + Returns the type of light time correction and aberration correction to + use in NAIF calls. + + For MSL using such a correction returns wrong results, so turn it off. + + Returns + ------- + : str + The light time and aberration correction string for use in NAIF calls. + See https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/abcorr.html + for the different options available. + """ + return 'NONE' + + @property + def focal_length(self): + """ + Returns the focal length of the sensor with a negative sign. + This was tested to work with MSL mast and nav cams. + + Returns + ------- + : float + focal length + """ + if self.is_navcam: + # Focal length in pixel as computed for a cahvor model. + # See is_navcam() for an explanation. + return -(self.compute_h_s() + self.compute_v_s())/2.0 + + # For mast cam + return -super().focal_length + + @property + def pixel_size(self): + """ + Returns the pixel size. + + Returns + ------- + : float + pixel size + """ + if self.is_navcam: + # See is_navcam() for an explanation. 
+ return 1.0 + + # For mast cam + return super().pixel_size diff --git a/ale/formatters/formatter.py b/ale/formatters/formatter.py index 73a9d6444..afeb2f966 100644 --- a/ale/formatters/formatter.py +++ b/ale/formatters/formatter.py @@ -26,7 +26,7 @@ def to_isd(driver): meta_data = {} meta_data['isis_camera_version'] = driver.sensor_model_version - + # general information meta_data['image_lines'] = driver.image_lines meta_data['image_samples'] = driver.image_samples @@ -89,14 +89,15 @@ def to_isd(driver): frame_chain = driver.frame_chain target_frame = driver.target_frame_id + J2000 = 1 # J2000 frame id body_rotation = {} - source_frame, destination_frame, time_dependent_target_frame = frame_chain.last_time_dependent_frame_between(target_frame, 1) - - if source_frame != 1: + source_frame, destination_frame, time_dependent_target_frame = frame_chain.last_time_dependent_frame_between(target_frame, J2000) + + if source_frame != J2000: # Reverse the frame order because ISIS orders frames as # (destination, intermediate, ..., intermediate, source) - body_rotation['time_dependent_frames'] = shortest_path(frame_chain, source_frame, 1) - time_dependent_rotation = frame_chain.compute_rotation(1, source_frame) + body_rotation['time_dependent_frames'] = shortest_path(frame_chain, source_frame, J2000) + time_dependent_rotation = frame_chain.compute_rotation(J2000, source_frame) body_rotation['ck_table_start_time'] = time_dependent_rotation.times[0] body_rotation['ck_table_end_time'] = time_dependent_rotation.times[-1] body_rotation['ck_table_original_size'] = len(time_dependent_rotation.times) @@ -122,8 +123,8 @@ def to_isd(driver): # Reverse the frame order because ISIS orders frames as # (destination, intermediate, ..., intermediate, source) - instrument_pointing['time_dependent_frames'] = shortest_path(frame_chain, destination_frame, 1) - time_dependent_rotation = frame_chain.compute_rotation(1, destination_frame) + instrument_pointing['time_dependent_frames'] = shortest_path(frame_chain, destination_frame, J2000) + time_dependent_rotation = frame_chain.compute_rotation(J2000, destination_frame) instrument_pointing['ck_table_start_time'] = time_dependent_rotation.times[0] instrument_pointing['ck_table_end_time'] = time_dependent_rotation.times[-1] instrument_pointing['ck_table_original_size'] = len(time_dependent_rotation.times) @@ -141,7 +142,7 @@ def to_isd(driver): instrument_pointing['constant_rotation'] = constant_rotation.rotation_matrix().flatten() meta_data['instrument_pointing'] = instrument_pointing - # interiror orientation + # interior orientation meta_data['naif_keywords'] = driver.naif_keywords if isinstance(driver,LineScanner) or isinstance(driver, Framer) or isinstance(driver, PushFrame): @@ -162,9 +163,8 @@ def to_isd(driver): meta_data['starting_detector_line'] = driver.detector_start_line meta_data['starting_detector_sample'] = driver.detector_start_sample - - j2000_rotation = frame_chain.compute_rotation(target_frame, 1) + j2000_rotation = frame_chain.compute_rotation(target_frame, J2000) instrument_position = {} positions, velocities, times = driver.sensor_position diff --git a/ale/transformation.py b/ale/transformation.py index ab3ff5e6f..711393192 100644 --- a/ale/transformation.py +++ b/ale/transformation.py @@ -123,8 +123,8 @@ def from_spice(cls, sensor_frame, target_frame, center_ephemeris_time, ephemeris constant_frames.extend(target_constant_frames) - frame_chain.compute_time_dependent_rotiations(sensor_time_dependent_frames, sensor_times, inst_time_bias) - 
frame_chain.compute_time_dependent_rotiations(target_time_dependent_frames, target_times, 0) + frame_chain.compute_time_dependent_rotations(sensor_time_dependent_frames, sensor_times, inst_time_bias) + frame_chain.compute_time_dependent_rotations(target_time_dependent_frames, target_times, 0) for s, d in constant_frames: quats = np.zeros(4) @@ -380,7 +380,7 @@ def extract_exact_ck_times(observStart, observEnd, targetFrame): return times - def compute_time_dependent_rotiations(self, frames, times, time_bias): + def compute_time_dependent_rotations(self, frames, times, time_bias): """ Computes the time dependent rotations based on a list of tuples that define the relationships between frames as (source, destination) and a list of times to diff --git a/tests/pytests/data/isds/msl_isd.json b/tests/pytests/data/isds/msl_isd.json index 496fc0da7..87194f8e8 100644 --- a/tests/pytests/data/isds/msl_isd.json +++ b/tests/pytests/data/isds/msl_isd.json @@ -37,19 +37,15 @@ ], "angular_velocities": [ [ - 3.1623010827381965e-05, + 3.162301082738196e-05, -2.881378599775597e-05, - 5.651578887273642e-05 + 5.651578887273641e-05 ] ], "reference_frame": 1 }, "instrument_pointing": { "time_dependent_frames": [ - -76204, - -76203, - -76202, - -76201, -76000, -76910, -76900, @@ -64,35 +60,34 @@ ], "quaternions": [ [ - 0.5220816838298279, - -0.21018675356622796, - -0.22520580724316586, - -0.7953204312845581 + -0.15620799812369596, + -0.10975869489004511, + 0.492119901898648, + 0.8493350883917882 ] ], "angular_velocities": [ [ - 0.00023743002562463675, - -0.005620403629369784, - -0.003106243307946095 + 3.170513304263995e-05, + -2.8784577092481574e-05, + 5.646693411150336e-05 ] ], "reference_frame": 1, "constant_frames": [ -76573, - -76205, - -76204 + -76000 ], "constant_rotation": [ - 0.41995474711084907, - 0.9072597689641163, - -0.022753505185826722, - -0.7790017804603829, - 0.3732225088642592, - 0.5038463901990582, - 0.4656116798563116, - -0.19386766232728606, - 0.8634935396892888 + 0.41656672703572295, + 0.908831179877854, + -0.022316998198093657, + -0.7809014963237929, + 0.3702829395764849, + 0.503073948538243, + 0.46547289391113544, + -0.19213649091316684, + 0.8639551804888768 ] }, "naif_keywords": { @@ -231,20 +226,20 @@ "detector_sample_summing": 1, "detector_line_summing": 1, "focal_length_model": { - "focal_length": 34.0 + "focal_length": -34.0 }, "detector_center": { "line": 576.4026068104001, - "sample": 680.1442422028802 + "sample": 680.1442422028803 }, "focal2pixel_lines": [ 0, 0, - 136.49886775101945 + -136.49886775101947 ], "focal2pixel_samples": [ 0, - -136.49886775101945, + -136.49886775101947, 0 ], "optical_distortion": { @@ -269,16 +264,16 @@ ], "positions": [ [ - -42.94908602840011, - -2878.1178384691016, - -1794.0007301446622 + -42.942715970517135, + -2877.9103543439596, + -1793.8709578319247 ] ], "velocities": [ [ - 0.2143509476698853, - 0.054297300199827, - -0.09225670467004271 + 0.21433558786732382, + 0.054300659253033345, + -0.09224553252379056 ] ], "reference_frame": 1 @@ -293,15 +288,15 @@ "positions": [ [ -178112289.0644448, - -111686023.67244285, - -46420243.18796099 + -111686023.6724429, + -46420243.18796098 ] ], "velocities": [ [ 12.688344510456547, - -19.98128446099907, - -9.507348850576207 + -19.98128446099721, + -9.507348850574344 ] ], "reference_frame": 1 diff --git a/tests/pytests/data/isds/msl_nadir_isd.json b/tests/pytests/data/isds/msl_nadir_isd.json index c131a90c1..519319850 100644 --- a/tests/pytests/data/isds/msl_nadir_isd.json +++ 
b/tests/pytests/data/isds/msl_nadir_isd.json @@ -37,16 +37,16 @@ ], "angular_velocities": [ [ - 3.1623010827381965e-05, + 3.162301082738196e-05, -2.881378599775597e-05, - 5.651578887273642e-05 + 5.651578887273641e-05 ] ], "reference_frame": 1 }, "instrument_pointing": { "time_dependent_frames": [ - -76205, + -76000, 1 ], "ck_table_start_time": 598494669.4412209, @@ -57,26 +57,26 @@ ], "quaternions": [ [ - 0.7419897630883615, - 0.4156401046800741, - 0.25055247114123014, - 0.4625126528632889 + 0.7419899275668841, + 0.41563986262021746, + 0.25055291222506065, + 0.4625123675815364 ] ], "angular_velocities": null, "reference_frame": 1, "constant_frames": [ -76573, - -76205 + -76000 ], "constant_rotation": [ - 0.4165667270357225, + 0.41656672703572295, 0.908831179877854, - -0.02231699819809352, + -0.022316998198093657, -0.7809014963237929, - 0.37028293957648445, + 0.3702829395764849, 0.503073948538243, - 0.4654728939111354, + 0.46547289391113544, -0.19213649091316684, 0.8639551804888768 ] @@ -217,20 +217,20 @@ "detector_sample_summing": 1, "detector_line_summing": 1, "focal_length_model": { - "focal_length": 34.0 + "focal_length": -34.0 }, "detector_center": { "line": 576.4026068104001, - "sample": 680.1442422028802 + "sample": 680.1442422028803 }, "focal2pixel_lines": [ 0, 0, - 136.49886775101945 + -136.49886775101947 ], "focal2pixel_samples": [ 0, - 136.49886775101945, + -136.49886775101947, 0 ], "optical_distortion": { @@ -255,16 +255,16 @@ ], "positions": [ [ - -42.94908602840011, - -2878.1178384691016, - -1794.0007301446622 + -42.94189787175925, + -2877.908568116462, + -1793.8700624828516 ] ], "velocities": [ [ - 0.2143509476698853, - 0.054297300199827, - -0.09225670467004271 + 0.21433546111887108, + 0.0543006771748966, + -0.09224545246537652 ] ], "reference_frame": 1 @@ -279,15 +279,15 @@ "positions": [ [ -178112289.0644448, - -111686023.67244285, - -46420243.18796099 + -111686023.6724429, + -46420243.18796098 ] ], "velocities": [ [ 12.688344510456547, - -19.98128446099907, - -9.507348850576207 + -19.98128446099721, + -9.507348850574344 ] ], "reference_frame": 1 diff --git a/tests/pytests/test_cahvor_mixin.py b/tests/pytests/test_cahvor_mixin.py index b3ad22f4b..ad9b3a68d 100644 --- a/tests/pytests/test_cahvor_mixin.py +++ b/tests/pytests/test_cahvor_mixin.py @@ -65,4 +65,4 @@ def test_cahvor_detector_center_sample(self, cahvor_camera_dict): @patch("ale.base.type_sensor.Cahvor.cahvor_camera_dict", new_callable=PropertyMock, return_value=cahvor_camera_dict()) def test_cahvor_pixel_size(self, cahvor_camera_dict): - assert self.driver.pixel_size == 0.007248034226138798 \ No newline at end of file + assert self.driver.pixel_size == -0.007248034226138798 \ No newline at end of file diff --git a/tests/pytests/test_msl_drivers.py b/tests/pytests/test_msl_drivers.py index 0b25ee117..c12d308c1 100644 --- a/tests/pytests/test_msl_drivers.py +++ b/tests/pytests/test_msl_drivers.py @@ -53,9 +53,9 @@ def test_exposure_duration(self): np.testing.assert_almost_equal(self.driver.exposure_duration, 0.0102) def test_final_inst_frame(self): - with patch('ale.drivers.msl_drivers.spice.bods2c', new_callable=PropertyMock, return_value=-76562) as bods2c: - assert self.driver.final_inst_frame == -76562 - bods2c.assert_called_with("MSL_RSM_HEAD") + with patch('ale.drivers.msl_drivers.spice.bods2c', new_callable=PropertyMock, return_value=-76000) as bods2c: + assert self.driver.final_inst_frame == -76000 + bods2c.assert_called_with("MSL_ROVER") def test_cahvor_camera_dict(self): cahvor_camera_dict = 
self.driver.cahvor_camera_dict @@ -73,7 +73,7 @@ def test_sensor_frame_id(self): def test_focal2pixel_lines(self): with patch('ale.drivers.msl_drivers.spice.bods2c', new_callable=PropertyMock, return_value=-76220) as bods2c, \ patch('ale.drivers.msl_drivers.spice.gdpool', new_callable=PropertyMock, return_value=[100]) as gdpool: - np.testing.assert_allclose(self.driver.focal2pixel_lines, [0, 0, 137.96844341513602]) + np.testing.assert_allclose(self.driver.focal2pixel_lines, [0, 0, -137.96844341513602]) bods2c.assert_called_with('MSL_MASTCAM_RIGHT') gdpool.assert_called_with('INS-76220_FOCAL_LENGTH', 0, 1)
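
Note on the position fix above (an illustrative sketch, not part of the patch): the corrected Cahvor.sensor_position treats the CAHVOR "C" vector as the camera center in the rover frame, rotates it into ECEF with frame_chain.compute_rotation(rover_frame, ecef_frame), and adds it to the rover position. The sketch below mimics that step with scipy standing in for ALE's FrameChain; every numeric value is a made-up placeholder.

    import numpy as np
    from scipy.spatial.transform import Rotation

    # Hypothetical rover position in ECEF (meters) and CAHVOR "C" vector
    # (camera center in the rover frame, meters).
    rover_position_ecef = np.array([-42.9e3, -2878.1e3, -1794.0e3])
    cam_ctr_rover = np.array([0.7, 0.5, -1.9])

    # Stand-in for frame_chain.compute_rotation(rover_frame, ecef_frame).apply_at(...)
    rover2ecef = Rotation.from_euler("zyx", [120.0, -5.0, 10.0], degrees=True)

    # Camera center in ECEF = rover position + rover-to-camera offset rotated into ECEF
    camera_position_ecef = rover_position_ecef + rover2ecef.apply(cam_ctr_rover)
    print(camera_position_ecef)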
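
The compute_h_c/compute_h_s/compute_v_c/compute_v_s docstrings corrected above refer to the standard CAHV decomposition, and the new Navcam branch of focal_length/pixel_size uses the resulting scales directly in pixel units. The sketch below assumes the usual CAHV conventions (A, H, V are the axis, horizontal, and vertical vectors); the vectors are placeholders, not MSL calibration values.

    import numpy as np

    cahv = {
        "A": np.array([0.0, 0.0, 1.0]),        # camera axis (unit vector)
        "H": np.array([4700.0, 0.0, 640.0]),   # horizontal vector
        "V": np.array([0.0, 4700.0, 512.0]),   # vertical vector
    }

    h_c = np.dot(cahv["H"], cahv["A"])                    # horizontal principal point (pixels)
    v_c = np.dot(cahv["V"], cahv["A"])                    # vertical principal point (pixels)
    h_s = np.linalg.norm(np.cross(cahv["A"], cahv["H"]))  # horizontal scale = focal length in pixels
    v_s = np.linalg.norm(np.cross(cahv["A"], cahv["V"]))  # vertical scale = focal length in pixels

    # Navcam branch in MslMastcamPds3NaifSpiceDriver: the focal length is taken
    # directly in pixels (with the MSL sign convention) and the pixel size is 1.
    navcam_focal_length = -(h_s + v_s) / 2.0
    navcam_pixel_size = 1.0
    print(h_c, v_c, navcam_focal_length, navcam_pixel_size)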
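
Finally, a hypothetical end-to-end usage sketch for the updated MSL driver. The label name and meta-kernel path are placeholders, not files shipped with ALE; "kernels" is the usual props key for handing NAIF kernels to a driver, and "nadir"/"landed" are the optional flags read by the code above. The ISD keys queried at the end match the test JSON in this changeset.

    import json
    import ale

    # Placeholder file names; substitute a real Navcam/Mastcam PDS3 label
    # and an MSL meta-kernel before running.
    isd_str = ale.loads("NLB_PLACEHOLDER.LBL",
                        props={"kernels": ["msl_placeholder.tm"]},
                        formatter="ale")
    isd = json.loads(isd_str)
    print(isd["focal_length_model"]["focal_length"])
    print(isd["instrument_position"]["positions"][0])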