diff --git a/node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py b/node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py
index 3475f8ac0..b7a822e70 100644
--- a/node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py
+++ b/node-hub/dora-pyorbbecksdk/dora_pyorbbecksdk/main.py
@@ -32,6 +32,22 @@
         raise err
 
 
+class TemporalFilter:
+    def __init__(self, alpha):
+        self.alpha = alpha
+        self.previous_frame = None
+
+    def process(self, frame):
+        if self.previous_frame is None:
+            result = frame
+        else:
+            result = cv2.addWeighted(
+                frame, self.alpha, self.previous_frame, 1 - self.alpha, 0
+            )
+        self.previous_frame = result
+        return result
+
+
 def yuyv_to_bgr(frame: np.ndarray, width: int, height: int) -> np.ndarray:
     yuyv = frame.reshape((height, width, 2))
     bgr_image = cv2.cvtColor(yuyv, cv2.COLOR_YUV2BGR_YUY2)
@@ -108,6 +124,8 @@ def frame_to_bgr_image(frame: VideoFrame):
 import pyarrow as pa
 
 ESC_KEY = 27
+MIN_DEPTH_METERS = 0.01
+MAX_DEPTH_METERS = 15.0
 
 DEVICE_INDEX = int(os.getenv("DEVICE_INDEX", "0"))
 
@@ -118,23 +136,36 @@ def main():
     ctx = Context()
     device_list = ctx.query_devices()
     device = device_list.get_device_by_index(int(DEVICE_INDEX))
+    temporal_filter = TemporalFilter(alpha=0.5)
     pipeline = Pipeline(device)
     profile_list = pipeline.get_stream_profile_list(OBSensorType.COLOR_SENSOR)
     try:
         color_profile: VideoStreamProfile = profile_list.get_video_stream_profile(
-            640, 0, OBFormat.RGB, 30
+            640, 480, OBFormat.RGB, 30
         )
     except OBError as e:
         print(e)
         color_profile = profile_list.get_default_video_stream_profile()
         print("color profile: ", color_profile)
+    profile_list = pipeline.get_stream_profile_list(OBSensorType.DEPTH_SENSOR)
+    try:
+        depth_profile: VideoStreamProfile = profile_list.get_video_stream_profile(
+            640, 480, OBFormat.Y16, 30
+        )
+    except OBError as e:
+        print(e)
+        depth_profile = profile_list.get_default_video_stream_profile()
+        print("depth profile: ", depth_profile)
     config.enable_stream(color_profile)
+    config.enable_stream(depth_profile)
     pipeline.start(config)
     for _event in node:
         try:
             frames: FrameSet = pipeline.wait_for_frames(100)
             if frames is None:
                 continue
+
+            # Get Color image
             color_frame = frames.get_color_frame()
             if color_frame is None:
                 continue
@@ -143,9 +174,40 @@ def main():
             if color_image is None:
                 print("failed to convert frame to image")
                 continue
+            # Send Color Image
             ret, frame = cv2.imencode("." + "jpeg", color_image)
             if ret:
                 node.send_output("image", pa.array(frame), {"encoding": "jpeg"})
+
+            # Get Depth data
+            depth_frame = frames.get_depth_frame()
+            if depth_frame is None:
+                continue
+            width = depth_frame.get_width()
+            height = depth_frame.get_height()
+            scale = depth_frame.get_depth_scale()
+            depth_data = np.frombuffer(depth_frame.get_data(), dtype=np.uint16)
+            depth_data = depth_data.reshape((height, width))
+            depth_data = depth_data.astype(np.float32) * scale * 0.001
+            depth_data = np.where(
+                (depth_data > MIN_DEPTH_METERS) & (depth_data < MAX_DEPTH_METERS),
+                depth_data,
+                0,
+            )
+            depth_data = temporal_filter.process(depth_data)
+            # Send Depth data
+            storage = pa.array(depth_data.ravel())
+            node.send_output("depth", storage)
+            # Convert to Image
+            depth_image = cv2.normalize(
+                depth_data, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U
+            )
+            # Send Depth Image
+            depth_image = cv2.applyColorMap(depth_image, cv2.COLORMAP_JET)
+            ret, frame = cv2.imencode("." + "jpeg", depth_image)
+            if ret:
+                node.send_output("image_depth", pa.array(frame), {"encoding": "jpeg"})
+
         except KeyboardInterrupt:
             break
     pipeline.stop()
diff --git a/node-hub/dora-pyorbbecksdk/pyproject.toml b/node-hub/dora-pyorbbecksdk/pyproject.toml
index 485c7459d..2755a2deb 100644
--- a/node-hub/dora-pyorbbecksdk/pyproject.toml
+++ b/node-hub/dora-pyorbbecksdk/pyproject.toml
@@ -1,7 +1,10 @@
 [tool.poetry]
 name = "dora-pyorbbecksdk"
 version = "0.3.8"
-authors = ["Haixuan Xavier Tao "]
+authors = [
+    "Haixuan Xavier Tao ",
+    "Xiang Yang ",
+]
 description = "Dora Node for capturing video with PyOrbbeck SDK"
 readme = "README.md"