Comments (2)
Here's some minimal(ish) reproducible code, meant to be run on the host, with the spatial_image_detections code running on the ESP32. The commented-out lines make the pipeline more like the one in my real application, but I was able to reproduce the SPI output stopping even without them.
#!/usr/bin/env python3
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
from time import sleep
"""
Spatial detection network demo over SPI.
Performs inference on RGB camera and retrieves spatial location coordinates: x,y,z relative to the center of depth map.
"""
spiOut = True
# MobilenetSSD label texts
labelMap = [
    "background",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]
syncNN = False
# Get argument first
nnBlobPath = "/home/depthai_testing/submodules/depthai-python/examples/models/mobilenet-ssd_openvino_2021.4_5shave.blob"
# nnBlobPath = str(
#     Path("/home/depthai_testing/submodules/depthai-python/examples/models/mobilenet-ssd_openvino_2021.4_5shave.blob")
#     .resolve()
#     .absolute()
# )
if len(sys.argv) > 1:
    nnBlobPath = sys.argv[1]
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
colorCam = pipeline.create(dai.node.ColorCamera)
spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
stereo = pipeline.create(dai.node.StereoDepth)
xoutRgb = pipeline.create(dai.node.XLinkOut)
# xoutNN = pipeline.create(dai.node.XLinkOut)
# xoutBoundingBoxDepthMapping = pipeline.create(dai.node.XLinkOut)
xoutDepth = pipeline.create(dai.node.XLinkOut)
# serial_out_imu = pipeline.create(dai.node.XLinkOut)
serial_out_confidence_map = pipeline.create(dai.node.XLinkOut)
serial_out_left = pipeline.create(dai.node.XLinkOut)
serial_out_right = pipeline.create(dai.node.XLinkOut)
serial_out_sys_info = pipeline.create(dai.node.XLinkOut)
xoutRgb.setStreamName("rgb")
# xoutNN.setStreamName("detections")
# xoutBoundingBoxDepthMapping.setStreamName("boundingBoxDepthMapping")
xoutDepth.setStreamName("depth")
# serial_out_imu.setStreamName("imu")
serial_out_confidence_map.setStreamName("conf_map")
serial_out_left.setStreamName("left")
serial_out_right.setStreamName("right")
serial_out_sys_info.setStreamName("sys_info")
spiOutSpatialNN = pipeline.create(dai.node.SPIOut)
spiOutSpatialNN.input.setBlocking(False)
spiOutSpatialNN.input.setQueueSize(1)
spiOutSpatialNN.setStreamName("spimetaout")
spiOutSpatialNN.setBusId(0)
colorCam.setPreviewSize(300, 300)
colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
colorCam.setInterleaved(False)
colorCam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)
# colorCam.setFps(30.0)
# colorCam.setBoardSocket(dai.CameraBoardSocket.RGB)
# colorCam.setPreviewKeepAspectRatio(False)
# colorCam.initialControl.setManualFocus(120)
# colorCam.setIspScale(1, 3)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
# monoLeft.setFps(30.0)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
# monoRight.setFps(30.0)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
# setting node configs
stereo.initialConfig.setConfidenceThreshold(255)
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setLeftRightCheck(True)
spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
spatialDetectionNetwork.input.setBlocking(False)
spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)
spatialDetectionNetwork.setDepthLowerThreshold(100)
spatialDetectionNetwork.setDepthUpperThreshold(5000)
# imu_sensors = [
# dai.IMUSensor.ACCELEROMETER_RAW,
# dai.IMUSensor.GYROSCOPE_RAW,
# dai.IMUSensor.MAGNETOMETER_RAW,
# dai.IMUSensor.ROTATION_VECTOR,
# ]
# imu = pipeline.create(dai.node.IMU)
# imu.enableIMUSensor(imu_sensors, 50)
# imu.setBatchReportThreshold(1) # Minimum packets per message
# imu.setMaxBatchReports(10) # Maximum packets per message
sys_info = pipeline.create(dai.node.SystemLogger)
# Create outputs
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
colorCam.preview.link(spatialDetectionNetwork.input)
stereo.depth.link(spatialDetectionNetwork.inputDepth)
if syncNN:
    spatialDetectionNetwork.passthrough.link(xoutRgb.input)
else:
    colorCam.preview.link(xoutRgb.input)
# spatialDetectionNetwork.out.link(xoutNN.input)
# spatialDetectionNetwork.boundingBoxMapping.link(xoutBoundingBoxDepthMapping.input)
spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input)
# imu.out.link(serial_out_imu.input)
stereo.confidenceMap.link(serial_out_confidence_map.input)
monoLeft.out.link(serial_out_left.input)
monoRight.out.link(serial_out_right.input)
sys_info.out.link(serial_out_sys_info.input)
spatialDetectionNetwork.out.link(spiOutSpatialNN.input)
def printSystemInformation(info):
    if info is None:
        return
    m = 1024 * 1024  # MiB
    print(
        f"Ddr used / total - {info.ddrMemoryUsage.used / m:.2f} / {info.ddrMemoryUsage.total / m:.2f} MiB"
    )
    print(
        f"Cmx used / total - {info.cmxMemoryUsage.used / m:.2f} / {info.cmxMemoryUsage.total / m:.2f} MiB"
    )
    print(
        f"LeonCss heap used / total - {info.leonCssMemoryUsage.used / m:.2f} / {info.leonCssMemoryUsage.total / m:.2f} MiB"
    )
    print(
        f"LeonMss heap used / total - {info.leonMssMemoryUsage.used / m:.2f} / {info.leonMssMemoryUsage.total / m:.2f} MiB"
    )
    t = info.chipTemperature
    print(
        f"Chip temperature - average: {t.average:.2f}, css: {t.css:.2f}, mss: {t.mss:.2f}, upa: {t.upa:.2f}, dss: {t.dss:.2f}"
    )
    print(
        f"Cpu usage - Leon CSS: {info.leonCssCpuUsage.average * 100:.2f}%, Leon MSS: {info.leonMssCpuUsage.average * 100:.2f} %"
    )
    print("----------------------------------------")
# Pipeline defined, now connect to the device
with dai.Device(pipeline) as device:
    # Start pipeline
    # device.startPipeline()
    # if not spiOut:
    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    # detectionNNQueue = device.getOutputQueue(
    #     name="detections", maxSize=4, blocking=False
    # )
    # xoutBoundingBoxDepthMapping = device.getOutputQueue(
    #     name="boundingBoxDepthMapping", maxSize=4, blocking=False
    # )
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    # imu_queue = device.getOutputQueue(name="imu", maxSize=4, blocking=False)
    conf_map_queue = device.getOutputQueue(name="conf_map", maxSize=4, blocking=False)
    left_queue = device.getOutputQueue(name="left", maxSize=4, blocking=False)
    right_queue = device.getOutputQueue(name="right", maxSize=4, blocking=False)
    sys_info_queue = device.getOutputQueue(name="sys_info", maxSize=4, blocking=False)
    frame = None
    detections = []
    start_time = time.monotonic()
    counter = 0
    fps = 0
    color = (255, 255, 255)
    while True:
        # if spiOut:
        #     sleep(1)
        #     continue
        inPreview = previewQueue.get()
        # inNN = detectionNNQueue.get()
        depth = depthQueue.get()
        conf_map = conf_map_queue.get()
        left = left_queue.get()
        right = right_queue.get()
        # while imu_queue.has():
        #     imu_queue.get()
        printSystemInformation(sys_info_queue.tryGet())
        counter += 1
        current_time = time.monotonic()
        if (current_time - start_time) > 1:
            fps = counter / (current_time - start_time)
            counter = 0
            start_time = current_time
        frame = inPreview.getCvFrame()
        depthFrame = depth.getFrame()
        conf_map_frame = conf_map.getCvFrame()
        left_frame = left.getCvFrame()
        right_frame = right.getCvFrame()
        depthFrameColor = cv2.normalize(
            depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1
        )
        depthFrameColor = cv2.equalizeHist(depthFrameColor)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
        # detections = inNN.detections
        # if len(detections) != 0:
        #     boundingBoxMapping = xoutBoundingBoxDepthMapping.get()
        #     roiDatas = boundingBoxMapping.getConfigData()
        #     for roiData in roiDatas:
        #         roi = roiData.roi
        #         roi = roi.denormalize(
        #             depthFrameColor.shape[1], depthFrameColor.shape[0]
        #         )
        #         topLeft = roi.topLeft()
        #         bottomRight = roi.bottomRight()
        #         xmin = int(topLeft.x)
        #         ymin = int(topLeft.y)
        #         xmax = int(bottomRight.x)
        #         ymax = int(bottomRight.y)
        #         cv2.rectangle(
        #             depthFrameColor,
        #             (xmin, ymin),
        #             (xmax, ymax),
        #             color,
        #             cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
        #         )
        # # if the frame is available, draw bounding boxes on it and show the frame
        # height = frame.shape[0]
        # width = frame.shape[1]
        # for detection in detections:
        #     # denormalize bounding box
        #     x1 = int(detection.xmin * width)
        #     x2 = int(detection.xmax * width)
        #     y1 = int(detection.ymin * height)
        #     y2 = int(detection.ymax * height)
        #     try:
        #         label = labelMap[detection.label]
        #     except:
        #         label = detection.label
        #     cv2.putText(
        #         frame,
        #         str(label),
        #         (x1 + 10, y1 + 20),
        #         cv2.FONT_HERSHEY_TRIPLEX,
        #         0.5,
        #         color,
        #     )
        #     cv2.putText(
        #         frame,
        #         "{:.2f}".format(detection.confidence * 100),
        #         (x1 + 10, y1 + 35),
        #         cv2.FONT_HERSHEY_TRIPLEX,
        #         0.5,
        #         color,
        #     )
        #     cv2.putText(
        #         frame,
        #         f"X: {int(detection.spatialCoordinates.x)} mm",
        #         (x1 + 10, y1 + 50),
        #         cv2.FONT_HERSHEY_TRIPLEX,
        #         0.5,
        #         color,
        #     )
        #     cv2.putText(
        #         frame,
        #         f"Y: {int(detection.spatialCoordinates.y)} mm",
        #         (x1 + 10, y1 + 65),
        #         cv2.FONT_HERSHEY_TRIPLEX,
        #         0.5,
        #         color,
        #     )
        #     cv2.putText(
        #         frame,
        #         f"Z: {int(detection.spatialCoordinates.z)} mm",
        #         (x1 + 10, y1 + 80),
        #         cv2.FONT_HERSHEY_TRIPLEX,
        #         0.5,
        #         color,
        #     )
        #     cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
        # cv2.putText(
        #     frame,
        #     "NN fps: {:.2f}".format(fps),
        #     (2, frame.shape[0] - 4),
        #     cv2.FONT_HERSHEY_TRIPLEX,
        #     0.4,
        #     color,
        # )
        cv2.imshow("depth", depthFrameColor)
        cv2.imshow("confidence", conf_map_frame)
        cv2.imshow("left", left_frame)
        cv2.imshow("right", right_frame)
        cv2.imshow("rgb", frame)
        if cv2.waitKey(1) == ord("q"):
            break
from esp32-spi-message-demo.
After some offline conversation with @themarpe, we managed to find a workaround for this. The issue seems to be caused by the SPI clock speed. I changed it from 16 MHz to 4 MHz in components/depthai-spi-api/common/esp32_spi_impl.c:
// Configuration for the SPI device on the other side of the bus
spi_device_interface_config_t devcfg = {
    .command_bits = 0,
    .address_bits = 0,
    .dummy_bits = 0,
    .clock_speed_hz = 4000000,
    ...
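For context, in ESP-IDF's SPI master API, spi_device_interface_config_t is the device config passed to spi_bus_add_device(), and clock_speed_hz is in Hz: 4000000 here is 4 MHz, down from the original 16000000.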
After this change, I was having trouble getting the ESP32 to receive any data from the Myriad X at all. The issue here ended up being that the Myriad X needs to be running before the ESP32 tries to initialize a connection. I was able to achieve this in a hacky way by adding a delay at the beginning of the ESP32 code (sketched below), but it means that restarting the pipeline requires me to:
- Power cycle the entire camera.
- Manually start the pipeline during the ESP32 delay.
It's not ideal, but it gets the job done until a more elegant solution can be implemented.
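For reference, the delay itself is trivial. Here is a minimal sketch, assuming a stock ESP-IDF app_main and an arbitrary 10-second wait (tune the value to however long your pipeline takes to come up; the demo's actual SPI setup and message loop are elided):
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void app_main(void)
{
    // Hacky workaround: give the Myriad X time to boot and start its pipeline
    // before the ESP32 initializes the SPI connection. The 10 s value is an
    // assumption, not a value from the demo.
    vTaskDelay(pdMS_TO_TICKS(10000));

    // ...the demo's usual SPI init and receive loop would follow here...
}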
from esp32-spi-message-demo.
Related Issues (16)
- [FeatureRequest] People tracking decoding and publishing to cloud
- failed to allocate 0 bytes HOT 11
- `gen2-spi`/`device-yolo-parsing` running error HOT 1
- Build error: GPIO_PIN_INTR_NEGEDGE undeclared HOT 4
- Build error when running mjpeg-streaming-wifi HOT 6
- Camera clogging up sending messages to esp32. HOT 16
- Failed to resolve component 'depthai-spi-api'. HOT 2
- Raw IMU Data metadata parsing fails HOT 21
- SPIOut blocking the pipeline HOT 2
- how can upload dap file via wifi
- Cannot build the depthai-spi-api under ESP-IDF in Windows HOT 1
- sync problem HOT 1
- Cannot build due to '#include "esp_wifi.h" issues HOT 1
- Missing `mobilenet-ssd.blob` HOT 1
- Questions: Enabling SPI peripheral mode HOT 4
from esp32-spi-message-demo.