K230庐山派,想用摄像头1进行循迹,摄像头2进行yolo,先运行的yolo任务,但刚启动一下画面就弹错误;先运行任务一又会有另一个错误

Viewed 156

重现步骤

期待结果和实际结果

软硬件版本信息

错误日志

先运行yolo,错误如下:
异常: IDE interrupt
warning: sensor not call run()
MPY: soft reboot
CanMV v1.2.2(based on Micropython e00a144) on 2024-12-18; k230_canmv_lckfb with K230
find sensor gc2093_csi2, type 23, output 1920x1080@30
buffer pool : 5
sensor(0), mode 0, buffer_num 4, buffer_size 0
find sensor gc2093_csi1, type 20, output 1920x1080@60
异常: please run MediaManager._config() before MediaManager.init()
warning: sensor not call run()
MPY: soft reboot
CanMV v1.2.2(based on Micropython e00a144) on 2024-12-18; k230_canmv_lckfb with K230

先运行循迹,错误如下:
异常: IDE interrupt
warning: sensor not call run()
MPY: soft reboot
CanMV v1.2.2(based on Micropython e00a144) on 2024-12-18; k230_canmv_lckfb with K230
find sensor gc2093_csi2, type 23, output 1920x1080@30
buffer pool : 5
sensor(0), mode 0, buffer_num 4, buffer_size 0
find sensor gc2093_csi1, type 20, output 1920x1080@60
异常: please run MediaManager._config() before MediaManager.init()
warning: sensor not call run()
MPY: soft reboot
CanMV v1.2.2(based on Micropython e00a144) on 2024-12-18; k230_canmv_lckfb with K230
尝试解决过程

补充材料

import time, os, sys
from machine import UART
from machine import FPIOA
from media.sensor import *
from media.display import *
from media.media import *
import utime
from machine import Pin

from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import ujson
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import image
import random
import gc
import aidemo

# Route physical pins to the peripherals used below.
# Must run before the UART()/Pin() constructors that rely on these functions.
fpioa = FPIOA()
fpioa.set_function(11, FPIOA.UART2_TXD)
fpioa.set_function(12, FPIOA.UART2_RXD)
fpioa.set_function(52, FPIOA.GPIO52)
fpioa.set_function(36, FPIOA.GPIO36)
fpioa.set_function(37, FPIOA.GPIO37)

# UART2 @ 38400 8N1 -- carries the line-following steering messages to the MCU.
uart = UART(UART.UART2, baudrate=38400, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE)
sensor_id = 1       # camera id used by the line-following Sensor below
sensor = None       # filled in inside the try block; checked in finally

# Frame size requested from the line-following camera channel.
picture_width = 1200
picture_height = 360

# Display mode selection: one of "VIRT", "LCD" or "HDMI".
DISPLAY_MODE = "VIRT"

# Pick the display resolution matching the chosen mode.
if DISPLAY_MODE == "VIRT":
    # Virtual (IDE) display mode; width aligned up to a 16-pixel boundary.
    DISPLAY_WIDTH = ALIGN_UP(1920, 16)
    DISPLAY_HEIGHT = 1080
elif DISPLAY_MODE == "LCD":
    # 3.1-inch LCD mode
    DISPLAY_WIDTH = 800
    DISPLAY_HEIGHT = 480
elif DISPLAY_MODE == "HDMI":
    # HDMI expansion-board mode
    DISPLAY_WIDTH = 1920
    DISPLAY_HEIGHT = 1080
else:
    raise ValueError("未知的 DISPLAY_MODE,请选择 'VIRT', 'LCD' 或 'HDMI'")


# 自定义YOLOv8检测类
class ObjectDetectionApp(AIBase):
    """YOLOv8-style object detector built on AIBase.

    Decodes raw kmodel output into rows of [left, top, right, bottom,
    score, class_idx] in rgb888p coordinates, filters them by confidence,
    applies NMS, and draws the survivors on the PipeLine OSD image.
    """

    def __init__(self, kmodel_path, labels, model_input_size, max_boxes_num,
                 confidence_threshold=0.5, nms_threshold=0.2,
                 rgb888p_size=[224, 224], display_size=[1920, 1080], debug_mode=0):
        """
        kmodel_path          -- path of the .kmodel file to load.
        labels               -- class-name list indexed by class id.
        model_input_size     -- [w, h] the model expects.
        max_boxes_num        -- cap on detections returned by postprocess().
        confidence_threshold -- minimum class score to keep a box.
        nms_threshold        -- IoU threshold used during NMS.
        rgb888p_size         -- [w, h] of frames the sensor hands to the AI.
        display_size         -- [w, h] of the display layer used for drawing.
        debug_mode           -- >0 enables ScopedTiming printouts.
        """
        super().__init__(kmodel_path, model_input_size, rgb888p_size, debug_mode)
        self.kmodel_path = kmodel_path
        self.labels = labels
        # Model input resolution
        self.model_input_size = model_input_size
        # Threshold settings
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.max_boxes_num = max_boxes_num
        # Image resolution the sensor gives to the AI (width 16-aligned)
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        # Display resolution (width 16-aligned)
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        self.debug_mode = debug_mode
        # Preset ARGB box colors, cycled per class id by get_color()
        self.color_four = [(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230),
                           (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70),
                           (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0),
                           (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255),
                           (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)]
        # Scale factors from model-input space back to the AI frame
        self.x_factor = float(self.rgb888p_size[0]) / self.model_input_size[0]
        self.y_factor = float(self.rgb888p_size[1]) / self.model_input_size[1]
        # Ai2d instance implements the model preprocessing
        self.ai2d = Ai2d(debug_mode)
        # Configure Ai2d input/output layout and dtype
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8)

    def config_preprocess(self, input_image_size=None):
        """Configure the Ai2d resize stage.

        input_image_size -- optional [w, h] override; defaults to the
        sensor-to-AI frame size (self.rgb888p_size).
        """
        with ScopedTiming("set preprocess config", self.debug_mode > 0):
            ai2d_input_size = input_image_size if input_image_size else self.rgb888p_size
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            self.ai2d.build([1, 3, ai2d_input_size[1], ai2d_input_size[0]],
                            [1, 3, self.model_input_size[1], self.model_input_size[0]])

    def postprocess(self, results):
        """Decode the raw model output into detections.

        Returns [] when nothing passes the confidence threshold, otherwise
        an ndarray whose rows are [left, top, right, bottom, score,
        class_idx], at most self.max_boxes_num of them.
        """
        with ScopedTiming("postprocess", self.debug_mode > 0):
            result = results[0]
            # Flatten (1, C, N) -> (C, N), then transpose to one row per anchor
            result = result.reshape((result.shape[0] * result.shape[1], result.shape[2]))
            output_data = result.transpose()
            boxes_ori = output_data[:, 0:4]     # cx, cy, w, h
            scores_ori = output_data[:, 4:]     # per-class scores
            confs_ori = np.max(scores_ori, axis=-1)
            inds_ori = np.argmax(scores_ori, axis=-1)
            boxes, scores, inds = [], [], []
            for i in range(len(boxes_ori)):
                # BUGFIX: read the instance threshold instead of the same-named
                # module global (which only exists when this file runs as __main__).
                if confs_ori[i] > self.confidence_threshold:
                    scores.append(confs_ori[i])
                    inds.append(inds_ori[i])
                    x = boxes_ori[i, 0]
                    y = boxes_ori[i, 1]
                    w = boxes_ori[i, 2]
                    h = boxes_ori[i, 3]
                    # Convert center/size to corner coords scaled to the AI frame
                    left = int((x - 0.5 * w) * self.x_factor)
                    top = int((y - 0.5 * h) * self.y_factor)
                    right = int((x + 0.5 * w) * self.x_factor)
                    bottom = int((y + 0.5 * h) * self.y_factor)
                    boxes.append([left, top, right, bottom])
            if len(boxes) == 0:
                return []
            boxes = np.array(boxes)
            scores = np.array(scores)
            inds = np.array(inds)
            # NMS (BUGFIX: instance threshold, same reason as above)
            keep = self.nms(boxes, scores, self.nms_threshold)
            dets = np.concatenate((boxes, scores.reshape((len(boxes), 1)), inds.reshape((len(boxes), 1))), axis=1)
            dets_out = []
            for keep_i in keep:
                dets_out.append(dets[keep_i])
            dets_out = np.array(dets_out)
            dets_out = dets_out[:self.max_boxes_num, :]
            return dets_out

    def draw_result(self, pl, dets):
        """Draw detection boxes and labels on pl.osd_img (clears it first)."""
        with ScopedTiming("display_draw", self.debug_mode > 0):
            # BUGFIX: explicit length test -- `if dets:` is ambiguous for a
            # multi-row ndarray; postprocess may return [] or an ndarray.
            if len(dets) > 0:
                pl.osd_img.clear()
                for det in dets:
                    x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4])
                    # Rescale from the AI frame to display coordinates
                    x = x1 * self.display_size[0] // self.rgb888p_size[0]
                    y = y1 * self.display_size[1] // self.rgb888p_size[1]
                    w = (x2 - x1) * self.display_size[0] // self.rgb888p_size[0]
                    h = (y2 - y1) * self.display_size[1] // self.rgb888p_size[1]
                    pl.osd_img.draw_rectangle(x, y, w, h, color=self.get_color(int(det[5])), thickness=4)
                    pl.osd_img.draw_string_advanced(x, y - 50, 32, " " + self.labels[int(det[5])] + " " + str(round(det[4], 2)), color=self.get_color(int(det[5])))
            else:
                pl.osd_img.clear()

    def nms(self, boxes, scores, thresh):
        """Pure Python NMS baseline; returns indices of kept boxes."""
        x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = np.argsort(scores, axis=0)[::-1]    # highest score first
        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            # Gather the remaining candidates' coordinates and areas
            new_x1, new_y1, new_x2, new_y2, new_areas = [], [], [], [], []
            for order_i in order:
                new_x1.append(x1[order_i])
                new_x2.append(x2[order_i])
                new_y1.append(y1[order_i])
                new_y2.append(y2[order_i])
                new_areas.append(areas[order_i])
            new_x1 = np.array(new_x1)
            new_x2 = np.array(new_x2)
            new_y1 = np.array(new_y1)
            new_y2 = np.array(new_y2)
            # Intersection of box i with every candidate
            xx1 = np.maximum(x1[i], new_x1)
            yy1 = np.maximum(y1[i], new_y1)
            xx2 = np.minimum(x2[i], new_x2)
            yy2 = np.minimum(y2[i], new_y2)
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            new_areas = np.array(new_areas)
            ovr = inter / (areas[i] + new_areas - inter)    # IoU
            new_order = []
            for ovr_i, ind in enumerate(ovr):
                if ind < thresh:
                    new_order.append(order[ovr_i])
            # BUGFIX: uint8 truncated indices above 255; uint16 covers the
            # anchor counts YOLO-style models actually produce.
            order = np.array(new_order, dtype=np.uint16)
        return keep

    def get_color(self, x):
        """Return a preset color for class index x (wraps around the table)."""
        idx = x % len(self.color_four)
        return self.color_four[idx]


if __name__=="__main__":
    # Display mode for the YOLO pipeline; "hdmi" or "lcd"
    display_mode="hdmi"
    rgb888p_size=[320,320]

    if display_mode=="hdmi":
        display_size=[1920,1080]
    else:
        display_size=[800,480]
    # Model path
    kmodel_path="/sdcard/best.kmodel"
    labels = ["1", "2", "3", "4", "5", "6", "7", "8"]
    # Other parameters (postprocess reads these module-level names)
    confidence_threshold = 0.2
    nms_threshold = 0.2
    max_boxes_num = 50

    # Initialize the PipeLine.
    # NOTE(review): PipeLine.create() configures and starts the media/display
    # stack itself; the standalone Sensor + MediaManager.init() in the try
    # block below then collide with it, producing "please run
    # MediaManager._config() before MediaManager.init()". Presumably both
    # cameras need to share one media session -- confirm against the CanMV
    # dual-camera examples.
    pl=PipeLine(rgb888p_size=rgb888p_size,display_size=display_size,display_mode=display_mode)
    pl.create()
    # Initialize the custom object-detection instance
    ob_det=ObjectDetectionApp(kmodel_path,labels=labels,model_input_size=[320,320],max_boxes_num=max_boxes_num,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0)
    ob_det.config_preprocess()



try:

    # Construct a camera object with the default configuration
    sensor = Sensor(id=sensor_id)
    # Reset the camera sensor
    sensor.reset()

    # No mirroring/flipping needed
    # Set horizontal mirror
    # sensor.set_hmirror(False)
    # Set vertical flip
    # sensor.set_vflip(False)

    # Set channel 0 output size to the line-following resolution
    sensor.set_framesize(width=picture_width, height=picture_height, chn=CAM_CHN_ID_0)
    # Set channel 0 output pixel format to RGB565
    sensor.set_pixformat(Sensor.RGB565, chn=CAM_CHN_ID_0)

    # Initialize the display according to the selected mode
    if DISPLAY_MODE == "VIRT":
        Display.init(Display.VIRT, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, fps=80)
    elif DISPLAY_MODE == "LCD":
        Display.init(Display.ST7701, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, to_ide=True)
    elif DISPLAY_MODE == "HDMI":
        Display.init(Display.LT9611, width=DISPLAY_WIDTH, height=DISPLAY_HEIGHT, to_ide=True)

    # Initialize the media manager.
    # NOTE(review): this is the line that raises "please run
    # MediaManager._config() before MediaManager.init()" -- PipeLine.create()
    # above already initialized the media stack, so a second init is rejected.
    MediaManager.init()
    # Start the sensor
    sensor.run()

    # Color thresholds to track
    # Format: [min_L, max_L, min_A, max_A, min_B, max_B]
    color_threshold = [(0, 100, 9, 127, -128, 127)] # red (0, 100, 10, 127, 5, 127)
    color_ending = [(0, 100, 9, 127, -128, 127)]

    # Per-ROI weights used when averaging the three scan rows
    road_Weight = [0.5,0.2,0.3]
    clock = utime.clock()

    # GPIO outputs signalling cross / ending detection to the MCU
    button1 = Pin(52, Pin.OUT, Pin.PULL_NONE)
    button2 = Pin(36, Pin.OUT, Pin.PULL_NONE)
    button3 = Pin(37, Pin.OUT, Pin.PULL_NONE)

    # 1 = line following, 2 = YOLO digit recognition
    mission = 2

    while True:
        os.exitpoint()
        clock.tick()


        if mission == 1:

            # Capture an image from channel 0
            img = sensor.snapshot(chn=CAM_CHN_ID_0)
            #img.mode(1, threshold=True, offset=0, invert=False) #众数滤波

            # color_threshold is the color to find; area_threshold filters out
            # blobs smaller than that area. Three horizontal scan bands:
            roads1 = img.find_blobs(color_threshold, area_threshold = 700, roi=(0,320,1200,40))
            roads2 = img.find_blobs(color_threshold, area_threshold = 700, roi=(0,160,1200,40))
            roads3 = img.find_blobs(color_threshold, area_threshold = 700, roi=(0,0,1200,40))
            endings = img.find_blobs(color_ending, area_threshold = 700, roi=(0,0,1200,40))

#            lines = img.find_blobs(color_threshold, area_threshold = 20000, roi=(0,0,800,100))
            # Blob counters per scan band
            cnt1 = 0
            cnt2 = 0
            cnt3 = 0
            cnt4 = 0
            # Cross-bar detection flag
            cross1 = 0
            #cross2 = 0
            #cross3 = 0

            # Per-band blob center x and detection flags
            detect_result = [0,0,0]
            is_detect = [0,0,0]

            if roads1:
                # Iterate over each detected blob in the bottom band
                for road1 in roads1:
                    img.draw_rectangle(road1[0:4])
                    detect_result[0] = road1[5]
                    cnt1 += 1
                    # A very wide blob means a cross bar
                    if road1[2] > 4000:
                        cross1 = 1
                        cnt1 += 1
                        #message_road2 = "t\r\n"
                        #uart.write(message_road2)
                        #print(message_road2)
                    else:
                        # NOTE(review): 'cross' looks like it should be
                        # 'cross1' -- this assignment has no effect. Confirm.
                        cross = 0
            if cross1 == 1:
                button1.high()
            else:
                button1.low()


            if cnt1 == 1:
                is_detect[0] = 1
                img.draw_circle(detect_result[0], 300, 15, color=(0, 0, 255), thickness=3)  # marker circle

            if roads2:
                # Iterate over each detected blob in the middle band
                for road2 in roads2:
                    img.draw_rectangle(road2[0:4])
                    detect_result[1] = road2[5]
                    cnt2 += 1
                    if road2[2] > 4000:
                        cnt2+=1
#                        message_road2 = "t\r\n"
#                        uart.write(message_road2)
#                        print(message_road2)
            if cnt2 == 1:
                is_detect[1] = 1
                img.draw_circle(detect_result[1], 180, 15, color=(0, 0, 255), thickness=3)  # marker circle

            if roads3:
                # Iterate over each detected blob in the top band
                for road3 in roads3:
                    img.draw_rectangle(road3[0:4])
                    detect_result[2] = road3[5]
                    cnt3 += 1
                    if road3[2] > 4000:
                        cnt3 += 1
#                        message_road2 = "t\r\n"
#                        uart.write(message_road2)
#                        print(message_road2)
            if cnt3 == 1:
                is_detect[2] = 1
                img.draw_circle(detect_result[2], 20, 15, color=(0, 0, 255), thickness=3)  # marker circle


            # Count ending-color blobs in the top band
            if endings:
                for ending in endings:
                    img.draw_rectangle(ending[0:4])
                    cnt4 += 1

            # Many ending blobs -> signal "finish" on button2
            if cnt4 >= 7:
                button2.high()
            else:
                button2.low()


            # Weighted average of the detected band centers -> steering value
            if is_detect[0] == 1 or is_detect[1] == 1 or is_detect[2] == 1:
                value_ave = 0
                for i in range(3):
                    value_ave += is_detect[i]*road_Weight[i]*detect_result[i]
                value_ave = value_ave/(road_Weight[0]*is_detect[0]+road_Weight[1]*is_detect[1]+road_Weight[2]*is_detect[2])
                # Zero-pad to a fixed-width message ending in 'r'
                if value_ave<1000:
                    message_ave = '0'+str(int(value_ave)) + 'r\r\n'
                else:
                    message_ave = str(int(value_ave)) + 'r\r\n'
                img.draw_rectangle(int(value_ave-1), 0, 3, 360,color=(0, 255, 0), thickness=2)
            else:
                # Line lost
                message_ave = "l\r\n"
            uart.write(message_ave)
            print(message_ave)
            Display.show_image(img, x=0, y=0, layer=Display.LAYER_OSD0)


        if mission == 2:  # opening digit recognition


            with ScopedTiming("total",1):
                # Get the current frame
                img=pl.get_frame()
                # Run inference on the current frame
                res=ob_det.run(img)

                # +++ added: print detection results +++
                if len(res) > 0:
                    print(f"检测到 {len(res)} 个对象:")
                    for i, obj in enumerate(res):
                        x1, y1, x2, y2 = obj[0:4]  # bounding-box corners
                        conf = obj[4]               # confidence
                        cls_idx = int(obj[5])        # class index
                        cls_name = labels[cls_idx]   # class name

                        print(f"对象 {i+1}:")
                        print(f"  类别: {cls_name} ({cls_idx})")
                        print(f"  置信度: {conf:.2f}")
                        print(f"  坐标: 左上({x1}, {y1}), 右下({x2}, {y2})")
                        print(f"  宽度: {x2-x1}px, 高度: {y2-y1}px")
                else:
                    print("未检测到任何对象")

                # Draw results on the PipeLine OSD image
                ob_det.draw_result(pl,res)

                # Show the current drawing result
                pl.show_image()
                gc.collect()

#        print("fps = ", clock.fps())

except KeyboardInterrupt as e:
    print("用户停止: ", e)
except BaseException as e:
    print(f"异常: {e}")
finally:
    # Stop the sensor if it was created
    if isinstance(sensor, Sensor):
        sensor.stop()
    # Deinitialize the display module
    Display.deinit()
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
    utime.sleep_ms(100)
    # Release the media buffers
    MediaManager.deinit()
1 Answer

代码已经在群里给到你了,现在附在这里:

from media.sensor import *
from media.display import *
from media.media import *
from libs.PlatTasks import DetectionApp
from libs.Utils import *
from machine import UART, FPIOA, Pin
import image
import time, gc, _thread, ujson

# ---------- Global configuration ----------
sensor=None
display_size=[800,480]          # channel 0 / display resolution
redline_img_size=[640,480]      # channel 1 resolution (red-line thread)
rgb888p_size = [1280, 720]      # channel 2 resolution (AI thread)
# Stop flags polled by the two worker threads
det_stop=False
redline_stop=False
# OSD images; filled in by media_init()
det_osd_img=None
redline_osd_img=None

# ---------- UART & key ----------
fpioa = FPIOA()
fpioa.set_function(11, FPIOA.UART2_TXD)
fpioa.set_function(12, FPIOA.UART2_RXD)
fpioa.set_function(53, FPIOA.GPIO53)
KEY = Pin(53, Pin.IN, Pin.PULL_DOWN)
uart = UART(UART.UART2, baudrate=115200)

# ---------- Red-line detection parameters ----------
# LAB thresholds for the red line
thresholds = [(23, 57, 30, 127, -128, 127)]
# Three stacked steering ROIs: (x, y, w, h, weight)
ROIS1 = [(225, 0, 400, 160, 0.3), (225, 160, 400, 160, 0.35), (225, 320, 400, 160, 0.35)]
# Left/right side ROIs used for cross detection
ROIS2 = [(0, 150, 250, 300), (550, 150, 250, 300)]

# ---------- State variables ----------
key_flag = 0
flag = 0
turn_flag = 0
move_record = []
return_mode = False
current_index = 0
cross_detected = False
num = 1  # simulated external recognition (1=turn left, 2=turn right, else straight)

def media_init():
    """Initialize display, the tri-channel camera, and the media buffers.

    Channel 0 (YUV420SP, display-sized) is bound straight to the video
    layer; channel 1 (RGB565) feeds the red-line thread; channel 2
    (RGBP888) feeds the AI thread. Must run before the worker threads.
    """
    # BUGFIX: the original global list named 'osd_img'/'yolo_osd_img'
    # (never assigned) and omitted 'redline_osd_img', which therefore
    # stayed a dead local while the module-level variable remained None.
    global sensor, det_osd_img, redline_osd_img
    # BUGFIX: Display.init() was called a second time after bind_layer();
    # the display must be configured exactly once, before MediaManager.init().
    Display.init(Display.ST7701, width = display_size[0], height = display_size[1], to_ide = True, osd_num=3)

    sensor = Sensor(fps=30)
    sensor.reset()
    sensor.set_framesize(w = display_size[0], h = display_size[1],chn=CAM_CHN_ID_0)
    sensor.set_pixformat(Sensor.YUV420SP,chn=CAM_CHN_ID_0)
    sensor.set_framesize(w = redline_img_size[0], h = redline_img_size[1], chn=CAM_CHN_ID_1)
    sensor.set_pixformat(Sensor.RGB565,chn=CAM_CHN_ID_1)
    sensor.set_framesize(w = rgb888p_size[0], h = rgb888p_size[1], chn=CAM_CHN_ID_2)
    sensor.set_pixformat(Sensor.RGBP888,chn=CAM_CHN_ID_2)

    # Bind channel 0 straight to a video layer (no CPU copy)
    sensor_bind_info = sensor.bind_info(x = 0, y = 0, chn = CAM_CHN_ID_0)
    Display.bind_layer(**sensor_bind_info, layer = Display.LAYER_VIDEO1)

    # Overlay images drawn on top of the video layer
    det_osd_img = image.Image(display_size[0], display_size[1], image.ARGB8888)
    redline_osd_img = image.Image(display_size[0], display_size[1], image.ARGB8888)
    MediaManager.init()
    sensor.run()

def media_deinit():
    """Tear down camera, display and media buffers (reverse of media_init).

    The order matters: stop the sensor, deinit the display, wait briefly,
    then release the media buffers.
    """
    global sensor
    # NOTE(review): 'os' is not imported explicitly in this script --
    # presumably re-exported by one of the star imports; verify.
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
    sensor.stop()
    Display.deinit()
    time.sleep_ms(50)
    MediaManager.deinit()

# ---------- 红线线程 ----------
def calculate_error(blobs, rois):
    """Return the weighted horizontal distance of the tracked line from x=400.

    blobs -- one list of detected blobs per ROI (parallel to rois).
    rois  -- (x, y, w, h, weight) tuples; only the weight is used here.

    Side effects: sets the module globals `flag` (1 if any ROI saw a blob)
    and `turn_flag` (1 = line right of center, 2 = left of / at center).
    Returns 0 when no ROI contributed any weight.
    """
    global flag, turn_flag
    found = False
    total_weight = 0
    weighted_cx = 0
    for roi_blobs, roi in zip(blobs, rois):
        if not roi_blobs:
            continue
        found = True
        # Track only the biggest blob in each ROI
        biggest = max(roi_blobs, key=lambda b: b.pixels())
        weighted_cx += biggest.cx() * roi[4]
        total_weight += roi[4]
    flag = int(found)
    if not total_weight:
        return 0
    offset = weighted_cx / total_weight - 400
    if offset > 0:
        turn_flag = 1
        return offset
    turn_flag = 2
    return -offset

def filter_blob(blob):
    """Return True for blobs worth keeping.

    Rejects blobs smaller than 2000 pixels and blobs whose centroid falls
    in either of two small dead zones around (150, 250) and (650, 250).
    """
    if blob.pixels() < 2000:
        return False
    cx = blob.cx()
    cy = blob.cy()
    if 240 < cy < 260:
        # Dead-zone x ranges at the shared y band
        for lo, hi in ((140, 160), (640, 660)):
            if lo < cx < hi:
                return False
    return True

def redline_thread():
    """Line-following worker thread.

    Tracks the red line in channel-1 frames, detects crossings, records
    turn decisions outbound and replays them on the return trip, and
    streams "S:num,delta,flag,turn" packets over UART. Runs until the
    module-level redline_stop flag goes True.
    """
    global sensor,flag, turn_flag, return_mode, move_record, key_flag, cross_detected, current_index
    while True:
        if redline_stop:
            break
        img = sensor.snapshot(chn=CAM_CHN_ID_1)
        # Red detection in the three weighted steering ROIs
        mid_blobs = [img.find_blobs(thresholds, roi=roi[:4], merge=True) for roi in ROIS1]
        delta_pos_x = int(calculate_error(mid_blobs, ROIS1))
        # Key press toggles between normal and return mode
        if KEY.value() == 1:
            if flag == 0:
                return_mode = True
                current_index = len(move_record) - 1
                print("切换到回程模式")
            else:
                return_mode = False
                move_record.clear()
                print("切换到正常模式")
            key_flag = 1 - key_flag
            time.sleep_ms(300)  # crude debounce

        # Idle until the key arms the run
        if not key_flag:
            time.sleep_ms(50)
            continue
        # Left/right side blob detection for cross recognition
        left = img.find_blobs(thresholds, roi=ROIS2[0], merge=True)
        right = img.find_blobs(thresholds, roi=ROIS2[1], merge=True)
        left = [b for b in left if filter_blob(b)]
        right = [b for b in right if filter_blob(b)]
        is_cross = bool(left and right)
        # Automatic switch to return mode when the line is lost
        if flag == 0 and not return_mode:
            return_mode = True
            current_index = len(move_record)-1
            print("自动切换到回程模式")

        # Map the (simulated) recognition result to a move code:
        # 1 = left, 3 = right, 2 = straight
        move_prepare = 1 if num == 1 else (3 if num == 2 else 2)

        # Rising edge of a cross: record (outbound) or look up (return trip)
        if is_cross and not cross_detected:
            print("检测到十字边沿,记录动作")
            if not return_mode:
                move_record.append(move_prepare)
                print("记录动作:", move_record)
            else:
                if current_index >= 0:
                    print(f"回程:准备执行动作 {move_record[current_index]}")

        if is_cross:
            if not return_mode:
                # Outbound: force a hard steering offset through the cross
                if move_prepare == 1:
                    delta_pos_x = 400
                    turn_flag = 2
                elif move_prepare == 3:
                    delta_pos_x = 400
                    turn_flag = 1
                else:
                    delta_pos_x = 0
                    turn_flag = 0
            else:
                # Return trip: replay the recorded move with mirrored turns
                if current_index >= 0:
                    m = move_record[current_index]
                    if m == 1:
                        delta_pos_x = 410
                        turn_flag = 1
                    elif m == 3:
                        delta_pos_x = 400
                        turn_flag = 2
                    else:
                        delta_pos_x = 0
                        turn_flag = 0

        # NOTE(review): current_index is never decremented after a replayed
        # move, so the same recorded action is reused at every cross on the
        # return trip -- confirm whether `current_index -= 1` is missing.
        cross_detected = is_cross
        uart.write(f"S:{num},{delta_pos_x},{flag},{turn_flag}\n".encode())
        time.sleep_ms(50)

# ---------- AI 线程 ----------
def ai_thread():
    """AI worker thread.

    Loads the detection model described by deploy_config.json, runs it on
    channel-2 frames and draws results onto the detection OSD image.
    Runs until the module-level det_stop flag goes True, then releases
    the model.
    """
    global sensor,rgb888p_size,display_size,det_osd_img
    root_path = "/sdcard/mp_deployment_source/"
    deploy_conf = read_json(root_path + "/deploy_config.json")
    kmodel_path = root_path + deploy_conf["kmodel_path"]
    labels = deploy_conf["categories"]
    confidence_threshold = deploy_conf["confidence_threshold"]
    nms_threshold = deploy_conf["nms_threshold"]
    model_input_size = deploy_conf["img_size"]
    nms_option = deploy_conf["nms_option"]  # NOTE: read but unused below
    model_type = deploy_conf["model_type"]
    anchors = []
    if model_type == "AnchorBaseDet":
        # Anchor-based models need the three anchor groups flattened into one list
        anchors = deploy_conf["anchors"][0] + deploy_conf["anchors"][1] + deploy_conf["anchors"][2]

    det_app = DetectionApp("video", kmodel_path, labels, model_input_size, anchors, model_type, confidence_threshold,nms_threshold, rgb888p_size, display_size)
    det_app.config_preprocess()

    while True:
        if det_stop:
            break
        # Grab an RGBP888 frame from channel 2; pass it to the model by reference
        img =sensor.snapshot(chn=CAM_CHN_ID_2)
        img_np=img.to_numpy_ref()
        # Reset the result holder before each inference
        det_app.cur_result={"boxes":[],"scores":[], "idx":[]}
        res = det_app.run(img_np)
#        scores = res["scores"]
#        idxs = res["idx"]
#        boxes = res["boxes"]
#        for i in range(len(idxs)):
#            class_id = idxs[i]
#            score = scores[i]
#            box = boxes[i]
#            name = labels[class_id]
#            print(f"检测到物品: {name}, 置信度: {score:.2f}, 位置: {box}")
        det_app.draw_result(det_osd_img, res)
        Display.show_image(det_osd_img)
    det_app.deinit()

if __name__=="__main__":
    media_init()
    # ---------- Start worker threads ----------
    _thread.start_new_thread(redline_thread, ())
    _thread.start_new_thread(ai_thread, ())

    try:
        # The main thread just idles; the workers own the hardware.
        while True:
            time.sleep_ms(50)
    except BaseException as e:
        import sys
        sys.print_exception(e)
        # Signal both worker loops to exit
        det_stop=True
        redline_stop=True
    # Give the threads time to observe the stop flags before teardown
    time.sleep(1)
    media_deinit()
    gc.collect()