I cannot send my data over the serial port, yet the serial output does show the model run time, even though there is no function in my code that sends that. Could anyone help me figure this out?


Expected vs. actual result

I just need my data to be sent out over the UART.

Error log

See the attached screenshot (image.png).
What I have tried

Changing the data format
Supplementary material

2 Answers
from libs.PipeLine import PipeLine, ScopedTiming
from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from time import *
from machine import UART
from machine import FPIOA
import nncase_runtime as nn
import ulab.numpy as np
import time
import utime
import image
import random
import gc
import sys
import aicube

flag = 0
final_num = 0
change_flag = 0
left_flag = 0
right_flag = 0
go_straight_flag = 0
two_flag = 0
four_flag = 0

#num_list = []
#length = 10

#for _ in range(length):
#    my_list.append(None)

# UART2代码
fpioa = FPIOA()
fpioa.set_function(11,FPIOA.UART2_TXD)
fpioa.set_function(12,FPIOA.UART2_RXD)

uart=UART(UART.UART2,115200) #设置串口号2和波特率
class STM32_transmit():                             # 定义 STM32 发送类
    head1 = 0x2C                                    # uint8_t   帧头
    head2 = 0x12                                    # uint8_t   帧头
    flag1 = 0                                       # uint8_t   标志位
    flag2 = 0                                       # uint8_t   标志位
    flag3 = 0                                       # uint8_t   标志位
    flag4 = 0                                       # uint8_t   标志位
    flag5 = 0                                       # uint8_t   标志位
    flag6 = 0                                       # uint8_t   标志位
    flag7 = 0                                       # uint8_t   标志位
    flag8 = 0                                       # uint8_t   标志位
    tail = 0x5B                                     # uint8_t   帧尾
# 实例化类
TSTM32 = STM32_transmit()                           # 实例化 STM32_transmit() 为 TSTM32

# 定义打包函数
def TSTM32_data():                                  # 数据打包函数
    data=bytearray([TSTM32.head1,                   # 帧头
                    TSTM32.head2,                   # 帧头
                    TSTM32.flag1,                   # 标志位
                    TSTM32.flag2,                   # 标志位
                    TSTM32.flag3,                   # 标志位
                    TSTM32.flag4,                   # 标志位
                    TSTM32.flag5,                   # 标志位
                    TSTM32.flag6,                   # 标志位
                    TSTM32.flag7,                   # 标志位
                    TSTM32.flag8,                   # 标志位
                    TSTM32.tail                     # 帧尾
                    ])

    return data                                     # 返回打包好的数据


#uart.write(TSTM32_data())


# 自定义目标检测任务类
class DetectionApp(AIBase):
    def __init__(self,kmodel_path,labels,model_input_size=[640,640],anchors=[10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326],model_type="AnchorBaseDet",confidence_threshold=0.5,nms_threshold=0.25,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0):
        super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode)
        # kmodel路径
        self.kmodel_path=kmodel_path
        # 类别标签
        self.labels=labels
        # 模型输入分辨率
        self.model_input_size=model_input_size
        # 检测任务的锚框
        self.anchors=anchors
        # 模型类型,支持"AnchorBaseDet","AnchorFreeDet","GFLDet"三种模型
        self.model_type=model_type
        # 检测框类别置信度阈值
        self.confidence_threshold=confidence_threshold
        # 检测框NMS筛选阈值
        self.nms_threshold=nms_threshold
        # NMS选项,如果为True做类间NMS,如果为False做类内NMS
        self.nms_option=nms_option
        # 输出特征图的降采样倍数
        self.strides=strides
        # sensor给到AI的图像分辨率,宽16字节对齐
        self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]]
        # 视频输出VO分辨率,宽16字节对齐
        self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]]
        # 调试模式
        self.debug_mode=debug_mode
        # 检测框预置颜色值
        self.color_four=[(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230),
                         (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70),
                         (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0),
                         (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255),
                         (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)]
        # Ai2d实例,用于实现模型预处理
        self.ai2d=Ai2d(debug_mode)
        # 设置Ai2d的输入输出格式和类型
        self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8)

    # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看
    def config_preprocess(self,input_image_size=None):
        with ScopedTiming("set preprocess config",self.debug_mode > 0):
            # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸
            ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size
            # 计算padding参数
            top,bottom,left,right=self.get_padding_param()
            # 配置padding预处理
            self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [114,114,114])
            # 配置resize预处理
            self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel)
            # build预处理过程,参数为输入tensor的shape和输出tensor的shape
            self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]])

    # 自定义当前任务的后处理,这里调用了aicube模块的后处理接口
    def postprocess(self,results):
        with ScopedTiming("postprocess",self.debug_mode > 0):
            # AnchorBaseDet模型的后处理
            if self.model_type == "AnchorBaseDet":
                det_boxes = aicube.anchorbasedet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option)
            # GFLDet模型的后处理
            elif self.model_type == "GFLDet":
                det_boxes = aicube.gfldet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.nms_option)
            # AnchorFreeDet模型的后处理
            elif self.model_type=="AnchorFreeDet":
                det_boxes = aicube.anchorfreedet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(self.labels), self.confidence_threshold, self.nms_threshold, self.nms_option)
            else:
                det_boxes=None
            return det_boxes

    # 将结果绘制到屏幕上
    def draw_result(self,pl,det_boxes):
        global change_flag
        global final_num
        global left_flag
        global right_flag
        global go_straight_flag
        global two_flag
        global four_flag
        with ScopedTiming("draw osd",self.debug_mode > 0):
            if det_boxes:
                #time.sleep(0.3)
                pl.osd_img.clear()
                for det_boxe in det_boxes:
                    # 获取每一个检测框的坐标,并将其从原图分辨率坐标转换到屏幕分辨率坐标,将框和类别信息绘制在屏幕上
                    if len(det_boxes) == 1 and change_flag == 0:

                            #找框,找出参数
#---------------------------------------------------------------------------------------------------------------------------------
                        x1, y1, x2, y2 = det_boxe[2],det_boxe[3],det_boxe[4],det_boxe[5]
                        sx=int(x1 * self.display_size[0] // self.rgb888p_size[0])
                        sy=int(y1 * self.display_size[1] // self.rgb888p_size[1])
                        w = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0])
                        h = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1])
                        pl.osd_img.draw_rectangle(sx , sy , w , h , color=self.get_color(det_boxe[0]) ,thickness = 4)
                        label = self.labels[det_boxe[0]]
                        score = str(round(det_boxe[1],2))
                        pl.osd_img.draw_string_advanced(sx, sy-50,32, label + " " + score , color=self.get_color(det_boxe[0]))
#---------------------------------------------------------------------------------------------------------------------------------
#                        print(sx)
#                        print(sy)
                        final_num = int(label)#存目标房间号
                        change_flag += 1      #改变标志位以防止误识别进入该判断
                        TSTM32.flag1 = final_num
                        uart.write(TSTM32_data())

                    if len(det_boxes) == 2:#确定是两个数字
#                        print(final_num)
#                        pl.osd_img.clear()
#                        time.sleep(0.3)
#                        if two_flag == 0 :#防止误识别
##                            two_flag += 1
                            #找框,找出参数
#--------------------------------------------------------------------------------------------------------------------------------
                        x1, y1, x2, y2 = det_boxe[2],det_boxe[3],det_boxe[4],det_boxe[5]
                        sx=int(x1 * self.display_size[0] // self.rgb888p_size[0])
                        sy=int(y1 * self.display_size[1] // self.rgb888p_size[1])
                        w = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0])
                        h = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1])
                        pl.osd_img.draw_rectangle(sx , sy , w , h , color=self.get_color(det_boxe[0]) ,thickness = 4)
                        label = self.labels[det_boxe[0]]
                        score = str(round(det_boxe[1],2))
                        pl.osd_img.draw_string_advanced(sx, sy-50,32, label + " " + score , color=self.get_color(det_boxe[0]))
#---------------------------------------------------------------------------------------------------------------------------------

                            #数据清理以及判断方向
                        if int(label) == final_num:
                            if sx < 300:
                                left_flag = 1
                                TSTM32.flag1 = left_flag
                                uart.write(TSTM32_data())
#                                uart.write(bytearray(left_flag))
                                left_flag = 0


                            else:
                                right_flag = 2
#                                print("右走")
                                TSTM32.flag1 = right_flag
                                uart.write(TSTM32_data())
#                                uart.write(bytearray(right_flag))
                                right_flag = 0

                    if len(det_boxes) == 4 :
                        #找框,找出参数
#---------------------------------------------------------------------------------------------------------------------------
                        x1, y1, x2, y2 = det_boxe[2],det_boxe[3],det_boxe[4],det_boxe[5]
                        sx=int(x1 * self.display_size[0] // self.rgb888p_size[0])
                        sy=int(y1 * self.display_size[1] // self.rgb888p_size[1])
                        w = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0])
                        h = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1])
                        pl.osd_img.draw_rectangle(sx , sy , w , h , color=self.get_color(det_boxe[0]) ,thickness = 4)
                        label = self.labels[det_boxe[0]]
                        score = str(round(det_boxe[1],2))
                        pl.osd_img.draw_string_advanced(sx, sy-50,32, label + " " + score , color=self.get_color(det_boxe[0]))
#----------------------------------------------------------------------------------------------------------------------------

                        #数据清理以及判断方向
                        if int(label) == final_num:
                            if sx < 300:
                                left_flag = 1
#                                print("左走")
                                TSTM32.flag1 = left_flag
                                uart.write(TSTM32_data())
#                                uart.write(bytearray(left_flag))
                                left_flag = 0


                            else:
                                right_flag = 2
#                                print("右走")
                                TSTM32.flag1 = right_flag
                                uart.write(TSTM32_data())
#                                uart.write(bytearray(right_flag))
                                right_flag = 0
            else:
                pl.osd_img.clear()
                pl.osd_img.draw_rectangle(0, 0, 128, 128, color=(0,0,0,0))

    # 计算padding参数
    def get_padding_param(self):
        ratiow = float(self.model_input_size[0]) / self.rgb888p_size[0]
        ratioh = float(self.model_input_size[1]) / self.rgb888p_size[1]
        ratio = min(ratiow, ratioh)
        new_w = int(ratio * self.rgb888p_size[0])
        new_h = int(ratio * self.rgb888p_size[1])
        dw = float(self.model_input_size[0]- new_w) / 2
        dh = float(self.model_input_size[1] - new_h) / 2
        top = int(round(dh - 0.1))
        bottom = int(round(dh + 0.1))
        left = int(round(dw - 0.1))
        right = int(round(dw + 0.1))
        return top,bottom,left,right

    # 根据当前类别索引获取框的颜色
    def get_color(self, x):
        idx=x%len(self.color_four)
        return self.color_four[idx]


if __name__=="__main__":
    # 添加显示模式,支持"hdmi"和"lcd"
    display_mode="lcd"
    if display_mode=="lcd":
        display_size=[800,480]
    else:
        display_size=[1920,1080]
    # kmodel路径
    kmodel_path="/sdcard/app/tests/ai_test_kmodel/best_AnchorBaseDet_can2_5_n_20241120121751.kmodel"
    # 检测类别标签
    labels=[
            "0",
            "9",
            "1",
            "2",
            "3",
            "4",
            "5",
            "6",
            "7",
            "8"
        ]
    # 类别置信度阈值
    confidence_threshold=0.5
    # nms阈值
    nms_threshold = 0.5
    # 训练中使用的锚框,在线训练平台和AICube部署包的deploy_config.json文件中包含该字段,只有AnchorBaseDet需要该参数
    anchors=[104,200,140,151,121,181,
             147,180,136,212,162,207,
             179,223,202,261,265,352]
    # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率
    pl=PipeLine(rgb888p_size=[1280,720],display_size=display_size,display_mode=display_mode)
    pl.create()
    # 检测类实例,关注模型输入分辨率,传给AI的图像分辨率,显示的分辨率
    det=DetectionApp(kmodel_path,labels,model_input_size=[640,640],anchors=anchors,rgb888p_size=[1280,720],display_size=display_size,debug_mode=0)
    # 配置预处理过程
    det.config_preprocess()
    try:
        while True:
            os.exitpoint()
            with ScopedTiming("total",1):
                # 获取当前帧
                img=pl.get_frame()
                # 获得检测框
                det_boxes=det.run(img)
                # 绘制检测框和类别信息
                det.draw_result(pl,det_boxes)
                # 显示当前的绘制结果
                pl.show_image()
                gc.collect()
    except BaseException as e:
        sys.print_exception(e)
    finally:
        det.deinit()
        pl.destroy()

This is my source code.

Hi, please refer to https://github.com/kendryte/canmv_k230/blob/canmv_k230/resources/examples/21-AI-With-Others/ai_uart.py
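
If it helps to rule out the wiring and UART configuration first, below is a minimal standalone sketch that only sends data, without the detection pipeline. It reuses the pin mapping and baud rate from your posted code; the frame contents are just a placeholder, so adapt them as needed.

from machine import UART, FPIOA
import time

# Same pin mapping as the posted code: pin 11 -> UART2 TX, pin 12 -> UART2 RX
fpioa = FPIOA()
fpioa.set_function(11, FPIOA.UART2_TXD)
fpioa.set_function(12, FPIOA.UART2_RXD)

uart = UART(UART.UART2, 115200)   # UART2 at 115200 baud

# Placeholder frame: head 0x2C 0x12, eight flag bytes, tail 0x5B
frame = bytearray([0x2C, 0x12, 1, 0, 0, 0, 0, 0, 0, 0, 0x5B])

while True:
    uart.write(frame)    # if the STM32 side receives this, the UART path itself is fine
    time.sleep(1)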

Thanks! But may I ask why calling uart.write(1) directly does not send anything?

MicroPython does not support that: UART.write() expects a buffer object such as bytes, bytearray, or str, not a bare integer.
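
As a quick illustration of what write() will accept (a sketch only, assuming uart is the UART2 object configured as in the code above):

# uart.write() takes a buffer object, not a bare integer
uart.write(b'\x01')                         # a bytes literal: one byte, 0x01
uart.write(bytes([1]))                      # the same byte, built from an int
uart.write(bytearray([0x2C, 0x12, 0x5B]))   # a bytearray also works
uart.write("A")                             # a str is sent as its bytes (here 0x41)
# uart.write(1)                             # not valid: an int does not support the buffer protocol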

Huh? But what should I do if I only need to send a single number? The GitHub example sends letters, so it has to encode them; shouldn't a number be sendable directly? Writing it that way didn't raise any error for me either.
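
The issue is not digits versus letters but that write() needs bytes, so you have to decide whether the STM32 should receive the raw byte value or the ASCII text. A short sketch of both, again assuming the uart object from above; the variable name is just a placeholder.

import struct

room = 7                       # placeholder value to send

uart.write(bytes([room]))      # raw byte: the STM32 receives 0x07
uart.write(str(room))          # text: the STM32 receives the character '7' (0x37)

# For values larger than one byte, pack them explicitly,
# e.g. as a little-endian 16-bit unsigned integer:
uart.write(struct.pack('<H', 1000))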