关于BiSeNetV2模型的支持

Viewed 71

问题描述


2.9是不是不支持BiSeNetV2这个模型,我生成ONNX转换成Kmodel,发现转换失败呢

3 Answers

报什么错了,转换脚本怎么写的?

代码发上去了,帮忙看一下

#!/usr/bin/env python3
"""
ONNX to K230 kmodel Converter
用于将 ONNX 模型转换为 K230 可执行的 kmodel 文件
"""

import os
import sys
import argparse
import numpy as np
import onnx
import nncase

def parse_args(argv=None):
    """Parse command-line options for the converter.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
              which case argparse reads ``sys.argv[1:]`` — identical to the
              old behavior. Accepting an explicit list makes the parser
              unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Convert ONNX to K230 kmodel')
    parser.add_argument('--model', type=str, required=True, help='Path to ONNX model file')
    parser.add_argument('--output', type=str, required=True, help='Path to output kmodel file')
    parser.add_argument('--input_shape', type=str, default='1,3,640,640', 
                        help='Input shape in format: batch,channels,height,width')
    parser.add_argument('--input_type', type=str, default='float32', 
                        choices=['float32', 'uint8'], help='Input data type')
    parser.add_argument('--quantize', action='store_true', help='Enable quantization')
    parser.add_argument('--calibrate_method', type=str, default='NoClip',
                        choices=['NoClip', 'Kld'], help='Calibration method')
    parser.add_argument('--samples', type=int, default=10, help='Number of calibration samples')
    return parser.parse_args(argv)

def parse_input_shape(shape_str):
    """Convert a comma-separated shape string (e.g. '1,3,640,640') to a list of ints."""
    return list(map(int, shape_str.split(',')))

def generate_calibration_data(shape, samples=10, dtype='uint8'):
    """Generate random calibration samples for post-training quantization.

    The original version always produced uint8 noise, which mismatches the
    script's default ``float32`` input type; the ``dtype`` parameter fixes
    that while keeping uint8 as the backward-compatible default.

    NOTE(review): random noise only exercises the numeric range — for good
    quantization accuracy, feed representative images instead.

    Args:
        shape: Input tensor shape, e.g. [1, 3, 640, 640].
        samples: Number of calibration samples to generate.
        dtype: 'uint8' for raw byte input in [0, 255] (default, original
               behavior) or 'float32' for normalized floats in [0, 1).

    Returns:
        np.ndarray of shape (samples, 1, *shape), matching the layout the
        original code passed to PTQTensorOptions.set_tensor_data.
    """
    data = []
    for _ in range(samples):
        if dtype == 'float32':
            sample = np.random.rand(*shape).astype(np.float32)
        else:
            sample = np.random.randint(0, 256, shape).astype(np.uint8)
        data.append([sample])
    return np.array(data)

def main():
    """Run the full ONNX -> K230 kmodel conversion pipeline.

    Steps: validate the input file, load/check the ONNX model, build the
    nncase compile options, import the model, optionally apply PTQ
    quantization, then compile and write the kmodel. Exits with status 1
    on any failure.
    """
    args = parse_args()
    
    print("=" * 60)
    print("ONNX to K230 kmodel Converter")
    print("=" * 60)
    print(f"Input model: {args.model}")
    print(f"Output model: {args.output}")
    print(f"Input shape: {args.input_shape}")
    print(f"Input type: {args.input_type}")
    print(f"Quantization: {args.quantize}")
    print("=" * 60)
    
    # 1. Check input file
    if not os.path.exists(args.model):
        print(f"Error: ONNX file not found: {args.model}")
        sys.exit(1)
    
    # 2. Load and verify ONNX model
    print("\n[1/6] Loading ONNX model...")
    try:
        onnx_model = onnx.load(args.model)
        onnx.checker.check_model(onnx_model)
        print(f"      ONNX model loaded successfully")
        print(f"      Opset version: {onnx_model.opset_import[0].version}")
    except Exception as e:
        print(f"      Error: {e}")
        sys.exit(1)
    
    # 3. Parse input shape
    input_shape = parse_input_shape(args.input_shape)
    print(f"\n[2/6] Input configuration:")
    print(f"      Shape: {input_shape}")
    print(f"      Type: {args.input_type}")
    
    # 4. Setup compile options
    print("\n[3/6] Setting up compile options...")
    compile_options = nncase.CompileOptions()
    compile_options.target = "k230"
    compile_options.input_shape = [input_shape]
    compile_options.input_type = args.input_type
    compile_options.input_layout = "NCHW"
    # FIX: nncase needs a writable dump directory. Leaving dump_dir unset
    # (empty string) is the likely cause of the importer crash
    # "System.ArgumentException: The value cannot be an empty string.
    # (Parameter 'path')" raised from Directory.GetParent inside
    # OnnxImporter.GetTensor -- TODO confirm against the nncase version used.
    # NOTE(review): if the ONNX file stores weights as external data
    # (common for large opset-18 exports), re-save it with the tensors
    # embedded before converting.
    compile_options.dump_dir = "tmp"
    os.makedirs(compile_options.dump_dir, exist_ok=True)
    
    if args.input_type == 'uint8':
        # NOTE(review): input_range [0, 1] with mean 0 / std 1 assumes the
        # network expects inputs normalized to [0, 1]; if it was trained on
        # raw pixel values, this should be [0, 255] -- confirm against the
        # training preprocessing.
        compile_options.input_range = [0, 1]
        compile_options.mean = [0, 0, 0]
        compile_options.std = [1, 1, 1]
    
    print(f"      Target: {compile_options.target}")
    print(f"      Input shape: {compile_options.input_shape}")
    print(f"      Input type: {compile_options.input_type}")
    
    # 5. Create compiler and import ONNX
    print("\n[4/6] Compiling model...")
    compiler = nncase.Compiler(compile_options)
    
    try:
        with open(args.model, 'rb') as f:
            model_content = f.read()
        import_options = nncase.ImportOptions()
        compiler.import_onnx(model_content, import_options)
        print("      ONNX import successful")
    except Exception as e:
        print(f"      Error: {e}")
        print("\n=== DEBUG INFO ===")
        print(f"ONNX file size: {os.path.getsize(args.model)} bytes")
        print(f"Input shape: {input_shape}")
        print("=================")
        sys.exit(1)
    
    # 6. Quantization (if enabled)
    if args.quantize:
        print("\n[5/6] Applying quantization...")
        ptq_options = nncase.PTQTensorOptions()
        ptq_options.samples_count = args.samples
        ptq_options.calibrate_method = args.calibrate_method
        ptq_options.w_quant_type = 'uint8'
        ptq_options.quant_type = 'uint8'
        
        # Generate random calibration data matching the model input shape.
        calib_shape = input_shape
        calib_data = generate_calibration_data(calib_shape, args.samples)
        ptq_options.set_tensor_data(calib_data)
        compiler.use_ptq(ptq_options)
        print(f"      Method: {args.calibrate_method}")
        print(f"      Samples: {args.samples}")
    
    # 7. Compile
    print("\n[6/6] Compiling to kmodel...")
    try:
        compiler.compile()
        kmodel = compiler.gencode_tobytes()
        
        # Save to file (create the output directory if needed).
        os.makedirs(os.path.dirname(args.output) or '.', exist_ok=True)
        with open(args.output, 'wb') as f:
            f.write(kmodel)
        
        size_kb = os.path.getsize(args.output) / 1024
        print(f"      Compilation successful!")
        print(f"      Output file: {args.output}")
        print(f"      File size: {size_kb:.2f} KB")
        
    except Exception as e:
        print(f"      Error: {e}")
        sys.exit(1)
    
    print("\n" + "=" * 60)
    print("Conversion completed successfully!")
    print("=" * 60)

# Run the converter only when executed as a script, not when imported.
if __name__ == "__main__":
    main()

报什么错了?

错误通过回答发上来了

我想问一下,你们有看到我的问题吗?有帮忙再检查一下问题吗?

[1/5] Loading ONNX model...
ONNX model loaded successfully
Opset version: 18

[2/5] Input configuration:
Shape: [1, 3, 640, 640]
Type: float32

[3/5] Setting up compile options...
Target: k230
Input shape: [[1, 3, 640, 640]]
Input type: float32

[4/5] Compiling model...
Unhandled exception. System.ArgumentException: The value cannot be an empty string. (Parameter 'path')
at System.ArgumentException.ThrowNullOrEmptyException(String argument, String paramName)
at System.IO.Directory.GetParent(String path)
at Nncase.Importer.OnnxImporter.GetTensor(TensorProto tensor)
at Nncase.Importer.OnnxImporter.GetInputExpr(NodeProto n, Int32 index)
at Nncase.Importer.OnnxImporter.GetInputExprs(NodeProto n, Int32 index0, Int32 index1)
at Nncase.Importer.OnnxImporter.VisitConv2D(NodeProto& op)
at Nncase.Importer.OnnxImporter.Visit(NodeProto op)
at Nncase.Importer.OnnxImporter.ConvertOp()
at Nncase.BaseImporter.Import()
at Nncase.Importers.ImportOnnx(Stream onnx, CompileSession compileSession)
at Nncase.Compiler.Interop.CApi.CompilerImportOnnxModule(IntPtr compilerHandle, IntPtr streamHandle)