Running a TFLite model in Flutter with tflite_audio, original code works in Python


I am trying to turn a Python machine learning model into a Flutter app. I am using tflite_audio, a Flutter package. As far as I can tell, the model loads, but the app gets killed as soon as audio recognition starts.

Here is my Flutter code:

import 'package:flutter/material.dart';
import 'dart:async';
import 'dart:developer';
import 'package:tflite_audio/tflite_audio.dart';
import 'package:flutter/services.dart';
import 'dart:convert';

class MyHomePage extends StatefulWidget {
  const MyHomePage({super.key});

  @override
  State<MyHomePage> createState() => _MyHomePageState();
}

class _MyHomePageState extends State<MyHomePage> {
  final GlobalKey<ScaffoldState> _scaffoldKey = GlobalKey<ScaffoldState>();
  Stream<Map<dynamic, dynamic>>? result;
  final isRecording = ValueNotifier<bool>(false);
  final String label = "assets/labels.txt";
  final String model = "assets/tf_lite_model.tflite";
  final String inputType = 'decodedWav';

  @override
  void initState() {
    print("----------- initState 0 -----------");
    super.initState();
    TfliteAudio.loadModel(
      model: model,
      label: label,
      inputType: inputType,
      numThreads: 5,
    );
    print("----------- initState 1 -----------");
    TfliteAudio.setSpectrogramParameters(
      nMFCC: 40,
      hopLength: 16384,
    );
    print("----------- initState 2 -----------");
  }

  @override
  Widget build(BuildContext context) {
    print("----------- Build -----------");
    return Scaffold(
      key: _scaffoldKey,
      appBar: AppBar(
        title: const Text("Live Prediction"),
        elevation: 10,
      ),
      body: StreamBuilder<Map<dynamic, dynamic>>(
        stream: result,
        builder: (BuildContext context,
            AsyncSnapshot<Map<dynamic, dynamic>> inferenceSnapshot) {
          print("----------- Build -- Builder -----------");
          return FutureBuilder(
              future: fetchLabelList(),
              builder: (BuildContext context,
                  AsyncSnapshot<List<String>> labelSnapshot) {
                switch (inferenceSnapshot.connectionState) {
                  case ConnectionState.none:
                    if (labelSnapshot.hasData) {
                      return labelListWidget(labelSnapshot.data);
                    } else {
                      return const CircularProgressIndicator();
                    }
                  case ConnectionState.waiting:
                    return Stack(
                      children: <Widget>[
                        Align(
                          alignment: Alignment.bottomRight,
                          child: inferenceTimeWidget('calculating...'),
                        ),
                        labelListWidget(labelSnapshot.data),
                      ],
                    );

                  default:
                    return Stack(
                      children: [
                        Align(
                            alignment: Alignment.bottomRight,
                            child: inferenceTimeWidget(
                                '${showResult(inferenceSnapshot, 'inferenceTime')}ms')),
                        labelListWidget(labelSnapshot.data,
                            showResult(inferenceSnapshot, 'recognitionResult'))
                      ],
                    );
                }
              });
        },
      ),
      floatingActionButtonLocation: FloatingActionButtonLocation.centerFloat,
      floatingActionButton: ValueListenableBuilder(
        valueListenable: isRecording,
        builder: ((context, value, widget) {
          if (value == false) {
            return FloatingActionButton(
              onPressed: () {
                isRecording.value = true;
                print("----------- Started 1 -----------");
                setState(() {
                  getResult();
                });
                print("----------- Started 2 -----------");
              },
              backgroundColor: Colors.blue,
              child: const Icon(Icons.mic),
            );
          } else {
            return FloatingActionButton(
              onPressed: () {
                log(" ---- > Audio Recognition Stopped");
                TfliteAudio.stopAudioRecognition();
              },
              backgroundColor: Colors.red,
              child: const Icon(Icons.adjust),
            );
          }
        }),
      ),
    );
  }

  Future<List<String>> fetchLabelList() async {
    print("----------- fetchLabelList -----------");
    List<String> labelList = [];
    await rootBundle.loadString(label).then((value) {
      for (String element in const LineSplitter().convert(value)) {
        labelList.add(element);
      }
    });
    return labelList;
  }

  Widget inferenceTimeWidget(String result) {
    print("----------- inferenceTimeWidget -----------");
    return Padding(
      padding: const EdgeInsets.all(20.0),
      child: Text(
        result,
        textAlign: TextAlign.center,
        style: const TextStyle(
          fontWeight: FontWeight.bold,
          fontSize: 20,
          color: Colors.black,
        ),
      ),
    );
  }

  Widget labelListWidget(List<String>? labelList, [String? result]) {
    print("----------- labelListWidget -----------");
    return Center(
      child: ListView(
        children: labelList!.map((labels) {
          if (labels == result) {
            return Padding(
              padding: const EdgeInsets.all(5),
              child: Text(labels.toString()),
            );
          } else {
            return Padding(
              padding: const EdgeInsets.all(5),
              child: Text(labels.toString()),
            );
          }
        }).toList(),
      ),
    );
  }

  String showResult(AsyncSnapshot snapShot, String key) =>
      snapShot.hasData ? snapShot.data[key].toString() : '0';

  void getResult() {
    print("----------- getResult 0 -----------");
    result = TfliteAudio.startAudioRecognition(
      sampleRate: 16000,
      bufferSize: 500,
      numOfInferences: 3,
      audioLength: 50000,
    );
    print("----------- getResult 1 -----------");

    result
        ?.listen(
            (event) => log("Recognition Result: ${event["recognitionResult"]}"))
        .onDone(() => isRecording.value = false);

    print("----------- getResult 2 -----------");
  }
}

Here is the original Python code:

from keras.models import load_model
import tensorflow as tf
import numpy as np
from vggish_input import waveform_to_examples
import ubicoustics
import pyaudio
from pathlib import Path
import time
import argparse
import wget
import os
from reprint import output
from helpers import Interpolator, ratio_to_db, dbFS, rangemap

# thresholds
PREDICTION_THRES = 0.8 # confidence
DBLEVEL_THRES = -40 # dB

# Variables
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
CHUNK = RATE
MICROPHONES_DESCRIPTION = []
FPS = 60.0
OUTPUT_LINES = 33

###########################
# Model download
###########################
def download_model(url,output):
    return wget.download(url,output)

###########################
# Check Microphone
###########################
print("=====")
print("1 / 2: Checking Microphones... ")
print("=====")

import microphones
desc, mics, indices = microphones.list_microphones()
if (len(mics) == 0):
    print("Error: No microphone found.")
    exit()

#############
# Read Command Line Args
#############
MICROPHONE_INDEX = indices[0]
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mic", help="Select which microphone / input device to use")
args = parser.parse_args()
try:
    if args.mic:
        MICROPHONE_INDEX = int(args.mic)
        print("User selected mic: %d" % MICROPHONE_INDEX)
    else:
        mic_in = input("Select microphone [%d]: " % MICROPHONE_INDEX).strip()
        if (mic_in!=''):
            MICROPHONE_INDEX = int(mic_in)
except:
    print("Invalid microphone")
    exit()

# Find description that matches the mic index
mic_desc = ""
for k in range(len(indices)):
    i = indices[k]
    if (i==MICROPHONE_INDEX):
        mic_desc = mics[k]
print("Using mic: %s" % mic_desc)

###########################
# Download model, if it doesn't exist
###########################
MODEL_URL = "https://www.dropbox.com/s/cq1d7uqg0l28211/example_model.hdf5?dl=1"
MODEL_PATH = "models/example_model.hdf5"
print("=====")
print("2 / 2: Checking model... ")
print("=====")
model_filename = "models/example_model.hdf5"
ubicoustics_model = Path(model_filename)
if (not ubicoustics_model.is_file()):
    print("Downloading example_model.hdf5 [867MB]: ")
    download_model(MODEL_URL, MODEL_PATH)

##############################
# Load Deep Learning Model
##############################
print("Using deep learning model: %s" % (model_filename))
model = load_model(model_filename)
context = ubicoustics.everything

label = dict()
for k in range(len(context)):
    label[k] = context[k]

##############################
# Setup Audio Callback
##############################
output_lines = []*OUTPUT_LINES
audio_rms = 0
candidate = ("-",0.0)

# Prediction Interpolators
interpolators = []
for k in range(31):
    interpolators.append(Interpolator())

# Audio Input Callback
def audio_samples(in_data, frame_count, time_info, status_flags):
    global output_lines
    global interpolators
    global audio_rms
    global candidate
    np_wav = np.fromstring(in_data, dtype=np.int16) / 32768.0 # Convert to [-1.0, +1.0]

    # Compute RMS and convert to dB
    rms = np.sqrt(np.mean(np_wav**2))
    db = dbFS(rms)
    interp = interpolators[30]
    interp.animate(interp.end, db, 1.0)

    # Make Predictions
    x = waveform_to_examples(np_wav, RATE)
    predictions = []
    if x.shape[0] != 0:
        x = x.reshape(len(x), 96, 64, 1)
        pred = model.predict(x)
        predictions.append(pred)

    for prediction in predictions:
        m = np.argmax(prediction[0])
        candidate = (ubicoustics.to_human_labels[label[m]],prediction[0,m])
        num_classes = len(prediction[0])
        for k in range(num_classes):
            interp = interpolators[k]
            prev = interp.end
            interp.animate(prev,prediction[0,k],1.0)
    return (in_data, pyaudio.paContinue)

##############################
# Main Execution
##############################
while(1):
    ##############################
    # Setup Audio
    ##############################
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK, stream_callback=audio_samples, input_device_index=MICROPHONE_INDEX)

    ##############################
    # Start Non-Blocking Stream
    ##############################
    os.system('cls' if os.name == 'nt' else 'clear')
    print("# Live Prediction Using Microphone: %s" % (mic_desc))
    stream.start_stream()
    while stream.is_active():
        with output(initial_len=OUTPUT_LINES, interval=0) as output_lines:
            while True:
                time.sleep(1.0/FPS) # 60fps
                for k in range(30):
                    interp = interpolators[k]
                    val = interp.update()
                    bar = ["|"] * int((val*100.0))
                    output_lines[k] = "%20s: %.2f %s" % (ubicoustics.to_human_labels[label[k]], val, "".join(bar))

                # dB Levels
                interp = interpolators[30]
                db = interp.update()
                val = rangemap(db, -50, 0, 0, 100)
                bar = ["|"] * min(100,int((val)))
                output_lines[30] = "%20s: %.1fdB [%s " % ("Audio Level", db, "".join(bar))

                # Display Thresholds
                output_lines[31] = "%20s: confidence = %.2f, db_level = %.1f" % ("Thresholds", PREDICTION_THRES, DBLEVEL_THRES)

                # Final Prediction
                pred = "-"
                event,conf = candidate
                if (conf > PREDICTION_THRES and db > DBLEVEL_THRES):
                    pred = event
                output_lines[32] = "%20s: %s" % ("Prediction", pred.upper())

Here is the link to the GitHub repository with the original source code: https://github.com/FIGLAB/ubicoustics
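
For context, the Keras .hdf5 model has to be converted to a .tflite file before it can be bundled as a Flutter asset. Below is a minimal conversion sketch; it is not necessarily the exact script used for this question, and the file paths are placeholders:

import tensorflow as tf
from keras.models import load_model

# Load the original Keras model (path is a placeholder).
model = load_model("models/example_model.hdf5")

# Convert it to TFLite and write the result into the Flutter assets folder.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open("assets/tf_lite_model.tflite", "wb") as f:
    f.write(tflite_model)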

Finally, here is the log I get:

I/ViewRootImpl@376a335[MainActivity]( 8167): ViewPostIme pointer 0
W/System  ( 8167): A resource failed to call close.
I/ViewRootImpl@376a335[MainActivity]( 8167): ViewPostIme pointer 1
I/flutter ( 8167): ----------- Started 1 -----------
I/flutter ( 8167): ----------- getResult 0 -----------
I/flutter ( 8167): ----------- getResult 1 -----------
I/flutter ( 8167): ----------- getResult 2 -----------
D/TfliteAudio( 8167): Parameters: {detectionThreshold=0.3, minimumTimeBetweenSamples=0, method=setAudioRecognitionStream, numOfInferences=3, averageWindowDuration=0, audioLength=50000, sampleRate=16000, suppressionTime=0, bufferSize=500}
I/flutter ( 8167): ----------- Started 2 -----------
D/TfliteAudio( 8167): AudioLength does not need to be adjusted. Length: 50000
D/TfliteAudio( 8167): Transpose Audio: false
D/TfliteAudio( 8167): Check for permission. Request code: 13
D/TfliteAudio( 8167): Permission already granted.
I/flutter ( 8167): ----------- Build -----------
I/flutter ( 8167): ----------- Build -- Builder -----------
I/flutter ( 8167): ----------- fetchLabelList -----------
I/flutter ( 8167): ----------- inferenceTimeWidget -----------
I/flutter ( 8167): ----------- labelListWidget -----------
V/Recording( 8167): Recording started
I/flutter ( 8167): ----------- inferenceTimeWidget -----------
I/flutter ( 8167): ----------- labelListWidget -----------
V/RecordingData( 8167): recordingOffset: 500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 1000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 1500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 2000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 2500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 3000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 3500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 4000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 4500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 5000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 5500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 6000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 6500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 7000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 7500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 8000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 8500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 9000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 9500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 10000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 10500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 11000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 11500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 12000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 12500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 13000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 13500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 14000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 14500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 15000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 15500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 16000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 16500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 17000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 17500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 18000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 18500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 19000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 19500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 20000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 20500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 21000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 21500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 22000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 22500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 23000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 23500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 24000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 24500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 25000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 25500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 26000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 26500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 27000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 27500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 28000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 28500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 29000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 29500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 30000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 30500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 31000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 31500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 32000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 32500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 33000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 33500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 34000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 34500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 35000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 35500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 36000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 36500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 37000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 37500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 38000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 38500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 39000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 39500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 40000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 40500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 41000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 41500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 42000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 42500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 43000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 43500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 44000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 44500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 45000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 45500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 46000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 46500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 47000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 47500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 48000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 48500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 49000/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 49500/50000 | inferenceCount: 1/3
V/RecordingData( 8167): recordingOffset: 50000/50000 | inferenceCount: 1/3
V/TfliteAudio( 8167): Recognition started.
V/RecordingData( 8167): recordingOffset: 500/50000 | inferenceCount: 2/3
W/System.err( 8167): io.reactivex.rxjava3.exceptions.OnErrorNotImplementedException: The exception was not handled due to missing onError handler in the subscribe() method call. Further reading: https://github.com/ReactiveX/RxJava/wiki/Error-Handling | java.lang.IllegalArgumentException: Invalid input Tensor index: 1
W/System.err( 8167):    at io.reactivex.rxjava3.internal.functions.Functions$OnErrorMissingConsumer.accept(Functions.java:718)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.functions.Functions$OnErrorMissingConsumer.accept(Functions.java:715)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onError(LambdaObserver.java:77)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onNext(LambdaObserver.java:67)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableDoOnEach$DoOnEachObserver.onNext(ObservableDoOnEach.java:101)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableObserveOn$ObserveOnObserver.drainNormal(ObservableObserveOn.java:202)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableObserveOn$ObserveOnObserver.run(ObservableObserveOn.java:256)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.schedulers.ScheduledRunnable.run(ScheduledRunnable.java:65)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.schedulers.ScheduledRunnable.call(ScheduledRunnable.java:56)
W/System.err( 8167):    at java.util.concurrent.FutureTask.run(FutureTask.java:264)
W/System.err( 8167):    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:307)
W/System.err( 8167):    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1137)
W/System.err( 8167):    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:637)
W/System.err( 8167):    at java.lang.Thread.run(Thread.java:1012)
W/System.err( 8167): Caused by: java.lang.IllegalArgumentException: Invalid input Tensor index: 1
W/System.err( 8167):    at org.tensorflow.lite.NativeInterpreterWrapper.getInputTensor(NativeInterpreterWrapper.java:381)
W/System.err( 8167):    at org.tensorflow.lite.NativeInterpreterWrapper.run(NativeInterpreterWrapper.java:241)
W/System.err( 8167):    at org.tensorflow.lite.InterpreterImpl.runForMultipleInputsOutputs(InterpreterImpl.java:101)
W/System.err( 8167):    at org.tensorflow.lite.Interpreter.runForMultipleInputsOutputs(Interpreter.java:95)
W/System.err( 8167):    at flutter.tflite_audio.TfliteAudioPlugin.startRecognition(TfliteAudioPlugin.java:668)
W/System.err( 8167):    at flutter.tflite_audio.TfliteAudioPlugin.$r8$lambda$mVKyUfG-fEFVCdcB9f3xVhmIueo(Unknown Source:0)
W/System.err( 8167):    at flutter.tflite_audio.TfliteAudioPlugin$$ExternalSyntheticLambda6.accept(Unknown Source:4)
W/System.err( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onNext(LambdaObserver.java:63)
W/System.err( 8167):    ... 10 more
E/AndroidRuntime( 8167): FATAL EXCEPTION: RxComputationThreadPool-1
E/AndroidRuntime( 8167): Process: com.example.test_app, PID: 8167
E/AndroidRuntime( 8167): io.reactivex.rxjava3.exceptions.OnErrorNotImplementedException: The exception was not handled due to missing onError handler in the subscribe() method call. Further reading: https://github.com/ReactiveX/RxJava/wiki/Error-Handling | java.lang.IllegalArgumentException: Invalid input Tensor index: 1
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.functions.Functions$OnErrorMissingConsumer.accept(Functions.java:718)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.functions.Functions$OnErrorMissingConsumer.accept(Functions.java:715)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onError(LambdaObserver.java:77)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onNext(LambdaObserver.java:67)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableDoOnEach$DoOnEachObserver.onNext(ObservableDoOnEach.java:101)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableObserveOn$ObserveOnObserver.drainNormal(ObservableObserveOn.java:202)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.operators.observable.ObservableObserveOn$ObserveOnObserver.run(ObservableObserveOn.java:256)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.schedulers.ScheduledRunnable.run(ScheduledRunnable.java:65)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.schedulers.ScheduledRunnable.call(ScheduledRunnable.java:56)
E/AndroidRuntime( 8167):    at java.util.concurrent.FutureTask.run(FutureTask.java:264)
E/AndroidRuntime( 8167):    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:307)
E/AndroidRuntime( 8167):    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1137)
E/AndroidRuntime( 8167):    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:637)
E/AndroidRuntime( 8167):    at java.lang.Thread.run(Thread.java:1012)
E/AndroidRuntime( 8167): Caused by: java.lang.IllegalArgumentException: Invalid input Tensor index: 1
E/AndroidRuntime( 8167):    at org.tensorflow.lite.NativeInterpreterWrapper.getInputTensor(NativeInterpreterWrapper.java:381)
E/AndroidRuntime( 8167):    at org.tensorflow.lite.NativeInterpreterWrapper.run(NativeInterpreterWrapper.java:241)
E/AndroidRuntime( 8167):    at org.tensorflow.lite.InterpreterImpl.runForMultipleInputsOutputs(InterpreterImpl.java:101)
E/AndroidRuntime( 8167):    at org.tensorflow.lite.Interpreter.runForMultipleInputsOutputs(Interpreter.java:95)
E/AndroidRuntime( 8167):    at flutter.tflite_audio.TfliteAudioPlugin.startRecognition(TfliteAudioPlugin.java:668)
E/AndroidRuntime( 8167):    at flutter.tflite_audio.TfliteAudioPlugin.$r8$lambda$mVKyUfG-fEFVCdcB9f3xVhmIueo(Unknown Source:0)
E/AndroidRuntime( 8167):    at flutter.tflite_audio.TfliteAudioPlugin$$ExternalSyntheticLambda6.accept(Unknown Source:4)
E/AndroidRuntime( 8167):    at io.reactivex.rxjava3.internal.observers.LambdaObserver.onNext(LambdaObserver.java:63)
E/AndroidRuntime( 8167):    ... 10 more
V/RecordingData( 8167): recordingOffset: 1000/50000 | inferenceCount: 2/3
V/RecordingData( 8167): recordingOffset: 1500/50000 | inferenceCount: 2/3
I/Process ( 8167): Sending signal. PID: 8167 SIG: 9
Lost connection to device.

Exited.

I am trying to convert the ML code from the repository linked above into a Flutter app, but the app is killed as soon as audio recognition begins.
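
Since the crash is "java.lang.IllegalArgumentException: Invalid input Tensor index: 1", my understanding is that the plugin tries to feed a second input tensor (the 'decodedWav' input type apparently passes the decoded audio and the sample rate as two separate inputs), which this model may not define. A quick way to check how many input tensors the converted model actually expects is to inspect it in Python; this is a minimal sketch, and the path is a placeholder for the same asset file:

import tensorflow as tf

# Load the converted model and list its input tensors.
interpreter = tf.lite.Interpreter(model_path="assets/tf_lite_model.tflite")
interpreter.allocate_tensors()
for detail in interpreter.get_input_details():
    print(detail["name"], detail["shape"], detail["dtype"])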

python flutter tensorflow machine-learning tflite
1 Answer

I am running into a similar problem with tflite_audio. Did you ever find a solution?
