How to apply audio effects to a file and write it to the file system - iOS

Question · 0 votes · 3 answers

I am building an app that should let users apply audio filters, such as reverb or boost, to recorded audio.

I have not been able to find any viable source of information on how to apply filters to the file itself, which I need because the processed file later has to be uploaded to a server.

I am currently using AudioKit for visualization, and I know it can do audio processing, but apparently only for playback. Any suggestions for further research would be appreciated.

ios swift audio avfoundation audiokit
3 Answers

9 votes

AudioKit has an offline render node that does not require iOS 11. Below is an example; the player.schedule(...) and player.play(at:) parts are needed because AKAudioPlayer's underlying AVAudioPlayerNode will block on the calling thread, waiting for the next render, if you start it with player.play().

import UIKit
import AVFoundation
import AudioKit

class ViewController: UIViewController {

    var player: AKAudioPlayer?
    var reverb = AKReverb()
    var boost = AKBooster()
    var offlineRender = AKOfflineRenderNode()

    override func viewDidLoad() {
        super.viewDidLoad()

        guard let url = Bundle.main.url(forResource: "theFunkiestFunkingFunk", withExtension: "mp3") else {
            return
        }
        var audioFile: AKAudioFile?
        do {
            audioFile = try AKAudioFile.init(forReading: url)
            player = try AKAudioPlayer.init(file: audioFile!)
        } catch {
            print(error)
            return
        }
        guard let player = player else {
            return
        }


        player >>> reverb >>> boost >>> offlineRender

        AudioKit.output = offlineRender
        AudioKit.start()


        let docs = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!
        let dstURL = docs.appendingPathComponent("rendered.caf")

        offlineRender.internalRenderEnabled = false
        player.schedule(from: 0, to: player.duration, avTime: nil)
        let sampleTimeZero = AVAudioTime(sampleTime: 0, atRate: AudioKit.format.sampleRate)
        player.play(at: sampleTimeZero)
        do {
            try offlineRender.renderToURL(dstURL, seconds: player.duration)
        } catch {
            print(error)
            return
        }
        offlineRender.internalRenderEnabled = true

        print("Done! Rendered to " + dstURL.path)
    }
}
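
Since the question also needs the processed file uploaded to a server afterwards, here is a minimal upload sketch using URLSession. The endpoint URL, HTTP method and content type below are placeholders, not part of the original answer:

import Foundation

// Sketch: upload the rendered file once offline rendering has finished.
// The endpoint is hypothetical; adjust the method and headers to match your server API.
func upload(renderedFileAt fileURL: URL) {
    guard let endpoint = URL(string: "https://example.com/upload/rendered.caf") else { return }

    var request = URLRequest(url: endpoint)
    request.httpMethod = "PUT"
    request.setValue("audio/x-caf", forHTTPHeaderField: "Content-Type")

    // uploadTask(with:fromFile:) streams the file from disk instead of loading it into memory
    let task = URLSession.shared.uploadTask(with: request, fromFile: fileURL) { _, response, error in
        if let error = error {
            print("Upload failed: \(error)")
        } else if let http = response as? HTTPURLResponse {
            print("Upload finished with status \(http.statusCode)")
        }
    }
    task.resume()
}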

7 votes

You can use the newly introduced "manual rendering" feature of the audio units (see the example below, which drives AVAudioEngine in offline manual rendering mode).

If you need to support older macOS/iOS versions, I would be surprised if you could not achieve the same thing with AudioKit (even though I have not tried it myself). For example, use AKSamplePlayer as your first node (it will read your audio file), then build and connect your effects, and use AKNodeRecorder as your last node; a rough sketch follows.
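
A rough, untested sketch of that AudioKit chain (it uses AKAudioPlayer from the answer above in place of AKSamplePlayer; exact initializer signatures may differ between AudioKit 4.x releases):

import AudioKit

// Sketch: play the file through effects in real time and tap the last node with AKNodeRecorder.
func startRecordingProcessedCopy(of sourceURL: URL) throws -> (AKAudioPlayer, AKNodeRecorder) {
    let sourceFile = try AKAudioFile(forReading: sourceURL)
    let player = try AKAudioPlayer(file: sourceFile)
    let reverb = AKReverb(player)   // first effect
    let boost = AKBooster(reverb)   // second effect

    AudioKit.output = boost
    try AudioKit.start()            // non-throwing on older AudioKit 4.x releases

    // AKNodeRecorder taps `boost` and writes whatever flows through it to an AKAudioFile
    let recorder = try AKNodeRecorder(node: boost)
    try recorder.record()
    player.play()

    // Recording happens in real time: call recorder.stop() once playback finishes
    // (e.g. in the player's completion handler) and read recorder.audioFile.
    return (player, recorder)
}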

Example of manual rendering using the new audio unit features

import AVFoundation

//: ## Source File
//: Open the audio file to process
let sourceFile: AVAudioFile
let format: AVAudioFormat
do {
    let sourceFileURL = Bundle.main.url(forResource: "mixLoop", withExtension: "caf")!
    sourceFile = try AVAudioFile(forReading: sourceFileURL)
    format = sourceFile.processingFormat
} catch {
    fatalError("could not open source audio file, \(error)")
}

//: ## Engine Setup
//:    player -> reverb -> mainMixer -> output
//: ### Create and configure the engine and its nodes
let engine = AVAudioEngine()
let player = AVAudioPlayerNode()
let reverb = AVAudioUnitReverb()

engine.attach(player)
engine.attach(reverb)

// set desired reverb parameters
reverb.loadFactoryPreset(.mediumHall)
reverb.wetDryMix = 50

// make connections
engine.connect(player, to: reverb, format: format)
engine.connect(reverb, to: engine.mainMixerNode, format: format)

// schedule source file
player.scheduleFile(sourceFile, at: nil)
//: ### Enable offline manual rendering mode
do {
    let maxNumberOfFrames: AVAudioFrameCount = 4096 // maximum number of frames the engine will be asked to render in any single render call
    try engine.enableManualRenderingMode(.offline, format: format, maximumFrameCount: maxNumberOfFrames)
} catch {
    fatalError("could not enable manual rendering mode, \(error)")
}
//: ### Start the engine and player
do {
    try engine.start()
    player.play()
} catch {
    fatalError("could not start engine, \(error)")
}
//: ## Offline Render
//: ### Create an output buffer and an output file
//: Output buffer format must be same as engine's manual rendering output format
let outputFile: AVAudioFile
do {
    let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let outputURL = URL(fileURLWithPath: documentsPath + "/mixLoopProcessed.caf")
    outputFile = try AVAudioFile(forWriting: outputURL, settings: sourceFile.fileFormat.settings)
} catch {
    fatalError("could not open output audio file, \(error)")
}

// buffer to which the engine will render the processed data
let buffer: AVAudioPCMBuffer = AVAudioPCMBuffer(pcmFormat: engine.manualRenderingFormat, frameCapacity: engine.manualRenderingMaximumFrameCount)!
//: ### Render loop
//: Pull the engine for desired number of frames, write the output to the destination file
while engine.manualRenderingSampleTime < sourceFile.length {
    do {
        let framesToRender = min(buffer.frameCapacity, AVAudioFrameCount(sourceFile.length - engine.manualRenderingSampleTime))
        let status = try engine.renderOffline(framesToRender, to: buffer)
        switch status {
        case .success:
            // data rendered successfully
            try outputFile.write(from: buffer)

        case .insufficientDataFromInputNode:
            // applicable only if using the input node as one of the sources
            break

        case .cannotDoInCurrentContext:
            // engine could not render in the current render call, retry in next iteration
            break

        case .error:
            // error occurred while rendering
            fatalError("render failed")
        }
    } catch {
        fatalError("render failed, \(error)")
    }
}

player.stop()
engine.stop()

print("Output \(outputFile.url)")
print("AVAudioEngine offline rendering completed")

You can find more documentation and samples covering these audio unit updates in Apple's developer documentation.
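
Since the goal here is uploading the result, it may also help to write a compressed file instead of CAF. A sketch (the settings below are an assumption, not part of the original answer) that creates the destination AVAudioFile with AAC settings, so the same render loop produces an .m4a; AVAudioFile converts the PCM buffers while writing:

import AVFoundation

// Sketch: create the destination file with AAC settings so the manual-rendering
// loop above writes a compressed .m4a instead of a CAF file.
func makeCompressedOutputFile(matching processingFormat: AVAudioFormat) throws -> AVAudioFile {
    let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let outputURL = URL(fileURLWithPath: documentsPath + "/mixLoopProcessed.m4a")

    let aacSettings: [String: Any] = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVSampleRateKey: processingFormat.sampleRate,
        AVNumberOfChannelsKey: processingFormat.channelCount,
        AVEncoderBitRateKey: 128_000
    ]
    // outputFile.write(from: buffer) then performs the PCM-to-AAC conversion
    return try AVAudioFile(forWriting: outputURL, settings: aacSettings)
}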


0 votes

All of the filters in one chain, plus enabling/disabling filters during rendering.

import AVFoundation

func combine_playerNode_render() {
    do {

        // this file should be in resources
        let FILE_URL_PIANO_STEREO = Bundle.main.url(
            forResource: "piano-stereo", withExtension: "wav"
        )!

        let FILE_PATH_RESULT = "/tmp/result.wav"

        let avEngine     = AVAudioEngine()
        let avPlayerNode = AVAudioPlayerNode()
        let avPitch      = AVAudioUnitTimePitch()
        let avSpeed      = AVAudioUnitVarispeed()
        let avDelay      = AVAudioUnitDelay()
        let avDistortion = AVAudioUnitDistortion()
        let avReverb     = AVAudioUnitReverb()
        let avEqualizer  = AVAudioUnitEQ()

        // load
        let avFileSrc = try AVAudioFile(
            forReading: FILE_URL_PIANO_STEREO
        )
        // AVFoundation has no AVAudioPCMBuffer(file:) initializer, so read the
        // whole file into a PCM buffer explicitly
        let avBufferSrc = AVAudioPCMBuffer(
            pcmFormat    : avFileSrc.processingFormat,
            frameCapacity: AVAudioFrameCount(avFileSrc.length)
        )!
        try avFileSrc.read(into: avBufferSrc)

        // init
        avEngine.attach(avPlayerNode)
        avEngine.attach(avPitch)
        avEngine.attach(avSpeed)
        avEngine.attach(avDelay)
        avEngine.attach(avDistortion)
        avEngine.attach(avReverb)
        avEngine.attach(avEqualizer)

        avEngine.connect(
                    avPlayerNode,
                to: avPitch,
            format: avBufferSrc.format)
        avEngine.connect(
                    avPitch,
                to: avSpeed,
            format: avBufferSrc.format)
        avEngine.connect(
                    avSpeed,
                to: avDelay,
            format: avBufferSrc.format)
        avEngine.connect(
                    avDelay,
                to: avDistortion,
            format: avBufferSrc.format)
        avEngine.connect(
                    avDistortion,
                to: avReverb,
            format: avBufferSrc.format)
        avEngine.connect(
                    avReverb,
                to: avEqualizer,
            format: avBufferSrc.format)
        avEngine.connect(
                    avEqualizer,
                to: avEngine.mainMixerNode,
            format: avBufferSrc.format
        )

        // render
        avPlayerNode.scheduleBuffer(avBufferSrc)

        try avEngine.enableManualRenderingMode(
           .offline,
            format: avBufferSrc.format,
            maximumFrameCount: avBufferSrc.frameLength
        )

        try avEngine.start()
        avPlayerNode.play()

        let avFileDst = try AVAudioFile(
            forWriting  : URL(fileURLWithPath: FILE_PATH_RESULT),
            settings    : avBufferSrc.format.settings,
            commonFormat: avBufferSrc.format.commonFormat,
            interleaved : avBufferSrc.format.isInterleaved
        )

        let avBufferOut = AVAudioPCMBuffer(
            pcmFormat    : avEngine.manualRenderingFormat,
            frameCapacity: avEngine.manualRenderingMaximumFrameCount
        )!

        while avEngine.manualRenderingSampleTime < avFileSrc.length {

            // demonstration of changing filters dynamically
            if avEngine.manualRenderingSampleTime > 3000 {
                avDelay     .wetDryMix = 100.0
                avDistortion.wetDryMix = 100.0
                avReverb    .wetDryMix = 100.0
            } else {
                avDelay     .wetDryMix = 0.0
                avDistortion.wetDryMix = 0.0
                avReverb    .wetDryMix = 0.0
            }

            let status = try avEngine.renderOffline(
                1024, to: avBufferOut
            )

            switch status {
                case .success:
                    try avFileDst.write(
                        from: avBufferOut
                    )
                case .error: fatalError("render failed")
                case .insufficientDataFromInputNode: break
                case .cannotDoInCurrentContext     : break
                default                            : break
            }

        }
        avPlayerNode.stop()
        avEngine.stop()
        print("rendering completed: \(avFileDst.url)")

    } catch {
        print("Error: \(error).")
    }
}