I modified the code provided by Tim Bolstad at http://timbolstad.com/2010/03/16/core-audio-getting-started-pt2/ (God bless him) and added a small slider that changes the output tone frequency from 40 Hz to 200000 Hz. I now want to be able to run a LPF over the generated tone.
First of all, does anyone have a detailed guide explaining how to do this? I tried simply adding a node in between, but it doesn't work. Apparently I need to convert the 16-bit integer samples to the 8.24 fixed-point format before feeding the audio samples into the filter, and then convert them back to 16-bit integers afterwards. Is that the problem? Or did I connect the nodes incorrectly? Where am I supposed to set the filter's cutoff frequency and its other parameters?
Can someone explain what AudioUnitGetProperty does? Apple's documentation on these topics is extremely fragmented and pretty much worthless :(
-(void)initializeAUGraph
{
    OSStatus result = noErr;
    result = NewAUGraph(&mGraph);

    AUNode outputNode;
    AUNode mixerNode;
    AUNode effectsNode;

    // Low-pass filter effect unit
    AudioComponentDescription effects_desc;
    effects_desc.componentType         = kAudioUnitType_Effect;
    effects_desc.componentSubType      = kAudioUnitSubType_LowPassFilter;
    effects_desc.componentFlags        = 0;
    effects_desc.componentFlagsMask    = 0;
    effects_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Multichannel mixer unit
    AudioComponentDescription mixer_desc;
    mixer_desc.componentType         = kAudioUnitType_Mixer;
    mixer_desc.componentSubType      = kAudioUnitSubType_MultiChannelMixer;
    mixer_desc.componentFlags        = 0;
    mixer_desc.componentFlagsMask    = 0;
    mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Remote I/O output unit
    AudioComponentDescription output_desc;
    output_desc.componentType         = kAudioUnitType_Output;
    output_desc.componentSubType      = kAudioUnitSubType_RemoteIO;
    output_desc.componentFlags        = 0;
    output_desc.componentFlagsMask    = 0;
    output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    result = AUGraphAddNode(mGraph, &output_desc, &outputNode);
    result = AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
    result = AUGraphAddNode(mGraph, &effects_desc, &effectsNode);

    // Wire the graph: mixer -> low-pass filter -> output
    result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, effectsNode, 0);
    result = AUGraphConnectNodeInput(mGraph, effectsNode, 0, outputNode, 0);

    result = AUGraphOpen(mGraph);

    // Get the mixer and effects audio units from their nodes
    result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
    result = AUGraphNodeInfo(mGraph, effectsNode, NULL, &mEffects);

    UInt32 numbuses = 1;
    UInt32 size = sizeof(numbuses);
    result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount,
                                  kAudioUnitScope_Input, 0, &numbuses, size);

    CAStreamBasicDescription desc;

    // Loop through and set up a render callback for each source you want
    // to send to the mixer. Right now we are only using a single bus,
    // so we could do without the loop.
    for (UInt32 i = 0; i < numbuses; ++i)
    {
        // This struct describes the function that will be called
        // to provide a buffer of audio samples for the mixer unit.
        AURenderCallbackStruct renderCallbackStruct;
        renderCallbackStruct.inputProc       = &renderInput;
        renderCallbackStruct.inputProcRefCon = self;

        // Set the callback for the specified node's specified input bus
        result = AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &renderCallbackStruct);

        // Zero the structure first so there are no spurious values, then
        // read the current CAStreamBasicDescription from the mixer input bus.
        memset(&desc, 0, sizeof(desc));
        size = sizeof(desc);
        result = AudioUnitGetProperty(mMixer,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      i,
                                      &desc,
                                      &size);

        // Modify the CAStreamBasicDescription. We're going to use 16-bit
        // signed ints because they're easier to deal with. The mixer unit
        // will accept either 16-bit signed integers or 32-bit 8.24
        // fixed-point integers.
        desc.mSampleRate       = kGraphSampleRate; // set sample rate
        desc.mFormatID         = kAudioFormatLinearPCM;
        desc.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        desc.mBitsPerChannel   = sizeof(AudioSampleType) * 8; // AudioSampleType == 16-bit signed int
        desc.mChannelsPerFrame = 1;
        desc.mFramesPerPacket  = 1;
        desc.mBytesPerFrame    = (desc.mBitsPerChannel / 8) * desc.mChannelsPerFrame;
        desc.mBytesPerPacket   = desc.mBytesPerFrame * desc.mFramesPerPacket;

        printf("Mixer input format: "); desc.Print();

        // Apply the modified CAStreamBasicDescription to the mixer input bus
        result = AudioUnitSetProperty(mMixer,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      i,
                                      &desc,
                                      sizeof(desc));
    }

    // Apply the same CAStreamBasicDescription to the mixer output bus
    // (this gets overridden with the canonical 8.24 format just below)
    result = AudioUnitSetProperty(mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  sizeof(desc));

    //************************************************************
    //*** Set up the audio output stream ***
    //************************************************************

    // Zero the structure, then read the current CAStreamBasicDescription
    // from the mixer output bus.
    memset(&desc, 0, sizeof(desc));
    size = sizeof(desc);
    result = AudioUnitGetProperty(mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  &size);

    // AUCanonical on the iPhone is the 8.24 fixed-point format that is
    // native to the platform. The mixer unit does the format shifting for you.
    desc.SetAUCanonical(1, true);
    desc.mSampleRate = kGraphSampleRate;

    // Apply the modified CAStreamBasicDescription to the mixer output bus
    result = AudioUnitSetProperty(mMixer,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  0,
                                  &desc,
                                  sizeof(desc));

    // Once everything is set up, call initialize to validate connections
    result = AUGraphInitialize(mGraph);
}
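One note on the listing above: every OSStatus is assigned to result and then ignored, so a failed connection or a stream-format mismatch dies silently. A minimal checking sketch (the CheckStatus helper name is just illustrative, not part of Tim Bolstad's code):

#include <stdio.h>

static void CheckStatus(OSStatus status, const char *operation)
{
    if (status != noErr) {
        // Core Audio errors are often four-char codes; the raw
        // integer is enough to look them up.
        fprintf(stderr, "%s failed: %ld\n", operation, (long)status);
    }
}

// e.g. wrap each graph call:
CheckStatus(AUGraphConnectNodeInput(mGraph, mixerNode, 0, effectsNode, 0),
            "connect mixer -> lowpass");
CheckStatus(AUGraphInitialize(mGraph), "AUGraphInitialize");

If the format handshake between the mixer, the filter, and RemoteIO is wrong, AUGraphInitialize is usually the call that fails, so its status is the first thing worth printing.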
"Can someone explain what AudioUnitGetProperty does?"
Well, it gets the value of a property from an audio unit. A "property" is typically something you deal with as a programmer (e.g. the audio stream format, connection state), whereas a "parameter" is usually something you expose to the user (e.g. low-pass cutoff frequency, mixer volume). Notice that there are AudioUnitGetParameter and AudioUnitSetParameter functions to complement the AudioUnitGetProperty and AudioUnitSetProperty functions.
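For example (a minimal sketch, reusing the mEffects unit fetched via AUGraphNodeInfo above): the property call reads structured data such as the stream format, while the parameter call reads a single float value.

// Property: the stream format on the filter's input bus (programmer-facing)
CAStreamBasicDescription fmt;
UInt32 fmtSize = sizeof(fmt);
OSStatus err = AudioUnitGetProperty(mEffects,
                                    kAudioUnitProperty_StreamFormat,
                                    kAudioUnitScope_Input,
                                    0,
                                    &fmt,
                                    &fmtSize);

// Parameter: the cutoff frequency (user-facing)
AudioUnitParameterValue cutoff = 0;
err = AudioUnitGetParameter(mEffects,
                            kLowPassParam_CutoffFrequency,
                            kAudioUnitScope_Global,
                            0,
                            &cutoff);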
You basically have to "just know" what an audio unit's properties/parameters are and what values they expect. The best source of documentation on this is two headers in AudioUnit.framework, namely AudioUnitProperties.h and AudioUnitParameters.h. The next best source is Xcode's autocompletion. For example, the AULowPass parameters are kLowPassParam_CutoffFrequency and kLowPassParam_Resonance, so you can just type kLowPassParam and Xcode will show you what's available. Other AUs generally follow this scheme.
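If you'd rather not rely on the header comments for valid ranges, you can also ask the unit itself via the kAudioUnitProperty_ParameterInfo property. A sketch, again using mEffects from above (note that for this property the element argument is the parameter ID):

AudioUnitParameterInfo info;
UInt32 infoSize = sizeof(info);
OSStatus err = AudioUnitGetProperty(mEffects,
                                    kAudioUnitProperty_ParameterInfo,
                                    kAudioUnitScope_Global,
                                    kLowPassParam_CutoffFrequency, // element = parameter ID here
                                    &info,
                                    &infoSize);
if (err == noErr)
    printf("cutoff: min %f, max %f, default %f\n",
           info.minValue, info.maxValue, info.defaultValue);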
"... but it doesn't work, apparently"
I need more information here. Do you mean that you simply can't hear a difference? The AULowPass starts with a very high cutoff frequency, so unless you set it lower you may not hear any difference at all.
Try setting the cutoff quite low, for example 500 Hz. You do that like this:
AudioUnitSetParameter(mEffects,
                      kLowPassParam_CutoffFrequency,
                      kAudioUnitScope_Global,
                      0,
                      500,
                      0);
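Since you already have a slider driving the tone frequency, you could hook a second slider to the same call to sweep the cutoff live. A sketch with a hypothetical cutoffSliderChanged: action (per the comments in AudioUnitParameters.h, the cutoff runs from 10 Hz up to half the sample rate and defaults to 6900 Hz):

// Hypothetical UISlider action; the slider's minimumValue/maximumValue
// would be set to the filter's usable range, e.g. 10 ... kGraphSampleRate / 2.
- (IBAction)cutoffSliderChanged:(UISlider *)sender
{
    AudioUnitSetParameter(mEffects,
                          kLowPassParam_CutoffFrequency,
                          kAudioUnitScope_Global,
                          0,
                          sender.value,
                          0);
}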