Question

I am working on an app with the following requirements:

  1. Record real-time audio from the iOS device (iPhone/iPad) and send it to a server over the network
  2. Play audio received from the network server on the iOS device (iPhone/iPad)

Both of the above need to happen simultaneously.

I have used an Audio Unit (the VoiceProcessingIO subtype) for this.

I have run into a problem where I hear the same audio that I speak into the iPhone mic instead of the audio received from the network server.

I have searched a lot for how to avoid this but haven't found a solution.

If anyone has had the same problem and found a solution, sharing it would help a lot.

Here is my code for initializing the Audio Unit:

-(void)initializeAudioUnit
{

    audioUnit = NULL;
    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);


    UInt32 flag = 1;
    // Enable IO for recording
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output,
                                  kOutputBus,
                                  &flag,
                                  sizeof(flag));


    AudioStreamBasicDescription audioStreamBasicDescription;

    // Describe format
    audioStreamBasicDescription.mSampleRate         = 16000;
    audioStreamBasicDescription.mFormatID           = kAudioFormatLinearPCM;
    audioStreamBasicDescription.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kLinearPCMFormatFlagIsNonInterleaved;
    audioStreamBasicDescription.mFramesPerPacket    = 1;
    audioStreamBasicDescription.mChannelsPerFrame   = 1;
    audioStreamBasicDescription.mBitsPerChannel     = 16;
    audioStreamBasicDescription.mBytesPerPacket     = 2;
    audioStreamBasicDescription.mBytesPerFrame      = 2;



    // Apply the format to the output scope of the input bus (mic data out)
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioStreamBasicDescription,
                                  sizeof(audioStreamBasicDescription));
    NSLog(@"Status[%d]",(int)status);

    // Apply the format to the input scope of the output bus (speaker data in)
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioStreamBasicDescription,
                                  sizeof(audioStreamBasicDescription));
    NSLog(@"Status[%d]",(int)status);


    AURenderCallbackStruct callbackStruct;

    // Set input (recording) callback
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)(self);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));

    // Set render (playback) callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)(self);
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_SetRenderCallback,
                                  kAudioUnitScope_Global,
                                  kOutputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));

    // Tell the unit not to allocate its own buffer for the input bus;
    // the recording callback supplies one to AudioUnitRender instead.
    flag = 0;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));

}
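
(For completeness: the unit also has to be initialized and started before either callback fires. That code is not shown here, but would typically be:)

    status = AudioUnitInitialize(audioUnit);
    NSLog(@"AudioUnitInitialize status[%d]", (int)status);
    status = AudioOutputUnitStart(audioUnit);
    NSLog(@"AudioOutputUnitStart status[%d]", (int)status);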

Recording callback:

static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData)
{
    MyAudioViewController *THIS = (__bridge MyAudioViewController *)inRefCon;

    // Build a one-channel buffer list for AudioUnitRender to fill
    // (2 bytes per frame: 16-bit mono PCM)
    AudioBuffer tempBuffer;
    tempBuffer.mNumberChannels = 1;
    tempBuffer.mDataByteSize = inNumberFrames * 2;
    tempBuffer.mData = malloc(inNumberFrames * 2);

    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0] = tempBuffer;

    OSStatus status;
    status = AudioUnitRender(THIS->audioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             kInputBus,
                             inNumberFrames,
                             &bufferList);

    if (noErr != status) {
        printf("AudioUnitRender error: %d", (int)status);
        return noErr;
    }

    [THIS processAudio:&bufferList];

    free(bufferList.mBuffers[0].mData);

    return noErr;
}

Playback callback:

static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
    NSLog(@"In playback callback");

    MyAudioViewController *THIS = (__bridge MyAudioViewController *)inRefCon;

    int32_t availableBytes = 0;
    char *inBuffer = GetDataFromCircularBuffer(&THIS->mybuffer, &availableBytes);
    NSLog(@"bytes available in buffer[%d]", availableBytes);
    decodeSpeexData(inBuffer, availableBytes, (__bridge void *)(THIS));
    ConsumeReadBytes(&(THIS->mybuffer), availableBytes);

    // NOTE: targetBuffer was not declared anywhere in the posted code;
    // presumably it should be the buffer the unit plays out, i.e.
    // ioData->mBuffers[0].mData.
    memcpy(ioData->mBuffers[0].mData, THIS->outTemp, inNumberFrames * 2);

    return noErr;
}

Processing the audio recorded from the mic:

- (void) processAudio: (AudioBufferList*) bufferList
{
    AudioBuffer sourceBuffer = bufferList->mBuffers[0];

    //    NSLog(@"Origin size: %d", (int)sourceBuffer.mDataByteSize);
    int size = 0;
    encodeAudioDataSpeex((spx_int16_t*)sourceBuffer.mData, sourceBuffer.mDataByteSize, &size, (__bridge void *)(self));
    [self performSelectorOnMainThread:@selector(SendAudioData:)
                           withObject:[NSData dataWithBytes:self->jitterBuffer length:size]
                        waitUntilDone:NO];

    NSLog(@"Encoded size: %i", size);

} 

Solution

Your playbackCallback render callback is responsible for the audio that is sent to the RemoteIO (here, VoiceProcessingIO) speaker output. If this render callback puts no valid data into its callback buffers (ioData), whatever junk was left in those buffers (perhaps data that previously passed through the record path) may be sent to the speaker instead.
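
As an illustration, a defensive playback callback can zero the output buffers first and only then copy in decoded data, so an underrun produces silence instead of stale samples. A minimal sketch, reusing the mybuffer / decodeSpeexData / outTemp helpers from the question and assuming the decoder leaves at least one buffer's worth of 16-bit mono PCM in outTemp:

    static OSStatus playbackCallback(void *inRefCon,
                                     AudioUnitRenderActionFlags *ioActionFlags,
                                     const AudioTimeStamp *inTimeStamp,
                                     UInt32 inBusNumber,
                                     UInt32 inNumberFrames,
                                     AudioBufferList *ioData)
    {
        MyAudioViewController *THIS = (__bridge MyAudioViewController *)inRefCon;
        UInt32 bytesWanted = inNumberFrames * 2;   // 16-bit mono PCM

        // Start from silence so stale buffer contents are never played.
        for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }

        int32_t availableBytes = 0;
        char *inBuffer = GetDataFromCircularBuffer(&THIS->mybuffer, &availableBytes);
        if (inBuffer == NULL || availableBytes == 0) {
            return noErr;   // underrun: play the silence written above
        }

        decodeSpeexData(inBuffer, availableBytes, (__bridge void *)(THIS));
        ConsumeReadBytes(&THIS->mybuffer, availableBytes);

        // Assumes decodeSpeexData left at least bytesWanted bytes in outTemp.
        memcpy(ioData->mBuffers[0].mData, THIS->outTemp, bytesWanted);
        return noErr;
    }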

Also, Apple DTS strongly recommends that a recording callback not make any memory-management calls, such as malloc(), so the per-render malloc()/free() pair in your recordingCallback may be a bug contributing to the problem as well.
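
One common pattern is to allocate the capture buffer once, outside the render thread, and reuse it from the callback. A sketch along those lines, where recordBuffer and kMaxFrames are hypothetical additions (an ivar and a frame-count bound not present in the question's code):

    // Allocated once, e.g. at the end of initializeAudioUnit:
    //     recordBuffer = malloc(kMaxFrames * sizeof(SInt16));
    static OSStatus recordingCallback(void *inRefCon,
                                      AudioUnitRenderActionFlags *ioActionFlags,
                                      const AudioTimeStamp *inTimeStamp,
                                      UInt32 inBusNumber,
                                      UInt32 inNumberFrames,
                                      AudioBufferList *ioData)
    {
        MyAudioViewController *THIS = (__bridge MyAudioViewController *)inRefCon;

        // Point the buffer list at the preallocated buffer; no malloc/free
        // on the render thread.
        AudioBufferList bufferList;
        bufferList.mNumberBuffers = 1;
        bufferList.mBuffers[0].mNumberChannels = 1;
        bufferList.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);
        bufferList.mBuffers[0].mData = THIS->recordBuffer;

        OSStatus status = AudioUnitRender(THIS->audioUnit,
                                          ioActionFlags,
                                          inTimeStamp,
                                          kInputBus,
                                          inNumberFrames,
                                          &bufferList);
        if (status != noErr) {
            return status;
        }

        [THIS processAudio:&bufferList];
        return noErr;
    }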

Licensed under: CC-BY-SA with attribution